From 3b2c32a9b1de0165b92ae03407640afd3bebefff Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 8 Jul 2022 20:01:37 -0700 Subject: [PATCH 001/321] mm: discard __GFP_ATOMIC __GFP_ATOMIC serves little purpose. Its main effect is to set ALLOC_HARDER which adds a few little boosts to increase the chance of an allocation succeeding, one of which is to lower the water-mark at which it will succeed. It is *always* paired with __GFP_HIGH which sets ALLOC_HIGH which also adjusts this watermark. It is probable that other users of __GFP_HIGH should benefit from the other little bonuses that __GFP_ATOMIC gets. __GFP_ATOMIC also gives a warning if used with __GFP_DIRECT_RECLAIM. There is little point to this. We already get a might_sleep() warning if __GFP_DIRECT_RECLAIM is set. __GFP_ATOMIC allows the "watermark_boost" to be side-stepped. It is probable that testing ALLOC_HARDER is a better fit here. __GFP_ATOMIC is used by tegra-smmu.c to check if the allocation might sleep. This should test __GFP_DIRECT_RECLAIM instead. This patch: - removes __GFP_ATOMIC - causes __GFP_HIGH to set ALLOC_HARDER unless __GFP_NOMEMALLOC is set (as well as ALLOC_HIGH). - makes other adjustments as suggested by the above. The net result is not change to GFP_ATOMIC allocations. Other allocations that use __GFP_HIGH will benefit from a few different extra privileges. This affects: xen, dm, md, ntfs3 the vermillion frame buffer hibernation ksm swap all of which likely produce more benefit than cost if these selected allocation are more likely to succeed quickly. Link: https://lkml.kernel.org/r/163712397076.13692.4727608274002939094@noble.neil.brown.name Signed-off-by: NeilBrown Reviewed-by: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Thierry Reding Cc: Mel Gorman Signed-off-by: Andrew Morton --- Documentation/mm/balance.rst | 2 +- drivers/iommu/tegra-smmu.c | 4 ++-- include/linux/gfp.h | 12 ++++-------- include/trace/events/mmflags.h | 1 - lib/test_printf.c | 8 ++++---- mm/internal.h | 2 +- mm/page_alloc.c | 16 ++++------------ tools/include/linux/gfp.h | 3 +-- tools/perf/builtin-kmem.c | 1 - 9 files changed, 17 insertions(+), 32 deletions(-) diff --git a/Documentation/mm/balance.rst b/Documentation/mm/balance.rst index 6a1fadf3e1735..e38e9d83c1c72 100644 --- a/Documentation/mm/balance.rst +++ b/Documentation/mm/balance.rst @@ -6,7 +6,7 @@ Memory Balancing Started Jan 2000 by Kanoj Sarcar -Memory balancing is needed for !__GFP_ATOMIC and !__GFP_KSWAPD_RECLAIM as +Memory balancing is needed for !__GFP_HIGH and !__GFP_KSWAPD_RECLAIM as well as for non __GFP_IO allocations. The first reason why a caller may avoid reclaim is that the caller can not diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 1fea68e551f13..2f2b120336186 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -671,12 +671,12 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as, * allocate page in a sleeping context if GFP flags permit. Hence * spinlock needs to be unlocked and re-locked after allocation. 
*/ - if (!(gfp & __GFP_ATOMIC)) + if (gfp & __GFP_DIRECT_RECLAIM) spin_unlock_irqrestore(&as->lock, *flags); page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO); - if (!(gfp & __GFP_ATOMIC)) + if (gfp & __GFP_DIRECT_RECLAIM) spin_lock_irqsave(&as->lock, *flags); /* diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2d2ccae933c20..9a88cce23e173 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -39,7 +39,7 @@ struct vm_area_struct; #define ___GFP_IO 0x40u #define ___GFP_FS 0x80u #define ___GFP_ZERO 0x100u -#define ___GFP_ATOMIC 0x200u +/* 0x200u unused */ #define ___GFP_DIRECT_RECLAIM 0x400u #define ___GFP_KSWAPD_RECLAIM 0x800u #define ___GFP_WRITE 0x1000u @@ -124,11 +124,8 @@ struct vm_area_struct; * * %__GFP_HIGH indicates that the caller is high-priority and that granting * the request is necessary before the system can make forward progress. - * For example, creating an IO context to clean pages. - * - * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is - * high priority. Users are typically interrupt handlers. This may be - * used in conjunction with %__GFP_HIGH + * For example creating an IO context to clean pages and requests + * from atomic context. * * %__GFP_MEMALLOC allows access to all memory. This should only be used when * the caller guarantees the allocation will allow more memory to be freed @@ -143,7 +140,6 @@ struct vm_area_struct; * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. * This takes precedence over the %__GFP_MEMALLOC flag if both are set. */ -#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) @@ -337,7 +333,7 @@ struct vm_area_struct; * version does not attempt reclaim/compaction at all and is by default used * in page fault path, while the non-light is used by khugepaged. 
*/ -#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) #define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index e87cb2b80ed3c..11524cda4a955 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -31,7 +31,6 @@ gfpflag_string(__GFP_HIGHMEM), \ gfpflag_string(GFP_DMA32), \ gfpflag_string(__GFP_HIGH), \ - gfpflag_string(__GFP_ATOMIC), \ gfpflag_string(__GFP_IO), \ gfpflag_string(__GFP_FS), \ gfpflag_string(__GFP_NOWARN), \ diff --git a/lib/test_printf.c b/lib/test_printf.c index 07309c45f3279..8010de49b6c5d 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -673,17 +673,17 @@ flags(void) gfp = GFP_ATOMIC|__GFP_DMA; test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp); - gfp = __GFP_ATOMIC; - test("__GFP_ATOMIC", "%pGg", &gfp); + gfp = __GFP_HIGH; + test("__GFP_HIGH", "%pGg", &gfp); /* Any flags not translated by the table should remain numeric */ gfp = ~__GFP_BITS_MASK; snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp); test(cmp_buffer, "%pGg", &gfp); - snprintf(cmp_buffer, BUF_SIZE, "__GFP_ATOMIC|%#lx", + snprintf(cmp_buffer, BUF_SIZE, "__GFP_HIGH|%#lx", (unsigned long) gfp); - gfp |= __GFP_ATOMIC; + gfp |= __GFP_HIGH; test(cmp_buffer, "%pGg", &gfp); kfree(cmp_buffer); diff --git a/mm/internal.h b/mm/internal.h index c0f8fbe0445b5..7cf12a15475b9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -24,7 +24,7 @@ struct folio_batch; #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\ - __GFP_ATOMIC|__GFP_NOLOCKDEP) + __GFP_NOLOCKDEP) /* The GFP flags allowed during early boot */ #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c9c02b23f02f3..6ead871546ec9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3984,12 +3984,12 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order, free_pages)) return true; /* - * Ignore watermark boosting for GFP_ATOMIC order-0 allocations + * Ignore watermark boosting for GFP_HIGH order-0 allocations * when checking the min watermark. The min watermark is the * point where boosting is ignored so that kswapd is woken up * when below the low watermark. */ - if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost + if (unlikely(!order && (alloc_flags & ALLOC_HARDER) && z->watermark_boost && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { mark = z->_watermark[WMARK_MIN]; return __zone_watermark_ok(z, order, mark, highest_zoneidx, @@ -4724,12 +4724,12 @@ gfp_to_alloc_flags(gfp_t gfp_mask) * The caller may dip into page reserves a bit more if the caller * cannot run direct reclaim, or if the caller has realtime scheduling * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will - * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). + * set both ALLOC_HARDER (unless __GFP_NOMEMALLOC) and ALLOC_HIGH. */ alloc_flags |= (__force int) (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); - if (gfp_mask & __GFP_ATOMIC) { + if (gfp_mask & __GFP_HIGH) { /* * Not worth trying to allocate harder for __GFP_NOMEMALLOC even * if it can't schedule. 
@@ -4922,14 +4922,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, unsigned int cpuset_mems_cookie; int reserve_flags; - /* - * We also sanity check to catch abuse of atomic reserves being used by - * callers that are not in atomic context. - */ - if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == - (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) - gfp_mask &= ~__GFP_ATOMIC; - retry_cpuset: compaction_retries = 0; no_progress_loops = 0; diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h index b238dbc9eb858..56eec4445bc9e 100644 --- a/tools/include/linux/gfp.h +++ b/tools/include/linux/gfp.h @@ -12,7 +12,6 @@ #define __GFP_FS 0x80u #define __GFP_NOWARN 0x200u #define __GFP_ZERO 0x8000u -#define __GFP_ATOMIC 0x80000u #define __GFP_ACCOUNT 0x100000u #define __GFP_DIRECT_RECLAIM 0x400000u #define __GFP_KSWAPD_RECLAIM 0x2000000u @@ -20,7 +19,7 @@ #define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM) #define GFP_ZONEMASK 0x0fu -#define GFP_ATOMIC (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM) +#define GFP_ATOMIC (__GFP_HIGH | __GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index ebfab2ca17024..4a06d83f2ac5a 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -640,7 +640,6 @@ static const struct { { "__GFP_HIGHMEM", "HM" }, { "GFP_DMA32", "D32" }, { "__GFP_HIGH", "H" }, - { "__GFP_ATOMIC", "_A" }, { "__GFP_IO", "I" }, { "__GFP_FS", "F" }, { "__GFP_NOWARN", "NWR" }, From cd9282704120bb6ff7c978d75f6addca510c15d8 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Fri, 8 Jul 2022 20:01:37 -0700 Subject: [PATCH 002/321] mips: rename mt_init to mips_mt_init Move mt_init out of the way for the maple tree. Use mips_mt prefix to match the rest of the functions in the file. Link: https://lkml.kernel.org/r/20220504002554.654642-2-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/mips/kernel/mips-mt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index d5f7362e8c245..dc023a9798035 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -230,7 +230,7 @@ void mips_mt_set_cpuoptions(void) struct class *mt_class; -static int __init mt_init(void) +static int __init mips_mt_init(void) { struct class *mtc; @@ -243,4 +243,4 @@ static int __init mt_init(void) return 0; } -subsys_initcall(mt_init); +subsys_initcall(mips_mt_init); From fdd6ab16a21d46bb2faa3ee3285ec1cf0988f163 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Mon, 20 Jun 2022 21:09:09 -0400 Subject: [PATCH 003/321] android: binder: stop saving a pointer to the VMA Do not record a pointer to a VMA outside of the mmap_lock for later use. This is unsafe and there are a number of failure paths *after* the recorded VMA pointer may be freed during setup. There is no callback to the driver to clear the saved pointer from generic mm code. Furthermore, the VMA pointer may become stale if any number of VMA operations end up freeing the VMA so saving it was fragile to being with. Instead, change the binder_alloc struct to record the start address of the VMA and use vma_lookup() to get the vma when needed. 
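The pattern is roughly the following (a minimal sketch for illustration only, not
the binder code itself; the struct and function names other than vma_lookup(),
mmap_read_lock() and mmap_read_unlock() are invented):

    #include <linux/mm.h>

    struct example_alloc {
            unsigned long vma_addr;         /* VMA start address recorded at mmap time */
            struct mm_struct *vma_vm_mm;    /* mm that owns the mapping */
    };

    /* Re-resolve the VMA under mmap_lock instead of trusting a cached pointer. */
    static bool example_vma_is_mapped(struct example_alloc *alloc)
    {
            struct vm_area_struct *vma = NULL;

            mmap_read_lock(alloc->vma_vm_mm);
            if (alloc->vma_addr)
                    vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
            mmap_read_unlock(alloc->vma_vm_mm);

            /* Only the NULL-ness is used after dropping the lock. */
            return vma != NULL;
    }
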
Add lockdep mmap_lock checks on updates to the vma pointer to ensure the lock is held and depend on that lock for synchronization of readers and writers - which was already the case anyways, so the smp_wmb()/smp_rmb() was not necessary. Link: https://lkml.kernel.org/r/20220621140212.vpkio64idahetbyf@revolver Fixes: da1b9564e85b ("android: binder: fix the race mmap and alloc_new_buf_locked") Reported-by: syzbot+58b51ac2b04e388ab7b0@syzkaller.appspotmail.com Signed-off-by: Liam R. Howlett Cc: Minchan Kim Cc: Christian Brauner (Microsoft) Cc: Greg Kroah-Hartman Cc: Hridya Valsaraju Cc: Joel Fernandes Cc: Martijn Coenen Cc: Suren Baghdasaryan Cc: Todd Kjos Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- drivers/android/binder_alloc.c | 30 ++++++++++++++---------------- drivers/android/binder_alloc.h | 2 +- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 51b502217d000..f555eebceef6b 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, if (mm) { mmap_read_lock(mm); - vma = alloc->vma; + vma = vma_lookup(mm, alloc->vma_addr); } if (!vma && need_mm) { @@ -313,16 +313,15 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, static inline void binder_alloc_set_vma(struct binder_alloc *alloc, struct vm_area_struct *vma) { - if (vma) + unsigned long vm_start = 0; + + if (vma) { + vm_start = vma->vm_start; alloc->vma_vm_mm = vma->vm_mm; - /* - * If we see alloc->vma is not NULL, buffer data structures set up - * completely. Look at smp_rmb side binder_alloc_get_vma. - * We also want to guarantee new alloc->vma_vm_mm is always visible - * if alloc->vma is set. 
- */ - smp_wmb(); - alloc->vma = vma; + } + + mmap_assert_write_locked(alloc->vma_vm_mm); + alloc->vma_addr = vm_start; } static inline struct vm_area_struct *binder_alloc_get_vma( @@ -330,11 +329,9 @@ static inline struct vm_area_struct *binder_alloc_get_vma( { struct vm_area_struct *vma = NULL; - if (alloc->vma) { - /* Look at description in binder_alloc_set_vma */ - smp_rmb(); - vma = alloc->vma; - } + if (alloc->vma_addr) + vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr); + return vma; } @@ -817,7 +814,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) buffers = 0; mutex_lock(&alloc->mutex); - BUG_ON(alloc->vma); + BUG_ON(alloc->vma_addr && + vma_lookup(alloc->vma_vm_mm, alloc->vma_addr)); while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 7dea57a84c79b..1e4fd37af5e03 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -100,7 +100,7 @@ struct binder_lru_page { */ struct binder_alloc { struct mutex mutex; - struct vm_area_struct *vma; + unsigned long vma_addr; struct mm_struct *vma_vm_mm; void __user *buffer; struct list_head buffers; From 568269f3095870d5c4a8ea48dfb17bf3c6026bac Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 21 Jun 2022 19:16:17 -0700 Subject: [PATCH 004/321] android-binder-stop-saving-a-pointer-to-the-vma-fix fix drivers/android/binder_alloc_selftest.c drivers/android/binder_alloc_selftest.c: In function 'binder_selftest_alloc': drivers/android/binder_alloc_selftest.c:290:43: error: 'struct binder_alloc' has no member named 'vma' 290 | if (!binder_selftest_run || !alloc->vma) Cc: Christian Brauner (Microsoft) Cc: Greg Kroah-Hartman Cc: Hridya Valsaraju Cc: Joel Fernandes Cc: "Liam R. Howlett" Cc: Martijn Coenen Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Suren Baghdasaryan Cc: Todd Kjos Signed-off-by: Andrew Morton --- drivers/android/binder_alloc_selftest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index c2b323bc3b3a5..43a881073a428 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc) if (!binder_selftest_run) return; mutex_lock(&binder_selftest_lock); - if (!binder_selftest_run || !alloc->vma) + if (!binder_selftest_run || !alloc->vma_addr) goto done; pr_info("STARTED\n"); binder_selftest_alloc_offset(alloc, end_offset, 0); From 934e348131501d4c96b7501a655de89d319b6809 Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Mon, 27 Jun 2022 15:18:59 +0000 Subject: [PATCH 005/321] android: binder: fix lockdep check on clearing vma MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When munmapping a vma, the mmap_lock can be degraded to a write before calling close() on the file handle. The binder close() function calls binder_alloc_set_vma() to clear the vma address, which now has a lock dep check for writing on the mmap_lock. Change the lockdep check to ensure the reading lock is held while clearing and keep the write check while writing. Link: https://lkml.kernel.org/r/20220627151857.2316964-1-Liam.Howlett@oracle.com Fixes: 472a68df605b ("android: binder: stop saving a pointer to the VMA") Signed-off-by: Liam R. 
Howlett Reported-by: syzbot+da54fa8d793ca89c741f@syzkaller.appspotmail.com Acked-by: Todd Kjos Cc: "Arve Hjønnevåg" Cc: Christian Brauner (Microsoft) Cc: Greg Kroah-Hartman Cc: Hridya Valsaraju Cc: Joel Fernandes Cc: Martijn Coenen Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- drivers/android/binder_alloc.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index f555eebceef6b..1014beb128025 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -315,12 +315,19 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc, { unsigned long vm_start = 0; + /* + * Allow clearing the vma with holding just the read lock to allow + * munmapping downgrade of the write lock before freeing and closing the + * file using binder_alloc_vma_close(). + */ if (vma) { vm_start = vma->vm_start; alloc->vma_vm_mm = vma->vm_mm; + mmap_assert_write_locked(alloc->vma_vm_mm); + } else { + mmap_assert_locked(alloc->vma_vm_mm); } - mmap_assert_write_locked(alloc->vma_vm_mm); alloc->vma_addr = vm_start; } From d0056b2bc0ba065825171b4ddd971479c0e7980e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:47 +0000 Subject: [PATCH 006/321] Maple Tree: add new data structure Patch series "Introducing the Maple Tree". The maple tree is an RCU-safe range based B-tree designed to use modern processor cache efficiently. There are a number of places in the kernel that a non-overlapping range-based tree would be beneficial, especially one with a simple interface. If you use an rbtree with other data structures to improve performance or an interval tree to track non-overlapping ranges, then this is for you. The tree has a branching factor of 10 for non-leaf nodes and 16 for leaf nodes. With the increased branching factor, it is significantly shorter than the rbtree so it has fewer cache misses. The removal of the linked list between subsequent entries also reduces the cache misses and the need to pull in the previous and next VMA during many tree alterations. The first user that is covered in this patch set is the vm_area_struct, where three data structures are replaced by the maple tree: the augmented rbtree, the vma cache, and the linked list of VMAs in the mm_struct. The long term goal is to reduce or remove the mmap_lock contention. The plan is to get to the point where we use the maple tree in RCU mode. Readers will not block for writers. A single write operation will be allowed at a time. A reader re-walks if stale data is encountered. VMAs would be RCU enabled and this mode would be entered once multiple tasks are using the mm_struct. Davidlor said : Yes I like the maple tree, and at this stage I don't think we can ask for : more from this series wrt the MM - albeit there seems to still be some : folks reporting breakage. Fundamentally I see Liam's work to (re)move : complexity out of the MM (not to say that the actual maple tree is not : complex) by consolidating the three complimentary data structures very : much worth it considering performance does not take a hit. This was very : much a turn off with the range locking approach, which worst case scenario : incurred in prohibitive overhead. Also as Liam and Matthew have : mentioned, RCU opens up a lot of nice performance opportunities, and in : addition academia[1] has shown outstanding scalability of address spaces : with the foundation of replacing the locked rbtree with RCU aware trees. 
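Before the implementation details below, a minimal usage sketch of the normal
API that this series adds may help; it is illustrative only (example_mt,
example_obj and the chosen range are invented, and error handling is trimmed):

    #include <linux/kernel.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>
    #include <linux/maple_tree.h>

    static DEFINE_MTREE(example_mt);
    static int example_obj;

    static int example_usage(void)
    {
            unsigned long index = 0;
            void *entry;
            int ret;

            /* Associate the object with the index range [10, 19]. */
            ret = mtree_store_range(&example_mt, 10, 19, &example_obj, GFP_KERNEL);
            if (ret)
                    return ret;

            /* Any index inside the range resolves to the stored entry. */
            entry = mtree_load(&example_mt, 15);
            pr_info("load(15) -> %p\n", entry);

            /* Iterate over every present entry in the tree. */
            mt_for_each(&example_mt, entry, index, ULONG_MAX)
                    pr_info("entry %p found, cursor now at %lu\n", entry, index);

            mtree_destroy(&example_mt);
            return 0;
    }
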
A similar work has been discovered in the academic press https://pdos.csail.mit.edu/papers/rcuvm:asplos12.pdf Sheer coincidence. We designed our tree with the intention of solving the hardest problem first. Upon settling on a b-tree variant and a rough outline, we researched ranged based b-trees and RCU b-trees and did find that article. So it was nice to find reassurances that we were on the right path, but our design choice of using ranges made that paper unusable for us. This patch (of 69): The maple tree is an RCU-safe range based B-tree designed to use modern processor cache efficiently. There are a number of places in the kernel that a non-overlapping range-based tree would be beneficial, especially one with a simple interface. If you use an rbtree with other data structures to improve performance or an interval tree to track non-overlapping ranges, then this is for you. The tree has a branching factor of 10 for non-leaf nodes and 16 for leaf nodes. With the increased branching factor, it is significantly shorter than the rbtree so it has fewer cache misses. The removal of the linked list between subsequent entries also reduces the cache misses and the need to pull in the previous and next VMA during many tree alterations. The first user that is covered in this patch set is the vm_area_struct, where three data structures are replaced by the maple tree: the augmented rbtree, the vma cache, and the linked list of VMAs in the mm_struct. The long term goal is to reduce or remove the mmap_lock contention. The plan is to get to the point where we use the maple tree in RCU mode. Readers will not block for writers. A single write operation will be allowed at a time. A reader re-walks if stale data is encountered. VMAs would be RCU enabled and this mode would be entered once multiple tasks are using the mm_struct. Link: https://lkml.kernel.org/r/20220621204632.3370049-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220504010716.661115-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220504002554.654642-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220504010716.661115-3-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220511144304.1430851-2-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220517145913.3480729-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220517152209.3486724-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220519150304.1289636-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220607063834.7004-1-lukas.bulwahn@gmail.com Link: https://lkml.kernel.org/r/20220615141921.417598-2-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220615141921.417598-3-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220616011739.802669-3-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220615174213.738849-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220617134609.1771611-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-2-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Signed-off-by: Matthew Wilcox (Oracle) Tested-by: David Howells Tested-by: Sven Schnelle Signed-off-by: Lukas Bulwahn Cc: Catalin Marinas Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- Documentation/core-api/index.rst | 1 + Documentation/core-api/maple_tree.rst | 217 + MAINTAINERS | 12 + include/linux/maple_tree.h | 685 ++ include/trace/events/maple_tree.h | 123 + init/main.c | 2 + lib/Kconfig.debug | 15 + lib/Makefile | 3 +- lib/maple_tree.c | 7041 +++++++++++++++++ tools/testing/radix-tree/.gitignore | 2 + tools/testing/radix-tree/generated/autoconf.h | 1 + tools/testing/radix-tree/linux/maple_tree.h | 7 + tools/testing/radix-tree/maple.c | 59 + .../radix-tree/trace/events/maple_tree.h | 5 + 14 files changed, 8172 insertions(+), 1 deletion(-) create mode 100644 Documentation/core-api/maple_tree.rst create mode 100644 include/linux/maple_tree.h create mode 100644 include/trace/events/maple_tree.h create mode 100644 lib/maple_tree.c create mode 100644 tools/testing/radix-tree/linux/maple_tree.h create mode 100644 tools/testing/radix-tree/maple.c create mode 100644 tools/testing/radix-tree/trace/events/maple_tree.h diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index 5b1188494bcd8..86ae4dbca1f59 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst @@ -36,6 +36,7 @@ Library functionality that is used throughout the kernel. kref assoc_array xarray + maple_tree idr circular-buffers rbtree diff --git a/Documentation/core-api/maple_tree.rst b/Documentation/core-api/maple_tree.rst new file mode 100644 index 0000000000000..45defcf15da71 --- /dev/null +++ b/Documentation/core-api/maple_tree.rst @@ -0,0 +1,217 @@ +.. SPDX-License-Identifier: GPL-2.0+ + + +========== +Maple Tree +========== + +:Author: Liam R. Howlett + +Overview +======== + +The Maple Tree is a B-Tree data type which is optimized for storing +non-overlapping ranges, including ranges of size 1. The tree was designed to +be simple to use and does not require a user written search method. It +supports iterating over a range of entries and going to the previous or next +entry in a cache-efficient manner. The tree can also be put into an RCU-safe +mode of operation which allows reading and writing concurrently. Writers must +synchronize on a lock, which can be the default spinlock, or the user can set +the lock to an external lock of a different type. + +The Maple Tree maintains a small memory footprint and was designed to use +modern processor cache efficiently. The majority of the users will be able to +use the normal API. An :ref:`maple-tree-advanced-api` exists for more complex +scenarios. The most important usage of the Maple Tree is the tracking of the +virtual memory areas. + +The Maple Tree can store values between ``0`` and ``ULONG_MAX``. The Maple +Tree reserves values with the bottom two bits set to '10' which are below 4096 +(ie 2, 6, 10 .. 4094) for internal use. If the entries may use reserved +entries then the users can convert the entries using xa_mk_value() and convert +them back by calling xa_to_value(). If the user needs to use a reserved +value, then the user can convert the value when using the +:ref:`maple-tree-advanced-api`, but are blocked by the normal API. + +The Maple Tree can also be configured to support searching for a gap of a given +size (or larger). + +Pre-allocating of nodes is also supported using the +:ref:`maple-tree-advanced-api`. 
This is useful for users who must guarantee a +successful store operation within a given +code segment when allocating cannot be done. Allocations of nodes are +relatively small at around 256 bytes. + +.. _maple-tree-normal-api: + +Normal API +========== + +Start by initialising a maple tree, either with DEFINE_MTREE() for statically +allocated maple trees or mt_init() for dynamically allocated ones. A +freshly-initialised maple tree contains a ``NULL`` pointer for the range ``0`` +- ``ULONG_MAX``. There are currently two types of maple trees supported: the +allocation tree and the regular tree. The regular tree has a higher branching +factor for internal nodes. The allocation tree has a lower branching factor +but allows the user to search for a gap of a given size or larger from either +``0`` upwards or ``ULONG_MAX`` down. An allocation tree can be used by +passing in the ``MT_FLAGS_ALLOC_RANGE`` flag when initialising the tree. + +You can then set entries using mtree_store() or mtree_store_range(). +mtree_store() will overwrite any entry with the new entry and return 0 on +success or an error code otherwise. mtree_store_range() works in the same way +but takes a range. mtree_load() is used to retrieve the entry stored at a +given index. You can use mtree_erase() to erase an entire range by only +knowing one value within that range, or mtree_store() call with an entry of +NULL may be used to partially erase a range or many ranges at once. + +If you want to only store a new entry to a range (or index) if that range is +currently ``NULL``, you can use mtree_insert_range() or mtree_insert() which +return -EEXIST if the range is not empty. + +You can search for an entry from an index upwards by using mt_find(). + +You can walk each entry within a range by calling mt_for_each(). You must +provide a temporary variable to store a cursor. If you want to walk each +element of the tree then ``0`` and ``ULONG_MAX`` may be used as the range. If +the caller is going to hold the lock for the duration of the walk then it is +worth looking at the mas_for_each() API in the :ref:`maple-tree-advanced-api` +section. + +Sometimes it is necessary to ensure the next call to store to a maple tree does +not allocate memory, please see :ref:`maple-tree-advanced-api` for this use case. + +Finally, you can remove all entries from a maple tree by calling +mtree_destroy(). If the maple tree entries are pointers, you may wish to free +the entries first. + +Allocating Nodes +---------------- + +The allocations are handled by the internal tree code. See +:ref:`maple-tree-advanced-alloc` for other options. + +Locking +------- + +You do not have to worry about locking. See :ref:`maple-tree-advanced-locks` +for other options. + +The Maple Tree uses RCU and an internal spinlock to synchronise access: + +Takes RCU read lock: + * mtree_load() + * mt_find() + * mt_for_each() + * mt_next() + * mt_prev() + +Takes ma_lock internally: + * mtree_store() + * mtree_store_range() + * mtree_insert() + * mtree_insert_range() + * mtree_erase() + * mtree_destroy() + * mt_set_in_rcu() + * mt_clear_in_rcu() + +If you want to take advantage of the internal lock to protect the data +structures that you are storing in the Maple Tree, you can call mtree_lock() +before calling mtree_load(), then take a reference count on the object you +have found before calling mtree_unlock(). This will prevent stores from +removing the object from the tree between looking up the object and +incrementing the refcount. 
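+
+A minimal sketch of that lookup-then-refcount pattern (``struct example_obj``
+and ``example_get()`` are illustrative and not part of the API)::
+
+    struct example_obj {
+        struct kref ref;
+    };
+
+    struct example_obj *example_get(struct maple_tree *mt, unsigned long index)
+    {
+        struct example_obj *obj;
+
+        mtree_lock(mt);
+        obj = mtree_load(mt, index);
+        if (obj)
+            kref_get(&obj->ref);
+        mtree_unlock(mt);
+
+        return obj;
+    }
+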
You can also use RCU to avoid dereferencing +freed memory, but an explanation of that is beyond the scope of this +document. + +.. _maple-tree-advanced-api: + +Advanced API +============ + +The advanced API offers more flexibility and better performance at the +cost of an interface which can be harder to use and has fewer safeguards. +You must take care of your own locking while using the advanced API. +You can use the ma_lock, RCU or an external lock for protection. +You can mix advanced and normal operations on the same array, as long +as the locking is compatible. The :ref:`maple-tree-normal-api` is implemented +in terms of the advanced API. + +The advanced API is based around the ma_state, this is where the 'mas' +prefix originates. The ma_state struct keeps track of tree operations to make +life easier for both internal and external tree users. + +Initialising the maple tree is the same as in the :ref:`maple-tree-normal-api`. +Please see above. + +The maple state keeps track of the range start and end in mas->index and +mas->last, respectively. + +mas_walk() will walk the tree to the location of mas->index and set the +mas->index and mas->last according to the range for the entry. + +You can set entries using mas_store(). mas_store() will overwrite any entry +with the new entry and return the first existing entry that is overwritten. +The range is passed in as members of the maple state: index and last. + +You can use mas_erase() to erase an entire range by setting index and +last of the maple state to the desired range to erase. This will erase +the first range that is found in that range, set the maple state index +and last as the range that was erased and return the entry that existed +at that location. + +You can walk each entry within a range by using mas_for_each(). If you want +to walk each element of the tree then ``0`` and ``ULONG_MAX`` may be used as +the range. If the lock needs to be periodically dropped, see the locking +section mas_pause(). + +Using a maple state allows mas_next() and mas_prev() to function as if the +tree was a linked list. With such a high branching factor the amortized +performance penalty is outweighed by cache optimization. mas_next() will +return the next entry which occurs after the entry at index. mas_prev() +will return the previous entry which occurs before the entry at index. + +mas_find() will find the first entry which exists at or above index on +the first call, and the next entry from every subsequent calls. + +mas_find_rev() will find the fist entry which exists at or below the last on +the first call, and the previous entry from every subsequent calls. + +If the user needs to yield the lock during an operation, then the maple state +must be paused using mas_pause(). + +There are a few extra interfaces provided when using an allocation tree. +If you wish to search for a gap within a range, then mas_empty_area() +or mas_empty_area_rev() can be used. mas_empty_area() searches for a gap +starting at the lowest index given up to the maximum of the range. +mas_empty_area_rev() searches for a gap starting at the highest index given +and continues downward to the lower bound of the range. + +.. _maple-tree-advanced-alloc: + +Advanced Allocating Nodes +------------------------- + +Allocations are usually handled internally to the tree, however if allocations +need to occur before a write occurs then calling mas_expected_entries() will +allocate the worst-case number of needed nodes to insert the provided number of +ranges. 
This also causes the tree to enter mass insertion mode. Once +insertions are complete calling mas_destroy() on the maple state will free the +unused allocations. + +.. _maple-tree-advanced-locks: + +Advanced Locking +---------------- + +The maple tree uses a spinlock by default, but external locks can be used for +tree updates as well. To use an external lock, the tree must be initialized +with the ``MT_FLAGS_LOCK_EXTERN flag``, this is usually done with the +MTREE_INIT_EXT() #define, which takes an external lock as an argument. + +Functions and structures +======================== + +.. kernel-doc:: include/linux/maple_tree.h +.. kernel-doc:: lib/maple_tree.c diff --git a/MAINTAINERS b/MAINTAINERS index 55fb1daa9057e..770fab3967288 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11826,6 +11826,18 @@ L: linux-man@vger.kernel.org S: Maintained W: http://www.kernel.org/doc/man-pages +MAPLE TREE +M: Liam R. Howlett +L: linux-mm@kvack.org +S: Supported +F: Documentation/core-api/maple_tree.rst +F: include/linux/maple_tree.h +F: include/trace/events/maple_tree.h +F: lib/maple_tree.c +F: lib/test_maple_tree.c +F: tools/testing/radix-tree/linux/maple_tree.h +F: tools/testing/radix-tree/maple.c + MARDUK (CREATOR CI40) DEVICE TREE SUPPORT M: Rahul Bedarkar L: linux-mips@vger.kernel.org diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h new file mode 100644 index 0000000000000..3c66423589045 --- /dev/null +++ b/include/linux/maple_tree.h @@ -0,0 +1,685 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_MAPLE_TREE_H +#define _LINUX_MAPLE_TREE_H +/* + * Maple Tree - An RCU-safe adaptive tree for storing ranges + * Copyright (c) 2018-2022 Oracle + * Authors: Liam R. Howlett + * Matthew Wilcox + */ + +#include +#include +#include +/* #define CONFIG_MAPLE_RCU_DISABLED */ +/* #define CONFIG_DEBUG_MAPLE_TREE_VERBOSE */ + +/* + * Allocated nodes are mutable until they have been inserted into the tree, + * at which time they cannot change their type until they have been removed + * from the tree and an RCU grace period has passed. + * + * Removed nodes have their ->parent set to point to themselves. RCU readers + * check ->parent before relying on the value that they loaded from the + * slots array. This lets us reuse the slots array for the RCU head. + * + * Nodes in the tree point to their parent unless bit 0 is set. + */ +#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) +/* 64bit sizes */ +#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */ +#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */ +#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */ +#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */ +#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1) +#else +/* 32bit sizes */ +#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */ +#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */ +#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */ +#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */ +#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2) +#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */ + +#define MAPLE_NODE_MASK 255UL + +/* + * The node->parent of the root node has bit 0 set and the rest of the pointer + * is a pointer to the tree itself. No more bits are available in this pointer + * (on m68k, the data structure may only be 2-byte aligned). + * + * Internal non-root nodes can only have maple_range_* nodes as parents. The + * parent pointer is 256B aligned like all other tree nodes. 
When storing a 32 + * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an + * extra bit to store the offset. This extra bit comes from a reuse of the last + * bit in the node type. This is possible by using bit 1 to indicate if bit 2 + * is part of the type or the slot. + * + * Once the type is decided, the decision of an allocation range type or a range + * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE + * flag. + * + * Node types: + * 0x??1 = Root + * 0x?00 = 16 bit nodes + * 0x010 = 32 bit nodes + * 0x110 = 64 bit nodes + * + * Slot size and location in the parent pointer: + * type : slot location + * 0x??1 : Root + * 0x?00 : 16 bit values, type in 0-1, slot in 2-6 + * 0x010 : 32 bit values, type in 0-2, slot in 3-6 + * 0x110 : 64 bit values, type in 0-2, slot in 3-6 + */ +typedef struct maple_enode *maple_enode; /* encoded node */ +typedef struct maple_pnode *maple_pnode; /* parent node */ + +/* + * This metadata is used to optimize the gap updating code and in reverse + * searching for gaps or any other code that needs to find the end of the data. + */ +struct maple_metadata { + unsigned char end; + unsigned char gap; +}; + +/* + * Leaf nodes do not store pointers to nodes, they store user data. Users may + * store almost any bit pattern. As noted above, the optimisation of storing an + * entry at 0 in the root pointer cannot be done for data which have the bottom + * two bits set to '10'. We also reserve values with the bottom two bits set to + * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs + * return errnos as a negative errno shifted right by two bits and the bottom + * two bits set to '10', and while choosing to store these values in the array + * is not an error, it may lead to confusion if you're testing for an error with + * mas_is_err(). + * + * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits + * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now. + * + * In regular B-Tree terms, pivots are called keys. The term pivot is used to + * indicate that the tree is specifying ranges, Pivots may appear in the + * subtree with an entry attached to the value whereas keys are unique to a + * specific position of a B-tree. Pivot values are inclusive of the slot with + * the same index. + */ + +struct maple_range_64 { + struct maple_pnode *parent; + unsigned long pivot[MAPLE_RANGE64_SLOTS - 1]; + union { + void __rcu *slot[MAPLE_RANGE64_SLOTS]; + struct { + void __rcu *pad[MAPLE_RANGE64_SLOTS - 1]; + struct maple_metadata meta; + }; + }; +}; + +/* + * At tree creation time, the user can specify that they're willing to trade off + * storing fewer entries in a tree in return for storing more information in + * each node. + * + * The maple tree supports recording the largest range of NULL entries available + * in this node, also called gaps. This optimises the tree for allocating a + * range. 
+ */ +struct maple_arange_64 { + struct maple_pnode *parent; + unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1]; + void __rcu *slot[MAPLE_ARANGE64_SLOTS]; + unsigned long gap[MAPLE_ARANGE64_SLOTS]; + struct maple_metadata meta; +}; + +struct maple_alloc { + unsigned long total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[MAPLE_ALLOC_SLOTS]; +}; + +struct maple_topiary { + struct maple_pnode *parent; + struct maple_enode *next; /* Overlaps the pivot */ +}; + +enum maple_type { + maple_dense, + maple_leaf_64, + maple_range_64, + maple_arange_64, +}; + + +/** + * DOC: Maple tree flags + * + * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree + * * MT_FLAGS_USE_RCU - Operate in RCU mode + * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags + * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value + * * MT_FLAGS_LOCK_MASK - How the mt_lock is used + * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe + * * MT_FLAGS_LOCK_BH - Acquired bh-safe + * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used + * + * MAPLE_HEIGHT_MAX The largest height that can be stored + */ +#define MT_FLAGS_ALLOC_RANGE 0x01 +#define MT_FLAGS_USE_RCU 0x02 +#define MT_FLAGS_HEIGHT_OFFSET 0x02 +#define MT_FLAGS_HEIGHT_MASK 0x7C +#define MT_FLAGS_LOCK_MASK 0x300 +#define MT_FLAGS_LOCK_IRQ 0x100 +#define MT_FLAGS_LOCK_BH 0x200 +#define MT_FLAGS_LOCK_EXTERN 0x300 + +#define MAPLE_HEIGHT_MAX 31 + + +#define MAPLE_NODE_TYPE_MASK 0x0F +#define MAPLE_NODE_TYPE_SHIFT 0x03 + +#define MAPLE_RESERVED_RANGE 4096 + +#ifdef CONFIG_LOCKDEP +typedef struct lockdep_map *lockdep_map_p; +#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock) +#define mt_set_external_lock(mt, lock) \ + (mt)->ma_external_lock = &(lock)->dep_map +#else +typedef struct { /* nothing */ } lockdep_map_p; +#define mt_lock_is_held(mt) 1 +#define mt_set_external_lock(mt, lock) do { } while (0) +#endif + +/* + * If the tree contains a single entry at index 0, it is usually stored in + * tree->ma_root. To optimise for the page cache, an entry which ends in '00', + * '01' or '11' is stored in the root, but an entry which ends in '10' will be + * stored in a node. Bits 3-6 are used to store enum maple_type. + * + * The flags are used both to store some immutable information about this tree + * (set at tree creation time) and dynamic information set under the spinlock. + * + * Another use of flags are to indicate global states of the tree. This is the + * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in + * RCU mode. This mode was added to allow the tree to reuse nodes instead of + * re-allocating and RCU freeing nodes when there is a single user. + */ +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + void __rcu *ma_root; + unsigned int ma_flags; +}; + +/** + * MTREE_INIT() - Initialize a maple tree + * @name: The maple tree name + * @flags: The maple tree flags + * + */ +#define MTREE_INIT(name, flags) { \ + .ma_lock = __SPIN_LOCK_UNLOCKED(name.ma_lock), \ + .ma_flags = flags, \ + .ma_root = NULL, \ +} + +/** + * MTREE_INIT_EXT() - Initialize a maple tree with an external lock. 
+ * @name: The tree name + * @flags: The maple tree flags + * @lock: The external lock + */ +#ifdef CONFIG_LOCKDEP +#define MTREE_INIT_EXT(name, flags, lock) { \ + .ma_external_lock = &(lock).dep_map, \ + .ma_flags = flags, \ + .ma_root = NULL, \ +} +#else +#define MTREE_INIT_EXT(name, flags, lock) MTREE_INIT(name, flags) +#endif + +#define DEFINE_MTREE(name) \ + struct maple_tree name = MTREE_INIT(name, 0) + +#define mtree_lock(mt) spin_lock((&(mt)->ma_lock)) +#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock)) + +/* + * The Maple Tree squeezes various bits in at various points which aren't + * necessarily obvious. Usually, this is done by observing that pointers are + * N-byte aligned and thus the bottom log_2(N) bits are available for use. We + * don't use the high bits of pointers to store additional information because + * we don't know what bits are unused on any given architecture. + * + * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8 + * low bits for our own purposes. Nodes are currently of 4 types: + * 1. Single pointer (Range is 0-0) + * 2. Non-leaf Allocation Range nodes + * 3. Non-leaf Range nodes + * 4. Leaf Range nodes All nodes consist of a number of node slots, + * pivots, and a parent pointer. + */ + +struct maple_node { + union { + struct { + struct maple_pnode *parent; + void __rcu *slot[MAPLE_NODE_SLOTS]; + }; + struct { + void *pad; + struct rcu_head rcu; + struct maple_enode *piv_parent; + unsigned char parent_slot; + enum maple_type type; + unsigned char slot_len; + unsigned int ma_flags; + }; + struct maple_range_64 mr64; + struct maple_arange_64 ma64; + struct maple_alloc alloc; + }; +}; + +/* + * More complicated stores can cause two nodes to become one or three and + * potentially alter the height of the tree. Either half of the tree may need + * to be rebalanced against the other. The ma_topiary struct is used to track + * which nodes have been 'cut' from the tree so that the change can be done + * safely at a later date. This is done to support RCU. + */ +struct ma_topiary { + struct maple_enode *head; + struct maple_enode *tail; + struct maple_tree *mtree; +}; + +void *mtree_load(struct maple_tree *mt, unsigned long index); + +int mtree_insert(struct maple_tree *mt, unsigned long index, + void *entry, gfp_t gfp); +int mtree_insert_range(struct maple_tree *mt, unsigned long first, + unsigned long last, void *entry, gfp_t gfp); +int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long size, unsigned long min, + unsigned long max, gfp_t gfp); +int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long size, unsigned long min, + unsigned long max, gfp_t gfp); + +int mtree_store_range(struct maple_tree *mt, unsigned long first, + unsigned long last, void *entry, gfp_t gfp); +int mtree_store(struct maple_tree *mt, unsigned long index, + void *entry, gfp_t gfp); +void *mtree_erase(struct maple_tree *mt, unsigned long index); + +void mtree_destroy(struct maple_tree *mt); +void __mt_destroy(struct maple_tree *mt); + +/** + * mtree_empty() - Determine if a tree has any present entries. + * @mt: Maple Tree. + * + * Context: Any context. + * Return: %true if the tree contains only NULL pointers. 
+ */ +static inline bool mtree_empty(const struct maple_tree *mt) +{ + return mt->ma_root == NULL; +} + +/* Advanced API */ + +/* + * The maple state is defined in the struct ma_state and is used to keep track + * of information during operations, and even between operations when using the + * advanced API. + * + * If state->node has bit 0 set then it references a tree location which is not + * a node (eg the root). If bit 1 is set, the rest of the bits are a negative + * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the + * node type. + * + * state->alloc either has a request number of nodes or an allocated node. If + * stat->alloc has a requested number of nodes, the first bit will be set (0x1) + * and the remaining bits are the value. If state->alloc is a node, then the + * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for + * storing more allocated nodes, a total number of nodes allocated, and the + * node_count in this node. node_count is the number of allocated nodes in this + * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further + * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc + * by removing a node from the state->alloc node until state->alloc->node_count + * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted + * to state->alloc. Nodes are pushed onto state->alloc by putting the current + * state->alloc into the pushed node's slot[0]. + * + * The state also contains the implied min/max of the state->node, the depth of + * this search, and the offset. The implied min/max are either from the parent + * node or are 0-oo for the root node. The depth is incremented or decremented + * every time a node is walked down or up. The offset is the slot/pivot of + * interest in the node - either for reading or writing. + * + * When returning a value the maple state index and last respectively contain + * the start and end of the range for the entry. Ranges are inclusive in the + * Maple Tree. + */ +struct ma_state { + struct maple_tree *tree; /* The tree we're operating in */ + unsigned long index; /* The index we're operating on - range start */ + unsigned long last; /* The last index we're operating on - range end */ + struct maple_enode *node; /* The node containing this entry */ + unsigned long min; /* The minimum index of this node - implied pivot min */ + unsigned long max; /* The maximum index of this node - implied pivot max */ + struct maple_alloc *alloc; /* Allocated nodes for this operation */ + unsigned char depth; /* depth of tree descent during write */ + unsigned char offset; + unsigned char mas_flags; +}; + +struct ma_wr_state { + struct ma_state *mas; + struct maple_node *node; /* Decoded mas->node */ + unsigned long r_min; /* range min */ + unsigned long r_max; /* range max */ + enum maple_type type; /* mas->node type */ + unsigned char offset_end; /* The offset where the write ends */ + unsigned char node_end; /* mas->node end */ + unsigned long *pivots; /* mas->node->pivots pointer */ + unsigned long end_piv; /* The pivot at the offset end */ + void __rcu **slots; /* mas->node->slots pointer */ + void *entry; /* The entry to write */ + void *content; /* The existing entry that is being overwritten */ +}; + +#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock)) +#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock)) + + +/* + * Special values for ma_state.node. + * MAS_START means we have not searched the tree. 
+ * MAS_ROOT means we have searched the tree and the entry we found lives in + * the root of the tree (ie it has index 0, length 1 and is the only entry in + * the tree). + * MAS_NONE means we have searched the tree and there is no node in the + * tree for this entry. For example, we searched for index 1 in an empty + * tree. Or we have a tree which points to a full leaf node and we + * searched for an entry which is larger than can be contained in that + * leaf node. + * MA_ERROR represents an errno. After dropping the lock and attempting + * to resolve the error, the walk would have to be restarted from the + * top of the tree as the tree may have been modified. + */ +#define MAS_START ((struct maple_enode *)1UL) +#define MAS_ROOT ((struct maple_enode *)5UL) +#define MAS_NONE ((struct maple_enode *)9UL) +#define MAS_PAUSE ((struct maple_enode *)17UL) +#define MA_ERROR(err) \ + ((struct maple_enode *)(((unsigned long)err << 2) | 2UL)) + +#define MA_STATE(name, mt, first, end) \ + struct ma_state name = { \ + .tree = mt, \ + .index = first, \ + .last = end, \ + .node = MAS_START, \ + .min = 0, \ + .max = ULONG_MAX, \ + } + +#define MA_WR_STATE(name, ma_state, wr_entry) \ + struct ma_wr_state name = { \ + .mas = ma_state, \ + .content = NULL, \ + .entry = wr_entry, \ + } + +#define MA_TOPIARY(name, tree) \ + struct ma_topiary name = { \ + .head = NULL, \ + .tail = NULL, \ + .mtree = tree, \ + } + +void *mas_walk(struct ma_state *mas); +void *mas_store(struct ma_state *mas, void *entry); +void *mas_erase(struct ma_state *mas); +int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp); +void mas_store_prealloc(struct ma_state *mas, void *entry); +void *mas_find(struct ma_state *mas, unsigned long max); +void *mas_find_rev(struct ma_state *mas, unsigned long min); +int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp); + +bool mas_nomem(struct ma_state *mas, gfp_t gfp); +void mas_pause(struct ma_state *mas); +void maple_tree_init(void); +void mas_destroy(struct ma_state *mas); +int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries); + +void *mas_prev(struct ma_state *mas, unsigned long min); +void *mas_next(struct ma_state *mas, unsigned long max); + +int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max, + unsigned long size); + +/* Checks if a mas has not found anything */ +static inline bool mas_is_none(struct ma_state *mas) +{ + return mas->node == MAS_NONE; +} + +/* Checks if a mas has been paused */ +static inline bool mas_is_paused(struct ma_state *mas) +{ + return mas->node == MAS_PAUSE; +} + +void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas); +void mas_dup_store(struct ma_state *mas, void *entry); + +/* + * This finds an empty area from the highest address to the lowest. + * AKA "Topdown" version, + */ +int mas_empty_area_rev(struct ma_state *mas, unsigned long min, + unsigned long max, unsigned long size); +/** + * mas_reset() - Reset a Maple Tree operation state. + * @mas: Maple Tree operation state. + * + * Resets the error or walk state of the @mas so future walks of the + * array will start from the root. Use this if you have dropped the + * lock and want to reuse the ma_state. + * + * Context: Any context. + */ +static inline void mas_reset(struct ma_state *mas) +{ + mas->node = MAS_START; +} + +/** + * mas_for_each() - Iterate over a range of the maple tree. 
+ * @mas: Maple Tree operation state (maple_state) + * @entry: Entry retrieved from the tree + * @max: maximum index to retrieve from the tree + * + * When returned, mas->index and mas->last will hold the entire range for the + * entry. + * + * Note: may return the zero entry. + * + */ +#define mas_for_each(mas, entry, max) \ + while (((entry) = mas_find((mas), (max))) != NULL) + + +/** + * mas_set_range() - Set up Maple Tree operation state for a different index. + * @mas: Maple Tree operation state. + * @start: New start of range in the Maple Tree. + * @last: New end of range in the Maple Tree. + * + * Move the operation state to refer to a different range. This will + * have the effect of starting a walk from the top; see mas_next() + * to move to an adjacent index. + */ +static inline +void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last) +{ + mas->index = start; + mas->last = last; + mas->node = MAS_START; +} + +/** + * mas_set() - Set up Maple Tree operation state for a different index. + * @mas: Maple Tree operation state. + * @index: New index into the Maple Tree. + * + * Move the operation state to refer to a different index. This will + * have the effect of starting a walk from the top; see mas_next() + * to move to an adjacent index. + */ +static inline void mas_set(struct ma_state *mas, unsigned long index) +{ + + mas_set_range(mas, index, index); +} + +static inline bool mt_external_lock(const struct maple_tree *mt) +{ + return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN; +} + +/** + * mt_init_flags() - Initialise an empty maple tree with flags. + * @mt: Maple Tree + * @flags: maple tree flags. + * + * If you need to initialise a Maple Tree with special flags (eg, an + * allocation tree), use this function. + * + * Context: Any context. + */ +static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags) +{ + mt->ma_flags = flags; + if (!mt_external_lock(mt)) + spin_lock_init(&mt->ma_lock); + rcu_assign_pointer(mt->ma_root, NULL); +} + +/** + * mt_init() - Initialise an empty maple tree. + * @mt: Maple Tree + * + * An empty Maple Tree. + * + * Context: Any context. + */ +static inline void mt_init(struct maple_tree *mt) +{ + mt_init_flags(mt, 0); +} + +static inline bool mt_in_rcu(struct maple_tree *mt) +{ +#ifdef CONFIG_MAPLE_RCU_DISABLED + return false; +#endif + return mt->ma_flags & MT_FLAGS_USE_RCU; +} + +/** + * mt_clear_in_rcu() - Switch the tree to non-RCU mode. + * @mt: The Maple Tree + */ +static inline void mt_clear_in_rcu(struct maple_tree *mt) +{ + if (!mt_in_rcu(mt)) + return; + + if (mt_external_lock(mt)) { + BUG_ON(!mt_lock_is_held(mt)); + mt->ma_flags &= ~MT_FLAGS_USE_RCU; + } else { + mtree_lock(mt); + mt->ma_flags &= ~MT_FLAGS_USE_RCU; + mtree_unlock(mt); + } +} + +/** + * mt_set_in_rcu() - Switch the tree to RCU safe mode. 
+ * @mt: The Maple Tree + */ +static inline void mt_set_in_rcu(struct maple_tree *mt) +{ + if (mt_in_rcu(mt)) + return; + + if (mt_external_lock(mt)) { + BUG_ON(!mt_lock_is_held(mt)); + mt->ma_flags |= MT_FLAGS_USE_RCU; + } else { + mtree_lock(mt); + mt->ma_flags |= MT_FLAGS_USE_RCU; + mtree_unlock(mt); + } +} + +void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max); +void *mt_find_after(struct maple_tree *mt, unsigned long *index, + unsigned long max); +void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min); +void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max); + +/** + * mt_for_each - Iterate over each entry starting at index until max. + * @tree: The Maple Tree + * @entry: The current entry + * @index: The index to update to track the location in the tree + * @max: The maximum limit for @index + * + * Note: Will not return the zero entry. + */ +#define mt_for_each(tree, entry, index, max) \ + for (entry = mt_find(tree, &(index), max); \ + entry; entry = mt_find_after(tree, &(index), max)) + + +#ifdef CONFIG_DEBUG_MAPLE_TREE +extern atomic_t maple_tree_tests_run; +extern atomic_t maple_tree_tests_passed; + +void mt_dump(const struct maple_tree *mt); +void mt_validate(struct maple_tree *mt); +#define MT_BUG_ON(tree, x) do { \ + atomic_inc(&maple_tree_tests_run); \ + if (x) { \ + pr_info("BUG at %s:%d (%u)\n", \ + __func__, __LINE__, x); \ + mt_dump(tree); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ +} while (0) +#else +#define MT_BUG_ON(tree, x) BUG_ON(x) +#endif /* CONFIG_DEBUG_MAPLE_TREE */ + +#endif /*_LINUX_MAPLE_TREE_H */ diff --git a/include/trace/events/maple_tree.h b/include/trace/events/maple_tree.h new file mode 100644 index 0000000000000..2be403bdc2bd8 --- /dev/null +++ b/include/trace/events/maple_tree.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM maple_tree + +#if !defined(_TRACE_MM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MM_H + + +#include + +struct ma_state; + +TRACE_EVENT(ma_op, + + TP_PROTO(const char *fn, struct ma_state *mas), + + TP_ARGS(fn, mas), + + TP_STRUCT__entry( + __field(const char *, fn) + __field(unsigned long, min) + __field(unsigned long, max) + __field(unsigned long, index) + __field(unsigned long, last) + __field(void *, node) + ), + + TP_fast_assign( + __entry->fn = fn; + __entry->min = mas->min; + __entry->max = mas->max; + __entry->index = mas->index; + __entry->last = mas->last; + __entry->node = mas->node; + ), + + TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu", + __entry->fn, + (void *) __entry->node, + (unsigned long) __entry->min, + (unsigned long) __entry->max, + (unsigned long) __entry->index, + (unsigned long) __entry->last + ) +) +TRACE_EVENT(ma_read, + + TP_PROTO(const char *fn, struct ma_state *mas), + + TP_ARGS(fn, mas), + + TP_STRUCT__entry( + __field(const char *, fn) + __field(unsigned long, min) + __field(unsigned long, max) + __field(unsigned long, index) + __field(unsigned long, last) + __field(void *, node) + ), + + TP_fast_assign( + __entry->fn = fn; + __entry->min = mas->min; + __entry->max = mas->max; + __entry->index = mas->index; + __entry->last = mas->last; + __entry->node = mas->node; + ), + + TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu", + __entry->fn, + (void *) __entry->node, + (unsigned long) __entry->min, + (unsigned long) 
__entry->max, + (unsigned long) __entry->index, + (unsigned long) __entry->last + ) +) + +TRACE_EVENT(ma_write, + + TP_PROTO(const char *fn, struct ma_state *mas, unsigned long piv, + void *val), + + TP_ARGS(fn, mas, piv, val), + + TP_STRUCT__entry( + __field(const char *, fn) + __field(unsigned long, min) + __field(unsigned long, max) + __field(unsigned long, index) + __field(unsigned long, last) + __field(unsigned long, piv) + __field(void *, val) + __field(void *, node) + ), + + TP_fast_assign( + __entry->fn = fn; + __entry->min = mas->min; + __entry->max = mas->max; + __entry->index = mas->index; + __entry->last = mas->last; + __entry->piv = piv; + __entry->val = val; + __entry->node = mas->node; + ), + + TP_printk("%s\tNode %p (%lu %lu) range:%lu-%lu piv (%lu) val %p", + __entry->fn, + (void *) __entry->node, + (unsigned long) __entry->min, + (unsigned long) __entry->max, + (unsigned long) __entry->index, + (unsigned long) __entry->last, + (unsigned long) __entry->piv, + (void *) __entry->val + ) +) +#endif /* _TRACE_MM_H */ + +/* This part must be outside protection */ +#include diff --git a/init/main.c b/init/main.c index 0ee39cdcfcac9..1edb1bfec640d 100644 --- a/init/main.c +++ b/init/main.c @@ -116,6 +116,7 @@ static int kernel_init(void *); extern void init_IRQ(void); extern void radix_tree_init(void); +extern void maple_tree_init(void); /* * Debug helper: via this flag we know that we are in 'early bootup code' @@ -1001,6 +1002,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) "Interrupts were enabled *very* early, fixing it\n")) local_irq_disable(); radix_tree_init(); + maple_tree_init(); /* * Set up housekeeping before setting up workqueues to allow the unbound diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0b483a8da409d..a8a36e5897552 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -821,6 +821,13 @@ config DEBUG_VM_VMACACHE can cause significant overhead, so only enable it in non-production environments. +config DEBUG_VM_MAPLE_TREE + bool "Debug VM maple trees" + depends on DEBUG_VM + select DEBUG_MAPLE_TREE + help + Enable VM maple tree debugging information and extra validations. + If unsure, say N. config DEBUG_VM_RB @@ -1636,6 +1643,14 @@ config BUG_ON_DATA_CORRUPTION If unsure, say N. +config DEBUG_MAPLE_TREE + bool "Debug maple trees" + depends on DEBUG_KERNEL + help + Enable maple tree debugging information and extra validations. + + If unsure, say N. + endmenu config DEBUG_CREDENTIALS diff --git a/lib/Makefile b/lib/Makefile index f99bf61f8bbc6..2b845a5e4a80d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -29,7 +29,8 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ - idr.o extable.o sha1.o irq_regs.o argv_split.o \ + maple_tree.o idr.o extable.o \ + sha1.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ diff --git a/lib/maple_tree.c b/lib/maple_tree.c new file mode 100644 index 0000000000000..a968948b3e3e5 --- /dev/null +++ b/lib/maple_tree.c @@ -0,0 +1,7041 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Maple Tree implementation + * Copyright (c) 2018-2022 Oracle Corporation + * Authors: Liam R. Howlett + * Matthew Wilcox + */ + +/* + * DOC: Interesting implementation details of the Maple Tree + * + * Each node type has a number of slots for entries and a number of slots for + * pivots. 
In the case of dense nodes, the pivots are implied by the position + * and are simply the slot index + the minimum of the node. + * + * In regular B-Tree terms, pivots are called keys. The term pivot is used to + * indicate that the tree is specifying ranges, Pivots may appear in the + * subtree with an entry attached to the value where as keys are unique to a + * specific position of a B-tree. Pivot values are inclusive of the slot with + * the same index. + * + * + * The following illustrates the layout of a range64 nodes slots and pivots. + * + * + * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 | + * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ + * │ │ │ │ │ │ │ │ └─ Implied maximum + * │ │ │ │ │ │ │ └─ Pivot 14 + * │ │ │ │ │ │ └─ Pivot 13 + * │ │ │ │ │ └─ Pivot 12 + * │ │ │ │ └─ Pivot 11 + * │ │ │ └─ Pivot 2 + * │ │ └─ Pivot 1 + * │ └─ Pivot 0 + * └─ Implied minimum + * + * Slot contents: + * Internal (non-leaf) nodes contain pointers to other nodes. + * Leaf nodes contain entries. + * + * The location of interest is often referred to as an offset. All offsets have + * a slot, but the last offset has an implied pivot from the node above (or + * UINT_MAX for the root node. + * + * Ranges complicate certain write activities. When modifying any of + * the B-tree variants, it is known that one entry will either be added or + * deleted. When modifying the Maple Tree, one store operation may overwrite + * the entire data set, or one half of the tree, or the middle half of the tree. + * + */ + + +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#define MA_ROOT_PARENT 1 + +/* Maple state flags */ +#define MA_STATE_BULK 1 +#define MA_STATE_REBALANCE 2 + +#define ma_parent_ptr(x) ((struct maple_pnode *)(x)) +#define ma_mnode_ptr(x) ((struct maple_node *)(x)) +#define ma_enode_ptr(x) ((struct maple_enode *)(x)) +static struct kmem_cache *maple_node_cache; + +#ifdef CONFIG_DEBUG_MAPLE_TREE +static const unsigned long mt_max[] = { + [maple_dense] = MAPLE_NODE_SLOTS, + [maple_leaf_64] = ULONG_MAX, + [maple_range_64] = ULONG_MAX, + [maple_arange_64] = ULONG_MAX, +}; +#define mt_node_max(x) mt_max[mte_node_type(x)] +#endif + +static const unsigned char mt_slots[] = { + [maple_dense] = MAPLE_NODE_SLOTS, + [maple_leaf_64] = MAPLE_RANGE64_SLOTS, + [maple_range_64] = MAPLE_RANGE64_SLOTS, + [maple_arange_64] = MAPLE_ARANGE64_SLOTS, +}; +#define mt_slot_count(x) mt_slots[mte_node_type(x)] + +static const unsigned char mt_pivots[] = { + [maple_dense] = 0, + [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1, + [maple_range_64] = MAPLE_RANGE64_SLOTS - 1, + [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1, +}; +#define mt_pivot_count(x) mt_pivots[mte_node_type(x)] + +static const unsigned char mt_min_slots[] = { + [maple_dense] = MAPLE_NODE_SLOTS / 2, + [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2, + [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2, + [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1, +}; +#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)] + +#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2) +#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1) + +struct maple_big_node { + struct maple_pnode *parent; + unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1]; + union { + struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS]; + struct { + unsigned long padding[MAPLE_BIG_NODE_GAPS]; + unsigned long gap[MAPLE_BIG_NODE_GAPS]; + }; + }; + unsigned char b_end; + enum maple_type type; +}; + +/* + * The maple_subtree_state is used to build a tree to replace a 
segment of an + * existing tree in a more atomic way. Any walkers of the older tree will hit a + * dead node and restart on updates. + */ +struct maple_subtree_state { + struct ma_state *orig_l; /* Original left side of subtree */ + struct ma_state *orig_r; /* Original right side of subtree */ + struct ma_state *l; /* New left side of subtree */ + struct ma_state *m; /* New middle of subtree (rare) */ + struct ma_state *r; /* New right side of subtree */ + struct ma_topiary *free; /* nodes to be freed */ + struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */ + struct maple_big_node *bn; +}; + +/* Functions */ +static inline struct maple_node *mt_alloc_one(gfp_t gfp) +{ + return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO); +} + +static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes) +{ + return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size, + nodes); +} + +static inline void mt_free_bulk(size_t size, void __rcu **nodes) +{ + kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes); +} + +static void mt_free_rcu(struct rcu_head *head) +{ + struct maple_node *node = container_of(head, struct maple_node, rcu); + + kmem_cache_free(maple_node_cache, node); +} + +/* + * ma_free_rcu() - Use rcu callback to free a maple node + * @node: The node to free + * + * The maple tree uses the parent pointer to indicate this node is no longer in + * use and will be freed. + */ +static void ma_free_rcu(struct maple_node *node) +{ + node->parent = ma_parent_ptr(node); + call_rcu(&node->rcu, mt_free_rcu); +} + +static unsigned int mt_height(const struct maple_tree *mt) +{ + return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET; +} + +static void mas_set_height(struct ma_state *mas) +{ + unsigned int new_flags = mas->tree->ma_flags; + + new_flags &= ~MT_FLAGS_HEIGHT_MASK; + BUG_ON(mas->depth > MAPLE_HEIGHT_MAX); + new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET; + mas->tree->ma_flags = new_flags; +} + +static unsigned int mas_mt_height(struct ma_state *mas) +{ + return mt_height(mas->tree); +} + +static inline enum maple_type mte_node_type(const struct maple_enode *entry) +{ + return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) & + MAPLE_NODE_TYPE_MASK; +} + +static inline bool ma_is_dense(const enum maple_type type) +{ + return type < maple_leaf_64; +} + +static inline bool ma_is_leaf(const enum maple_type type) +{ + return type < maple_range_64; +} + +static inline bool mte_is_leaf(const struct maple_enode *entry) +{ + return ma_is_leaf(mte_node_type(entry)); +} + +/* + * We also reserve values with the bottom two bits set to '10' which are + * below 4096 + */ +static inline bool mt_is_reserved(const void *entry) +{ + return ((unsigned long)entry < MAPLE_RESERVED_RANGE) && + xa_is_internal(entry); +} + +static inline void mas_set_err(struct ma_state *mas, long err) +{ + mas->node = MA_ERROR(err); +} + +static inline bool mas_is_ptr(struct ma_state *mas) +{ + return mas->node == MAS_ROOT; +} + +static inline bool mas_is_start(struct ma_state *mas) +{ + return mas->node == MAS_START; +} + +static inline bool mas_is_err(struct ma_state *mas) +{ + return xa_is_err(mas->node); +} + +static inline bool mas_searchable(struct ma_state *mas) +{ + if (mas_is_none(mas)) + return false; + + if (mas_is_ptr(mas)) + return false; + + return true; +} + +static inline struct maple_node *mte_to_node(const struct maple_enode *entry) +{ + return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK); +} + +/* + * mte_to_mat() - 
Convert a maple encoded node to a maple topiary node. + * @entry: The maple encoded node + * + * Return: a maple topiary pointer + */ +static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry) +{ + return (struct maple_topiary *) + ((unsigned long)entry & ~MAPLE_NODE_MASK); +} + +/* + * mas_mn() - Get the maple state node. + * @mas: The maple state + * + * Return: the maple node (not encoded - bare pointer). + */ +static inline struct maple_node *mas_mn(const struct ma_state *mas) +{ + return mte_to_node(mas->node); +} + +/* + * mte_set_node_dead() - Set a maple encoded node as dead. + * @mn: The maple encoded node. + */ +static inline void mte_set_node_dead(struct maple_enode *mn) +{ + mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn)); + smp_wmb(); /* Needed for RCU */ +} + +/* Bit 1 indicates the root is a node */ +#define MAPLE_ROOT_NODE 0x02 +/* maple_type stored bit 3-6 */ +#define MAPLE_ENODE_TYPE_SHIFT 0x03 +/* Bit 2 means a NULL somewhere below */ +#define MAPLE_ENODE_NULL 0x04 + +static inline struct maple_enode *mt_mk_node(const struct maple_node *node, + enum maple_type type) +{ + return (void *)((unsigned long)node | + (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL); +} + +static inline void *mte_mk_root(const struct maple_enode *node) +{ + return (void *)((unsigned long)node | MAPLE_ROOT_NODE); +} + +static inline void *mte_safe_root(const struct maple_enode *node) +{ + return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE); +} + +static inline void mte_set_full(const struct maple_enode *node) +{ + node = (void *)((unsigned long)node & ~MAPLE_ENODE_NULL); +} + +static inline void mte_clear_full(const struct maple_enode *node) +{ + node = (void *)((unsigned long)node | MAPLE_ENODE_NULL); +} + +static inline bool ma_is_root(struct maple_node *node) +{ + return ((unsigned long)node->parent & MA_ROOT_PARENT); +} + +static inline bool mte_is_root(const struct maple_enode *node) +{ + return ma_is_root(mte_to_node(node)); +} + +static inline bool mas_is_root_limits(const struct ma_state *mas) +{ + return !mas->min && mas->max == ULONG_MAX; +} + +static inline bool mt_is_alloc(struct maple_tree *mt) +{ + return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE); +} + +/* + * The Parent Pointer + * Excluding root, the parent pointer is 256B aligned like all other tree nodes. + * When storing a 32 or 64 bit values, the offset can fit into 5 bits. The 16 + * bit values need an extra bit to store the offset. This extra bit comes from + * a reuse of the last bit in the node type. This is possible by using bit 1 to + * indicate if bit 2 is part of the type or the slot. + * + * Note types: + * 0x??1 = Root + * 0x?00 = 16 bit nodes + * 0x010 = 32 bit nodes + * 0x110 = 64 bit nodes + * + * Slot size and alignment + * 0b??1 : Root + * 0b?00 : 16 bit values, type in 0-1, slot in 2-7 + * 0b010 : 32 bit values, type in 0-2, slot in 3-7 + * 0b110 : 64 bit values, type in 0-2, slot in 3-7 + */ + +#define MAPLE_PARENT_ROOT 0x01 + +#define MAPLE_PARENT_SLOT_SHIFT 0x03 +#define MAPLE_PARENT_SLOT_MASK 0xF8 + +#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02 +#define MAPLE_PARENT_16B_SLOT_MASK 0xFC + +#define MAPLE_PARENT_RANGE64 0x06 +#define MAPLE_PARENT_RANGE32 0x04 +#define MAPLE_PARENT_NOT_RANGE16 0x02 + +/* + * mte_parent_shift() - Get the parent shift for the slot storage. 
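To make the node encoding above concrete, here is a small sketch (function name invented, only meaningful inside lib/maple_tree.c where these file-local helpers live) of the round trip between a bare node and its encoded form:

/* Illustrative sketch only: maple_node is 256B aligned, so the low bits
 * of the encoded pointer carry the node type and flags.
 */
static void example_encode(struct maple_node *node)
{
	struct maple_enode *enode = mt_mk_node(node, maple_leaf_64);

	WARN_ON(mte_to_node(enode) != node);
	WARN_ON(mte_node_type(enode) != maple_leaf_64);
	WARN_ON(!mte_is_leaf(enode));
}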
+ * @parent: The parent pointer cast as an unsigned long + * Return: The shift into that pointer to the star to of the slot + */ +static inline unsigned long mte_parent_shift(unsigned long parent) +{ + /* Note bit 1 == 0 means 16B */ + if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) + return MAPLE_PARENT_SLOT_SHIFT; + + return MAPLE_PARENT_16B_SLOT_SHIFT; +} + +/* + * mte_parent_slot_mask() - Get the slot mask for the parent. + * @parent: The parent pointer cast as an unsigned long. + * Return: The slot mask for that parent. + */ +static inline unsigned long mte_parent_slot_mask(unsigned long parent) +{ + /* Note bit 1 == 0 means 16B */ + if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) + return MAPLE_PARENT_SLOT_MASK; + + return MAPLE_PARENT_16B_SLOT_MASK; +} + +/* + * mas_parent_enum() - Return the maple_type of the parent from the stored + * parent type. + * @mas: The maple state + * @node: The maple_enode to extract the parent's enum + * Return: The node->parent maple_type + */ +static inline +enum maple_type mte_parent_enum(struct maple_enode *p_enode, + struct maple_tree *mt) +{ + unsigned long p_type; + + p_type = (unsigned long)p_enode; + if (p_type & MAPLE_PARENT_ROOT) + return 0; /* Validated in the caller. */ + + p_type &= MAPLE_NODE_MASK; + p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type)); + + switch (p_type) { + case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */ + if (mt_is_alloc(mt)) + return maple_arange_64; + return maple_range_64; + } + + return 0; +} + +static inline +enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode) +{ + return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree); +} + +/* + * mte_set_parent() - Set the parent node and encode the slot + * @enode: The encoded maple node. + * @parent: The encoded maple node that is the parent of @enode. + * @slot: The slot that @enode resides in @parent. + * + * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the + * parent type. + */ +static inline +void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent, + unsigned char slot) +{ + unsigned long val = (unsigned long) parent; + unsigned long shift; + unsigned long type; + enum maple_type p_type = mte_node_type(parent); + + BUG_ON(p_type == maple_dense); + BUG_ON(p_type == maple_leaf_64); + + switch (p_type) { + case maple_range_64: + case maple_arange_64: + shift = MAPLE_PARENT_SLOT_SHIFT; + type = MAPLE_PARENT_RANGE64; + break; + default: + case maple_dense: + case maple_leaf_64: + shift = type = 0; + break; + } + + val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */ + val |= (slot << shift) | type; + mte_to_node(enode)->parent = ma_parent_ptr(val); +} + +/* + * mte_parent_slot() - get the parent slot of @enode. + * @enode: The encoded maple node. + * + * Return: The slot in the parent node where @enode resides. + */ +static inline unsigned int mte_parent_slot(const struct maple_enode *enode) +{ + unsigned long val = (unsigned long) mte_to_node(enode)->parent; + + /* Root. */ + if (val & 1) + return 0; + + /* + * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost + * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT + */ + return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val); +} + +/* + * mte_parent() - Get the parent of @node. + * @node: The encoded maple node. + * + * Return: The parent maple node. 
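The same applies to the parent encoding: a hedged sketch (invented helper; the parent is assumed to be a range64/arange64 node, since mte_set_parent() rejects dense and leaf_64 parents) of the encode/decode round trip:

/* Illustrative sketch only: link @child into slot 3 of @parent, then
 * recover both the slot and the bare parent node from the single
 * encoded parent pointer.
 */
static void example_parent_link(struct maple_enode *parent,
				struct maple_enode *child)
{
	mte_set_parent(child, parent, 3);

	WARN_ON(mte_parent_slot(child) != 3);
	WARN_ON(mte_parent(child) != mte_to_node(parent));
}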
+ */ +static inline struct maple_node *mte_parent(const struct maple_enode *enode) +{ + return (void *)((unsigned long) + (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK); +} + +/* + * ma_dead_node() - check if the @enode is dead. + * @enode: The encoded maple node + * + * Return: true if dead, false otherwise. + */ +static inline bool ma_dead_node(const struct maple_node *node) +{ + struct maple_node *parent = (void *)((unsigned long) + node->parent & ~MAPLE_NODE_MASK); + + return (parent == node); +} +/* + * mte_dead_node() - check if the @enode is dead. + * @enode: The encoded maple node + * + * Return: true if dead, false otherwise. + */ +static inline bool mte_dead_node(const struct maple_enode *enode) +{ + struct maple_node *parent, *node; + + node = mte_to_node(enode); + parent = mte_parent(enode); + return (parent == node); +} + +/* + * mas_allocated() - Get the number of nodes allocated in a maple state. + * @mas: The maple state + * + * The ma_state alloc member is overloaded to hold a pointer to the first + * allocated node or to the number of requested nodes to allocate. If bit 0 is + * set, then the alloc contains the number of requested nodes. If there is an + * allocated node, then the total allocated nodes is in that node. + * + * Return: The total number of nodes allocated + */ +static inline unsigned long mas_allocated(const struct ma_state *mas) +{ + if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) + return 0; + + return mas->alloc->total; +} + +/* + * mas_set_alloc_req() - Set the requested number of allocations. + * @mas: the maple state + * @count: the number of allocations. + * + * The requested number of allocations is either in the first allocated node, + * located in @mas->alloc->request_count, or directly in @mas->alloc if there is + * no allocated node. Set the request either in the node or do the necessary + * encoding to store in @mas->alloc directly. + */ +static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count) +{ + if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) { + if (!count) + mas->alloc = NULL; + else + mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U); + return; + } + + mas->alloc->request_count = count; +} + +/* + * mas_alloc_req() - get the requested number of allocations. + * @mas: The maple state + * + * The alloc count is either stored directly in @mas, or in + * @mas->alloc->request_count if there is at least one node allocated. Decode + * the request count if it's stored directly in @mas->alloc. + * + * Return: The allocation request count. + */ +static inline unsigned int mas_alloc_req(const struct ma_state *mas) +{ + if ((unsigned long)mas->alloc & 0x1) + return (unsigned long)(mas->alloc) >> 1; + else if (mas->alloc) + return mas->alloc->request_count; + return 0; +} + +/* + * ma_pivots() - Get a pointer to the maple node pivots. + * @node - the maple node + * @type - the node type + * + * Return: A pointer to the maple node pivots + */ +static inline unsigned long *ma_pivots(struct maple_node *node, + enum maple_type type) +{ + switch (type) { + case maple_arange_64: + return node->ma64.pivot; + case maple_range_64: + case maple_leaf_64: + return node->mr64.pivot; + case maple_dense: + return NULL; + } + return NULL; +} + +/* + * ma_gaps() - Get a pointer to the maple node gaps. 
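The request-count encoding described for mas_allocated()/mas_alloc_req() can be illustrated with a short sketch (invented helper, arbitrary count of 5):

/* Illustrative sketch only: with no node allocated, a request for 5
 * nodes is stored directly in mas->alloc as (5 << 1) | 1, so the
 * allocated count stays 0 while the request reads back as 5.
 */
static void example_alloc_req(struct ma_state *mas)
{
	mas->alloc = NULL;
	mas_set_alloc_req(mas, 5);

	WARN_ON(mas_alloc_req(mas) != 5);
	WARN_ON(mas_allocated(mas) != 0);
}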
+ * @node - the maple node + * @type - the node type + * + * Return: A pointer to the maple node gaps + */ +static inline unsigned long *ma_gaps(struct maple_node *node, + enum maple_type type) +{ + switch (type) { + case maple_arange_64: + return node->ma64.gap; + case maple_range_64: + case maple_leaf_64: + case maple_dense: + return NULL; + } + return NULL; +} + +/* + * mte_pivot() - Get the pivot at @piv of the maple encoded node. + * @mn: The maple encoded node. + * @piv: The pivot. + * + * Return: the pivot at @piv of @mn. + */ +static inline unsigned long mte_pivot(const struct maple_enode *mn, + unsigned char piv) +{ + struct maple_node *node = mte_to_node(mn); + + if (piv >= mt_pivots[piv]) { + WARN_ON(1); + return 0; + } + switch (mte_node_type(mn)) { + case maple_arange_64: + return node->ma64.pivot[piv]; + case maple_range_64: + case maple_leaf_64: + return node->mr64.pivot[piv]; + case maple_dense: + return 0; + } + return 0; +} + +/* + * mas_safe_pivot() - get the pivot at @piv or mas->max. + * @mas: The maple state + * @pivots: The pointer to the maple node pivots + * @piv: The pivot to fetch + * @type: The maple node type + * + * Return: The pivot at @piv within the limit of the @pivots array, @mas->max + * otherwise. + */ +static inline unsigned long +mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots, + unsigned char piv, enum maple_type type) +{ + if (piv >= mt_pivots[type]) + return mas->max; + + return pivots[piv]; +} + +/* + * mas_safe_min() - Return the minimum for a given offset. + * @mas: The maple state + * @pivots: The pointer to the maple node pivots + * @offset: The offset into the pivot array + * + * Return: The minimum range value that is contained in @offset. + */ +static inline unsigned long +mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset) +{ + if (likely(offset)) + return pivots[offset - 1] + 1; + + return mas->min; +} + +/* + * mas_logical_pivot() - Get the logical pivot of a given offset. + * @mas: The maple state + * @pivots: The pointer to the maple node pivots + * @offset: The offset into the pivot array + * @type: The maple node type + * + * When there is no value at a pivot (beyond the end of the data), then the + * pivot is actually @mas->max. + * + * Return: the logical pivot of a given @offset. + */ +static inline unsigned long +mas_logical_pivot(struct ma_state *mas, unsigned long *pivots, + unsigned char offset, enum maple_type type) +{ + unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type); + + if (likely(lpiv)) + return lpiv; + + if (likely(offset)) + return mas->max; + + return lpiv; +} + +/* + * mte_set_pivot() - Set a pivot to a value in an encoded maple node. + * @mn: The encoded maple node + * @piv: The pivot offset + * @val: The value of the pivot + */ +static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, + unsigned long val) +{ + struct maple_node *node = mte_to_node(mn); + enum maple_type type = mte_node_type(mn); + + BUG_ON(piv >= mt_pivots[type]); + switch (type) { + default: + case maple_range_64: + case maple_leaf_64: + node->mr64.pivot[piv] = val; + break; + case maple_arange_64: + node->ma64.pivot[piv] = val; + break; + case maple_dense: + break; + } + +} + +/* + * ma_slots() - Get a pointer to the maple node slots. 
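A hedged sketch of how a slot's range falls out of the pivots, using mas_safe_min() and mas_safe_pivot() from above (helper and output parameters invented):

/* Illustrative sketch only: slot @offset of the current node covers the
 * inclusive range [*first, *last]; offset 0 starts at mas->min and the
 * last offset ends at mas->max.
 */
static void example_slot_range(struct ma_state *mas, unsigned char offset,
			       unsigned long *first, unsigned long *last)
{
	enum maple_type type = mte_node_type(mas->node);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	*first = mas_safe_min(mas, pivots, offset);
	*last = mas_safe_pivot(mas, pivots, offset, type);
}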
+ * @mn: The maple node + * @mt: The maple node type + * + * Return: A pointer to the maple node slots + */ +static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) +{ + switch (mt) { + default: + case maple_arange_64: + return mn->ma64.slot; + case maple_range_64: + case maple_leaf_64: + return mn->mr64.slot; + case maple_dense: + return mn->slot; + } +} + +static inline bool mt_locked(const struct maple_tree *mt) +{ + return mt_external_lock(mt) ? mt_lock_is_held(mt) : + lockdep_is_held(&mt->ma_lock); +} + +static inline void *mt_slot(const struct maple_tree *mt, + void __rcu **slots, unsigned char offset) +{ + return rcu_dereference_check(slots[offset], mt_locked(mt)); +} + +/* + * mas_slot_locked() - Get the slot value when holding the maple tree lock. + * @mas: The maple state + * @slots: The pointer to the slots + * @offset: The offset into the slots array to fetch + * + * Return: The entry stored in @slots at the @offset. + */ +static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots, + unsigned char offset) +{ + return rcu_dereference_protected(slots[offset], mt_locked(mas->tree)); +} + +/* + * mas_slot() - Get the slot value when not holding the maple tree lock. + * @mas: The maple state + * @slots: The pointer to the slots + * @offset: The offset into the slots array to fetch + * + * Return: The entry stored in @slots at the @offset + */ +static inline void *mas_slot(struct ma_state *mas, void __rcu **slots, + unsigned char offset) +{ + return mt_slot(mas->tree, slots, offset); +} + +/* + * mas_root() - Get the maple tree root. + * @mas: The maple state. + * + * Return: The pointer to the root of the tree + */ +static inline void *mas_root(struct ma_state *mas) +{ + return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree)); +} + +static inline void *mt_root_locked(struct maple_tree *mt) +{ + return rcu_dereference_protected(mt->ma_root, mt_locked(mt)); +} + +/* + * mas_root_locked() - Get the maple tree root when holding the maple tree lock. + * @mas: The maple state. + * + * Return: The pointer to the root of the tree + */ +static inline void *mas_root_locked(struct ma_state *mas) +{ + return mt_root_locked(mas->tree); +} + +static inline struct maple_metadata *ma_meta(struct maple_node *mn, + enum maple_type mt) +{ + switch (mt) { + case maple_arange_64: + return &mn->ma64.meta; + default: + return &mn->mr64.meta; + } +} + +/* + * ma_set_meta() - Set the metadata information of a node. + * @mn: The maple node + * @mt: The maple node type + * @offset: The offset of the highest sub-gap in this node. + * @end: The end of the data in this node. 
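A short sketch contrasting the two slot accessors above (helper invented): read-side walkers use mas_slot(), which rcu_dereference_check()s against either the lock or an RCU read section, while writers holding the lock use mas_slot_locked():

/* Illustrative sketch only: fetch slot @offset of the current node. */
static void *example_get_slot(struct ma_state *mas, unsigned char offset,
			      bool writer)
{
	void __rcu **slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));

	return writer ? mas_slot_locked(mas, slots, offset) :
			mas_slot(mas, slots, offset);
}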
+ */ +static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt, + unsigned char offset, unsigned char end) +{ + struct maple_metadata *meta = ma_meta(mn, mt); + + meta->gap = offset; + meta->end = end; +} + +/* + * ma_meta_end() - Get the data end of a node from the metadata + * @mn: The maple node + * @mt: The maple node type + */ +static inline unsigned char ma_meta_end(struct maple_node *mn, + enum maple_type mt) +{ + struct maple_metadata *meta = ma_meta(mn, mt); + + return meta->end; +} + +/* + * ma_meta_gap() - Get the largest gap location of a node from the metadata + * @mn: The maple node + * @mt: The maple node type + */ +static inline unsigned char ma_meta_gap(struct maple_node *mn, + enum maple_type mt) +{ + BUG_ON(mt != maple_arange_64); + + return mn->ma64.meta.gap; +} + +/* + * ma_set_meta_gap() - Set the largest gap location in a nodes metadata + * @mn: The maple node + * @mn: The maple node type + * @offset: The location of the largest gap. + */ +static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt, + unsigned char offset) +{ + + struct maple_metadata *meta = ma_meta(mn, mt); + + meta->gap = offset; +} + +/* + * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes. + * @mat - the ma_topiary, a linked list of dead nodes. + * @dead_enode - the node to be marked as dead and added to the tail of the list + * + * Add the @dead_enode to the linked list in @mat. + */ +static inline void mat_add(struct ma_topiary *mat, + struct maple_enode *dead_enode) +{ + mte_set_node_dead(dead_enode); + mte_to_mat(dead_enode)->next = NULL; + if (!mat->tail) { + mat->tail = mat->head = dead_enode; + return; + } + + mte_to_mat(mat->tail)->next = dead_enode; + mat->tail = dead_enode; +} + +static void mte_destroy_walk(struct maple_enode *, struct maple_tree *); +static inline void mas_free(struct ma_state *mas, struct maple_enode *used); + +/* + * mas_mat_free() - Free all nodes in a dead list. + * @mas - the maple state + * @mat - the ma_topiary linked list of dead nodes to free. + * + * Free walk a dead list. + */ +static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat) +{ + struct maple_enode *next; + + while (mat->head) { + next = mte_to_mat(mat->head)->next; + mas_free(mas, mat->head); + mat->head = next; + } +} + +/* + * mas_mat_destroy() - Free all nodes and subtrees in a dead list. + * @mas - the maple state + * @mat - the ma_topiary linked list of dead nodes to free. + * + * Destroy walk a dead list. + */ +static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat) +{ + struct maple_enode *next; + + while (mat->head) { + next = mte_to_mat(mat->head)->next; + mte_destroy_walk(mat->head, mat->mtree); + mat->head = next; + } +} +/* + * mas_descend() - Descend into the slot stored in the ma_state. + * @mas - the maple state. + * + * Note: Not RCU safe, only use in write side or debug code. + */ +static inline void mas_descend(struct ma_state *mas) +{ + enum maple_type type; + unsigned long *pivots; + struct maple_node *node; + void __rcu **slots; + + node = mas_mn(mas); + type = mte_node_type(mas->node); + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + + if (mas->offset) + mas->min = pivots[mas->offset - 1] + 1; + mas->max = mas_safe_pivot(mas, pivots, mas->offset, type); + mas->node = mas_slot(mas, slots, mas->offset); +} + +/* + * mte_set_gap() - Set a maple node gap. 
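A hedged sketch of the dead-node (topiary) list handling above, reusing the MA_TOPIARY() initialiser from the header (helper and old_enode are invented; in the real code the list is only freed once the replacement nodes are visible):

/* Illustrative sketch only: queue a displaced node on a topiary list and
 * free the whole list; mas_mat_free() either RCU-frees each node or
 * pushes it back onto the maple state for reuse.
 */
static void example_topiary(struct ma_state *mas, struct maple_enode *old_enode)
{
	MA_TOPIARY(mat, mas->tree);

	mat_add(&mat, old_enode);	/* marks the node dead */
	mas_mat_free(mas, &mat);
}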
+ * @mn: The encoded maple node + * @gap: The offset of the gap to set + * @val: The gap value + */ +static inline void mte_set_gap(const struct maple_enode *mn, + unsigned char gap, unsigned long val) +{ + switch (mte_node_type(mn)) { + default: + break; + case maple_arange_64: + mte_to_node(mn)->ma64.gap[gap] = val; + break; + } +} + +/* + * mas_ascend() - Walk up a level of the tree. + * @mas: The maple state + * + * Sets the @mas->max and @mas->min to the correct values when walking up. This + * may cause several levels of walking up to find the correct min and max. + * May find a dead node which will cause a premature return. + * Return: 1 on dead node, 0 otherwise + */ +static int mas_ascend(struct ma_state *mas) +{ + struct maple_enode *p_enode; /* parent enode. */ + struct maple_enode *a_enode; /* ancestor enode. */ + struct maple_node *a_node; /* ancestor node. */ + struct maple_node *p_node; /* parent node. */ + unsigned char a_slot; + enum maple_type a_type; + unsigned long min, max; + unsigned long *pivots; + unsigned char offset; + bool set_max = false, set_min = false; + + a_node = mas_mn(mas); + if (ma_is_root(a_node)) { + mas->offset = 0; + return 0; + } + + p_node = mte_parent(mas->node); + if (unlikely(a_node == p_node)) + return 1; + a_type = mas_parent_enum(mas, mas->node); + offset = mte_parent_slot(mas->node); + a_enode = mt_mk_node(p_node, a_type); + + /* Check to make sure all parent information is still accurate */ + if (p_node != mte_parent(mas->node)) + return 1; + + mas->node = a_enode; + mas->offset = offset; + + if (mte_is_root(a_enode)) { + mas->max = ULONG_MAX; + mas->min = 0; + return 0; + } + + min = 0; + max = ULONG_MAX; + do { + p_enode = a_enode; + a_type = mas_parent_enum(mas, p_enode); + a_node = mte_parent(p_enode); + a_slot = mte_parent_slot(p_enode); + pivots = ma_pivots(a_node, a_type); + a_enode = mt_mk_node(a_node, a_type); + + if (!set_min && a_slot) { + set_min = true; + min = pivots[a_slot - 1] + 1; + } + + if (!set_max && a_slot < mt_pivots[a_type]) { + set_max = true; + max = pivots[a_slot]; + } + + if (unlikely(ma_dead_node(a_node))) + return 1; + + if (unlikely(ma_is_root(a_node))) + break; + + } while (!set_min || !set_max); + + mas->max = max; + mas->min = min; + return 0; +} + +/* + * mas_pop_node() - Get a previously allocated maple node from the maple state. + * @mas: The maple state + * + * Return: A pointer to a maple node. + */ +static inline struct maple_node *mas_pop_node(struct ma_state *mas) +{ + struct maple_alloc *ret, *node = mas->alloc; + unsigned long total = mas_allocated(mas); + + /* nothing or a request pending. */ + if (unlikely(!total)) + return NULL; + + if (total == 1) { + /* single allocation in this ma_state */ + mas->alloc = NULL; + ret = node; + goto single_node; + } + + if (!node->node_count) { + /* Single allocation in this node. */ + mas->alloc = node->slot[0]; + node->slot[0] = NULL; + mas->alloc->total = node->total - 1; + ret = node; + goto new_head; + } + + node->total--; + ret = node->slot[node->node_count]; + node->slot[node->node_count--] = NULL; + +single_node: +new_head: + ret->total = 0; + ret->node_count = 0; + if (ret->request_count) { + mas_set_alloc_req(mas, ret->request_count + 1); + ret->request_count = 0; + } + return (struct maple_node *)ret; +} + +/* + * mas_push_node() - Push a node back on the maple state allocation. + * @mas: The maple state + * @used: The used maple node + * + * Stores the maple node back into @mas->alloc for reuse. 
Updates allocated and + * requested node count as necessary. + */ +static inline void mas_push_node(struct ma_state *mas, struct maple_node *used) +{ + struct maple_alloc *reuse = (struct maple_alloc *)used; + struct maple_alloc *head = mas->alloc; + unsigned long count; + unsigned int requested = mas_alloc_req(mas); + + memset(reuse, 0, sizeof(*reuse)); + count = mas_allocated(mas); + + if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) { + if (head->slot[0]) + head->node_count++; + head->slot[head->node_count] = reuse; + head->total++; + goto done; + } + + reuse->total = 1; + if ((head) && !((unsigned long)head & 0x1)) { + head->request_count = 0; + reuse->slot[0] = head; + reuse->total += head->total; + } + + mas->alloc = reuse; +done: + if (requested > 1) + mas_set_alloc_req(mas, requested - 1); +} + +/* + * mas_alloc_nodes() - Allocate nodes into a maple state + * @mas: The maple state + * @gfp: The GFP Flags + */ +static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) +{ + struct maple_alloc *node; + struct maple_alloc **nodep = &mas->alloc; + unsigned long allocated = mas_allocated(mas); + unsigned long success = allocated; + unsigned int requested = mas_alloc_req(mas); + unsigned int count; + void **slots = NULL; + unsigned int max_req = 0; + + if (!requested) + return; + + mas_set_alloc_req(mas, 0); + if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) { + node = (struct maple_alloc *)mt_alloc_one(gfp); + if (!node) + goto nomem_one; + + if (allocated) + node->slot[0] = mas->alloc; + + success++; + mas->alloc = node; + requested--; + } + + node = mas->alloc; + while (requested) { + max_req = MAPLE_NODE_SLOTS - 1; + if (node->slot[0]) { + unsigned int offset = node->node_count + 1; + + slots = (void **)&node->slot[offset]; + max_req -= offset; + } else { + slots = (void **)&node->slot; + } + + max_req = min(requested, max_req); + count = mt_alloc_bulk(gfp, max_req, slots); + if (!count) + goto nomem_bulk; + + node->node_count += count; + /* zero indexed. */ + if (slots == (void **)&node->slot) + node->node_count--; + + success += count; + nodep = &node->slot[0]; + node = *nodep; + requested -= count; + } + mas->alloc->total = success; + return; + +nomem_bulk: + /* Clean up potential freed allocations on bulk failure */ + memset(slots, 0, max_req * sizeof(unsigned long)); +nomem_one: + mas_set_alloc_req(mas, requested); + if (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) + mas->alloc->total = success; + mas_set_err(mas, -ENOMEM); + return; + +} + +/* + * mas_free() - Free an encoded maple node + * @mas: The maple state + * @used: The encoded maple node to free. + * + * Uses rcu free if necessary, pushes @used back on the maple state allocations + * otherwise. + */ +static inline void mas_free(struct ma_state *mas, struct maple_enode *used) +{ + struct maple_node *tmp = mte_to_node(used); + + if (mt_in_rcu(mas->tree)) + ma_free_rcu(tmp); + else + mas_push_node(mas, tmp); +} + +/* + * mas_node_count() - Check if enough nodes are allocated and request more if + * there is not enough nodes. + * @mas: The maple state + * @count: The number of nodes needed + */ +static void mas_node_count(struct ma_state *mas, int count) +{ + unsigned long allocated = mas_allocated(mas); + + if (allocated < count) { + mas_set_alloc_req(mas, count - allocated); + mas_alloc_nodes(mas, GFP_NOWAIT | __GFP_NOWARN); + } +} + +/* + * mas_start() - Sets up maple state for operations. + * @mas: The maple state. 
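Tying mas_node_count(), the error state and mas_nomem() together, here is a hedged sketch of the usual allocation flow (helper, retry label and the count of 3 are invented; mas_nomem() is only declared in the header above, and its retry semantics are assumed here):

/* Illustrative sketch only: make sure 3 nodes are available before a
 * write.  mas_node_count() tries GFP_NOWAIT first; on failure the state
 * holds -ENOMEM and mas_nomem() is assumed to perform the sleeping
 * allocation (dropping the internal lock around it) and return true
 * when the operation should be retried.
 */
static int example_prealloc(struct ma_state *mas)
{
retry:
	mas_node_count(mas, 3);
	if (mas_is_err(mas)) {
		if (mas_nomem(mas, GFP_KERNEL))
			goto retry;
		return xa_err(mas->node);
	}

	return 0;
}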
+ * + * If mas->node == MAS_START, then set the min, max, depth, and offset to + * defaults. + * + * Return: + * - If mas->node is an error or not MAS_START, return NULL. + * - If it's an empty tree: NULL & mas->node == MAS_NONE + * - If it's a single entry: The entry & mas->node == MAS_ROOT + * - If it's a tree: NULL & mas->node == safe root node. + */ +static inline struct maple_enode *mas_start(struct ma_state *mas) +{ + if (likely(mas_is_start(mas))) { + struct maple_enode *root; + + mas->node = MAS_NONE; + mas->min = 0; + mas->max = ULONG_MAX; + mas->depth = 0; + mas->offset = 0; + + root = mas_root(mas); + /* Tree with nodes */ + if (likely(xa_is_node(root))) { + mas->node = mte_safe_root(root); + return NULL; + } + + /* empty tree */ + if (unlikely(!root)) { + mas->offset = MAPLE_NODE_SLOTS; + return NULL; + } + + /* Single entry tree */ + mas->node = MAS_ROOT; + mas->offset = MAPLE_NODE_SLOTS; + + /* Single entry tree. */ + if (mas->index > 0) + return NULL; + + return root; + } + + return NULL; +} + +/* + * ma_data_end() - Find the end of the data in a node. + * @node: The maple node + * @type: The maple node type + * @pivots: The array of pivots in the node + * @max: The maximum value in the node + * + * Uses metadata to find the end of the data when possible. + * Return: The zero indexed last slot with data (may be null). + */ +static inline unsigned char ma_data_end(struct maple_node *node, + enum maple_type type, + unsigned long *pivots, + unsigned long max) +{ + unsigned char offset; + + if (type == maple_arange_64) + return ma_meta_end(node, type); + + offset = mt_pivots[type] - 1; + if (likely(!pivots[offset])) + return ma_meta_end(node, type); + + if (likely(pivots[offset] == max)) + return offset; + + return mt_pivots[type]; +} + +/* + * mas_data_end() - Find the end of the data (slot). + * @mas: the maple state + * + * This method is optimized to check the metadata of a node if the node type + * supports data end metadata. + * + * Return: The zero indexed last slot with data (may be null). + */ +static inline unsigned char mas_data_end(struct ma_state *mas) +{ + enum maple_type type; + struct maple_node *node; + unsigned char offset; + unsigned long *pivots; + + type = mte_node_type(mas->node); + node = mas_mn(mas); + if (type == maple_arange_64) + return ma_meta_end(node, type); + + pivots = ma_pivots(node, type); + offset = mt_pivots[type] - 1; + if (likely(!pivots[offset])) + return ma_meta_end(node, type); + + if (likely(pivots[offset] == mas->max)) + return offset; + + return mt_pivots[type]; +} + +/* + * mas_leaf_max_gap() - Returns the largest gap in a leaf node + * @mas - the maple state + * + * Return: The maximum gap in the leaf. + */ +static unsigned long mas_leaf_max_gap(struct ma_state *mas) +{ + enum maple_type mt; + unsigned long pstart, gap, max_gap; + struct maple_node *mn; + unsigned long *pivots; + void __rcu **slots; + unsigned char i; + unsigned char max_piv; + + mt = mte_node_type(mas->node); + mn = mas_mn(mas); + slots = ma_slots(mn, mt); + max_gap = 0; + if (unlikely(ma_is_dense(mt))) { + gap = 0; + for (i = 0; i < mt_slots[mt]; i++) { + if (slots[i]) { + if (gap > max_gap) + max_gap = gap; + gap = 0; + } else { + gap++; + } + } + if (gap > max_gap) + max_gap = gap; + return max_gap; + } + + /* + * Check the first implied pivot optimizes the loop below and slot 1 may + * be skipped if there is a gap in slot 0. 
+ */ + pivots = ma_pivots(mn, mt); + if (likely(!slots[0])) { + max_gap = pivots[0] - mas->min + 1; + i = 2; + } else { + i = 1; + } + + /* reduce max_piv as the special case is checked before the loop */ + max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1; + /* + * Check end implied pivot which can only be a gap on the right most + * node. + */ + if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) { + gap = ULONG_MAX - pivots[max_piv]; + if (gap > max_gap) + max_gap = gap; + } + + for (; i <= max_piv; i++) { + /* data == no gap. */ + if (likely(slots[i])) + continue; + + pstart = pivots[i - 1]; + gap = pivots[i] - pstart; + if (gap > max_gap) + max_gap = gap; + + /* There cannot be two gaps in a row. */ + i++; + } + return max_gap; +} + +/* + * ma_max_gap() - Get the maximum gap in a maple node (non-leaf) + * @node: The maple node + * @gaps: The pointer to the gaps + * @mt: The maple node type + * @*off: Pointer to store the offset location of the gap. + * + * Uses the metadata data end to scan backwards across set gaps. + * + * Return: The maximum gap value + */ +static inline unsigned long +ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt, + unsigned char *off) +{ + unsigned char offset, i; + unsigned long max_gap = 0; + + i = offset = ma_meta_end(node, mt); + do { + if (gaps[i] > max_gap) { + max_gap = gaps[i]; + offset = i; + } + } while (i--); + + *off = offset; + return max_gap; +} + +/* + * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. + * @mas: The maple state. + * + * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap. + * + * Return: The gap value. + */ +static inline unsigned long mas_max_gap(struct ma_state *mas) +{ + unsigned long *gaps; + unsigned char offset; + enum maple_type mt; + struct maple_node *node; + + mt = mte_node_type(mas->node); + if (ma_is_leaf(mt)) + return mas_leaf_max_gap(mas); + + node = mas_mn(mas); + offset = ma_meta_gap(node, mt); + if (offset == MAPLE_ARANGE64_META_MAX) + return 0; + + gaps = ma_gaps(node, mt); + return gaps[offset]; +} + +/* + * mas_parent_gap() - Set the parent gap and any gaps above, as needed + * @mas: The maple state + * @offset: The gap offset in the parent to set + * @new: The new gap value. + * + * Set the parent gap then continue to set the gap upwards, using the metadata + * of the parent to see if it is necessary to check the node above. + */ +static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, + unsigned long new) +{ + unsigned long meta_gap = 0; + struct maple_node *pnode; + struct maple_enode *penode; + unsigned long *pgaps; + unsigned char meta_offset; + enum maple_type pmt; + + pnode = mte_parent(mas->node); + pmt = mas_parent_enum(mas, mas->node); + penode = mt_mk_node(pnode, pmt); + pgaps = ma_gaps(pnode, pmt); + +ascend: + meta_offset = ma_meta_gap(pnode, pmt); + if (meta_offset == MAPLE_ARANGE64_META_MAX) + meta_gap = 0; + else + meta_gap = pgaps[meta_offset]; + + pgaps[offset] = new; + + if (meta_gap == new) + return; + + if (offset != meta_offset) { + if (meta_gap > new) + return; + + ma_set_meta_gap(pnode, pmt, offset); + } else if (new < meta_gap) { + meta_offset = 15; + new = ma_max_gap(pnode, pgaps, pmt, &meta_offset); + ma_set_meta_gap(pnode, pmt, meta_offset); + } + + if (ma_is_root(pnode)) + return; + + /* Go to the parent node. 
*/ + pnode = mte_parent(penode); + pmt = mas_parent_enum(mas, penode); + pgaps = ma_gaps(pnode, pmt); + offset = mte_parent_slot(penode); + penode = mt_mk_node(pnode, pmt); + goto ascend; +} + +/* + * mas_update_gap() - Update a nodes gaps and propagate up if necessary. + * @mas - the maple state. + */ +static inline void mas_update_gap(struct ma_state *mas) +{ + unsigned char pslot; + unsigned long p_gap; + unsigned long max_gap; + + if (!mt_is_alloc(mas->tree)) + return; + + if (mte_is_root(mas->node)) + return; + + max_gap = mas_max_gap(mas); + + pslot = mte_parent_slot(mas->node); + p_gap = ma_gaps(mte_parent(mas->node), + mas_parent_enum(mas, mas->node))[pslot]; + + if (p_gap != max_gap) + mas_parent_gap(mas, pslot, max_gap); +} + +/* + * mas_adopt_children() - Set the parent pointer of all nodes in @parent to + * @parent with the slot encoded. + * @mas - the maple state (for the tree) + * @parent - the maple encoded node containing the children. + */ +static inline void mas_adopt_children(struct ma_state *mas, + struct maple_enode *parent) +{ + enum maple_type type = mte_node_type(parent); + struct maple_node *node = mas_mn(mas); + void __rcu **slots = ma_slots(node, type); + unsigned long *pivots = ma_pivots(node, type); + struct maple_enode *child; + unsigned char offset; + + offset = ma_data_end(node, type, pivots, mas->max); + do { + child = mas_slot_locked(mas, slots, offset); + mte_set_parent(child, parent, offset); + } while (offset--); +} + +/* + * mas_replace() - Replace a maple node in the tree with mas->node. Uses the + * parent encoding to locate the maple node in the tree. + * @mas - the ma_state to use for operations. + * @advanced - boolean to adopt the child nodes and free the old node (false) or + * leave the node (true) and handle the adoption and free elsewhere. + */ +static inline void mas_replace(struct ma_state *mas, bool advanced) + __must_hold(mas->tree->lock) +{ + struct maple_node *mn = mas_mn(mas); + struct maple_enode *old_enode; + unsigned char offset = 0; + void __rcu **slots = NULL; + + if (ma_is_root(mn)) { + old_enode = mas_root_locked(mas); + } else { + offset = mte_parent_slot(mas->node); + slots = ma_slots(mte_parent(mas->node), + mas_parent_enum(mas, mas->node)); + old_enode = mas_slot_locked(mas, slots, offset); + } + + if (!advanced && !mte_is_leaf(mas->node)) + mas_adopt_children(mas, mas->node); + + if (mte_is_root(mas->node)) { + mn->parent = ma_parent_ptr( + ((unsigned long)mas->tree | MA_ROOT_PARENT)); + rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); + mas_set_height(mas); + } else { + rcu_assign_pointer(slots[offset], mas->node); + } + + if (!advanced) + mas_free(mas, old_enode); +} + +/* + * mas_new_child() - Find the new child of a node. + * @mas: the maple state + * @child: the maple state to store the child. 
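A hedged sketch of how a rebuilt node is swapped in with mas_replace() (helper invented; it assumes @new_node already carries the same parent pointer as the node it replaces, which the real callers set up before this point):

/* Illustrative sketch only: point the maple state at a freshly built
 * node of the same type, then let mas_replace() re-parent the children
 * and free the node it displaces (advanced == false).
 */
static void example_replace(struct ma_state *mas, struct maple_node *new_node)
{
	mas->node = mt_mk_node(new_node, mte_node_type(mas->node));
	mas_replace(mas, false);
}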
+ */ +static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child) + __must_hold(mas->tree->lock) +{ + enum maple_type mt; + unsigned char offset; + unsigned char end; + unsigned long *pivots; + struct maple_enode *entry; + struct maple_node *node; + void __rcu **slots; + + mt = mte_node_type(mas->node); + node = mas_mn(mas); + slots = ma_slots(node, mt); + pivots = ma_pivots(node, mt); + end = ma_data_end(node, mt, pivots, mas->max); + for (offset = mas->offset; offset <= end; offset++) { + entry = mas_slot_locked(mas, slots, offset); + if (mte_parent(entry) == node) { + *child = *mas; + mas->offset = offset + 1; + child->offset = offset; + mas_descend(child); + child->offset = 0; + return true; + } + } + return false; +} + +/* + * mab_shift_right() - Shift the data in mab right. Note, does not clean out the + * old data or set b_node->b_end. + * @b_node: the maple_big_node + * @shift: the shift count + */ +static inline void mab_shift_right(struct maple_big_node *b_node, + unsigned char shift) +{ + unsigned long size = b_node->b_end * sizeof(unsigned long); + + memmove(b_node->pivot + shift, b_node->pivot, size); + memmove(b_node->slot + shift, b_node->slot, size); + if (b_node->type == maple_arange_64) + memmove(b_node->gap + shift, b_node->gap, size); +} + +/* + * mab_middle_node() - Check if a middle node is needed (unlikely) + * @b_node: the maple_big_node that contains the data. + * @size: the amount of data in the b_node + * @split: the potential split location + * @slot_count: the size that can be stored in a single node being considered. + * + * Return: true if a middle node is required. + */ +static inline bool mab_middle_node(struct maple_big_node *b_node, int split, + unsigned char slot_count) +{ + unsigned char size = b_node->b_end; + + if (size >= 2 * slot_count) + return true; + + if (!b_node->slot[split] && (size >= 2 * slot_count - 1)) + return true; + + return false; +} + +/* + * mab_no_null_split() - ensure the split doesn't fall on a NULL + * @b_node: the maple_big_node with the data + * @split: the suggested split location + * @slot_count: the number of slots in the node being considered. + * + * Return: the split location. + */ +static inline int mab_no_null_split(struct maple_big_node *b_node, + unsigned char split, unsigned char slot_count) +{ + if (!b_node->slot[split]) { + /* + * If the split is less than the max slot && the right side will + * still be sufficient, then increment the split on NULL. + */ + if ((split < slot_count - 1) && + (b_node->b_end - split) > (mt_min_slots[b_node->type])) + split++; + else + split--; + } + return split; +} + +/* + * mab_calc_split() - Calculate the split location and if there needs to be two + * splits. + * @bn: The maple_big_node with the data + * @mid_split: The second split, if required. 0 otherwise. + * + * Return: The first split location. The middle split is set in @mid_split. + */ +static inline int mab_calc_split(struct ma_state *mas, + struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) +{ + unsigned char b_end = bn->b_end; + int split = b_end / 2; /* Assume equal split. */ + unsigned char slot_min, slot_count = mt_slots[bn->type]; + + /* + * To support gap tracking, all NULL entries are kept together and a node cannot + * end on a NULL entry, with the exception of the left-most leaf. The + * limitation means that the split of a node must be checked for this condition + * and be able to put more data in one direction or the other. 
+ */ + if (unlikely((mas->mas_flags & MA_STATE_BULK))) { + *mid_split = 0; + if (ma_is_leaf(bn->type)) + slot_min = 2; + else + return b_end - mt_min_slots[bn->type]; + + split = b_end - slot_min; + mas->mas_flags |= MA_STATE_REBALANCE; + if (!bn->slot[split]) + split--; + return split; + } + + /* + * Although extremely rare, it is possible to enter what is known as the 3-way + * split scenario. The 3-way split comes about by means of a store of a range + * that overwrites the end and beginning of two full nodes. The result is a set + * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can + * also be located in different parent nodes which are also full. This can + * carry upwards all the way to the root in the worst case. + */ + if (unlikely(mab_middle_node(bn, split, slot_count))) { + split = b_end / 3; + *mid_split = split * 2; + } else { + slot_min = mt_min_slots[bn->type]; + + *mid_split = 0; + /* + * Avoid having a range less than the slot count unless it + * causes one node to be deficient. + * NOTE: mt_min_slots is 1 based, b_end and split are zero. + */ + while (((bn->pivot[split] - min) < slot_count - 1) && + (split < slot_count - 1) && (b_end - split > slot_min)) + split++; + } + + /* Avoid ending a node on a NULL entry */ + split = mab_no_null_split(bn, split, slot_count); + if (!(*mid_split)) + return split; + + *mid_split = mab_no_null_split(bn, *mid_split, slot_count); + + return split; +} + +/* + * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node + * and set @b_node->b_end to the next free slot. + * @mas: The maple state + * @mas_start: The starting slot to copy + * @mas_end: The end slot to copy (inclusively) + * @b_node: The maple_big_node to place the data + * @mab_start: The starting location in maple_big_node to store the data. + */ +static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start, + unsigned char mas_end, struct maple_big_node *b_node, + unsigned char mab_start) +{ + enum maple_type mt; + struct maple_node *node; + void __rcu **slots; + unsigned long *pivots, *gaps; + int i = mas_start, j = mab_start; + unsigned char piv_end; + + node = mas_mn(mas); + mt = mte_node_type(mas->node); + pivots = ma_pivots(node, mt); + if (!i) { + b_node->pivot[j] = pivots[i++]; + if (unlikely(i > mas_end)) + goto complete; + j++; + } + + piv_end = min(mas_end, mt_pivots[mt]); + for (; i < piv_end; i++, j++) { + b_node->pivot[j] = pivots[i]; + if (unlikely(!b_node->pivot[j])) + break; + + if (unlikely(mas->max == b_node->pivot[j])) + goto complete; + } + + if (likely(i <= mas_end)) + b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt); + +complete: + b_node->b_end = ++j; + j -= mab_start; + slots = ma_slots(node, mt); + memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j); + if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) { + gaps = ma_gaps(node, mt); + memcpy(b_node->gap + mab_start, gaps + mas_start, + sizeof(unsigned long) * j); + } +} + +/* + * mas_leaf_set_meta() - Set the metadata of a leaf if possible. + * @mas: The maple state + * @node: The maple node + * @pivots: pointer to the maple node pivots + * @mt: The maple type + * @end: The assumed end + * + * Note, end may be incremented within this function but not modified at the + * source. This is fine since the metadata is the last thing to be stored in a + * node during a write. 
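A brief worked example of the split arithmetic above, with invented numbers (MAPLE_RANGE64_SLOTS is assumed to be 16):

/*
 * Worked example (illustrative): a maple_range_64 big node with
 * b_end = 20 and slot_count = 16.  Since 20 < 2 * 16, mab_middle_node()
 * is false and no third node is needed; the starting guess is
 * split = 20 / 2 = 10.  The loop then nudges the split right while the
 * left half spans fewer than slot_count - 1 indices (as long as the
 * right half does not become deficient), and mab_no_null_split() moves
 * it off a NULL slot so that neither new node ends on a NULL entry.
 */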
+ */ +static inline void mas_leaf_set_meta(struct ma_state *mas, + struct maple_node *node, unsigned long *pivots, + enum maple_type mt, unsigned char end) +{ + /* There is no room for metadata already */ + if (mt_pivots[mt] <= end) + return; + + if (pivots[end] && pivots[end] < mas->max) + end++; + + if (end < mt_slots[mt] - 1) + ma_set_meta(node, mt, 0, end); +} + +/* + * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node. + * @b_node: the maple_big_node that has the data + * @mab_start: the start location in @b_node. + * @mab_end: The end location in @b_node (inclusively) + * @mas: The maple state with the maple encoded node. + */ +static inline void mab_mas_cp(struct maple_big_node *b_node, + unsigned char mab_start, unsigned char mab_end, + struct ma_state *mas, bool new_max) +{ + int i, j = 0; + enum maple_type mt = mte_node_type(mas->node); + struct maple_node *node = mte_to_node(mas->node); + void __rcu **slots = ma_slots(node, mt); + unsigned long *pivots = ma_pivots(node, mt); + unsigned long *gaps = NULL; + unsigned char end; + + if (mab_end - mab_start > mt_pivots[mt]) + mab_end--; + + if (!pivots[mt_pivots[mt] - 1]) + slots[mt_pivots[mt]] = NULL; + + i = mab_start; + pivots[j++] = b_node->pivot[i++]; + do { + pivots[j++] = b_node->pivot[i++]; + } while (i <= mab_end && likely(b_node->pivot[i])); + + memcpy(slots, b_node->slot + mab_start, + sizeof(void *) * (i - mab_start)); + + if (new_max) + mas->max = b_node->pivot[i - 1]; + + end = j - 1; + if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { + unsigned long max_gap = 0; + unsigned char offset = 15; + + gaps = ma_gaps(node, mt); + do { + gaps[--j] = b_node->gap[--i]; + if (gaps[j] > max_gap) { + offset = j; + max_gap = gaps[j]; + } + } while (j); + + ma_set_meta(node, mt, offset, end); + } else { + mas_leaf_set_meta(mas, node, pivots, mt, end); + } +} + +/* + * mas_descend_adopt() - Descend through a sub-tree and adopt children. + * @mas: the maple state with the maple encoded node of the sub-tree. + * + * Descend through a sub-tree and adopt children who do not have the correct + * parents set. Follow the parents which have the correct parents as they are + * the new entries which need to be followed to find other incorrectly set + * parents. + */ +static inline void mas_descend_adopt(struct ma_state *mas) +{ + struct ma_state list[3], next[3]; + int i, n; + + /* + * At each level there may be up to 3 correct parent pointers which indicates + * the new nodes which need to be walked to find any new nodes at a lower level. + */ + + for (i = 0; i < 3; i++) { + list[i] = *mas; + list[i].offset = 0; + next[i].offset = 0; + } + next[0] = *mas; + + while (!mte_is_leaf(list[0].node)) { + n = 0; + for (i = 0; i < 3; i++) { + if (mas_is_none(&list[i])) + continue; + + if (i && list[i-1].node == list[i].node) + continue; + + while ((n < 3) && (mas_new_child(&list[i], &next[n]))) + n++; + + mas_adopt_children(&list[i], list[i].node); + } + + while (n < 3) + next[n++].node = MAS_NONE; + + /* descend by setting the list to the children */ + for (i = 0; i < 3; i++) + list[i] = next[i]; + } +} + +/* + * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. 
+ * @mas: The maple state + * @end: The maple node end + * @mt: The maple node type + */ +static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, + enum maple_type mt) +{ + if (!(mas->mas_flags & MA_STATE_BULK)) + return; + + if (mte_is_root(mas->node)) + return; + + if (end > mt_min_slots[mt]) { + mas->mas_flags &= ~MA_STATE_REBALANCE; + return; + } +} + +/* + * mas_store_b_node() - Store an @entry into the b_node while also copying the + * data from a maple encoded node. + * @wr_mas: the maple write state + * @b_node: the maple_big_node to fill with data + * @offset_end: the offset to end copying + * + * Return: The actual end of the data stored in @b_node + */ +static inline void mas_store_b_node(struct ma_wr_state *wr_mas, + struct maple_big_node *b_node, unsigned char offset_end) +{ + unsigned char slot; + unsigned char b_end; + /* Possible underflow of piv will wrap back to 0 before use. */ + unsigned long piv; + struct ma_state *mas = wr_mas->mas; + + b_node->type = wr_mas->type; + b_end = 0; + slot = mas->offset; + if (slot) { + /* Copy start data up to insert. */ + mas_mab_cp(mas, 0, slot - 1, b_node, 0); + b_end = b_node->b_end; + piv = b_node->pivot[b_end - 1]; + } else + piv = mas->min - 1; + + if (piv + 1 < mas->index) { + /* Handle range starting after old range */ + b_node->slot[b_end] = wr_mas->content; + if (!wr_mas->content) + b_node->gap[b_end] = mas->index - 1 - piv; + b_node->pivot[b_end++] = mas->index - 1; + } + + /* Store the new entry. */ + mas->offset = b_end; + b_node->slot[b_end] = wr_mas->entry; + b_node->pivot[b_end] = mas->last; + + /* Appended. */ + if (mas->last >= mas->max) + goto b_end; + + /* Handle new range ending before old range ends */ + piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); + if (piv > mas->last) { + if (piv == ULONG_MAX) + mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); + + if (offset_end != slot) + wr_mas->content = mas_slot_locked(mas, wr_mas->slots, + offset_end); + + b_node->slot[++b_end] = wr_mas->content; + if (!wr_mas->content) + b_node->gap[b_end] = piv - mas->last + 1; + b_node->pivot[b_end] = piv; + } + + slot = offset_end + 1; + if (slot > wr_mas->node_end) + goto b_end; + + /* Copy end data to the end of the node. */ + mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end); + b_node->b_end--; + return; + +b_end: + b_node->b_end = b_end; +} + +/* + * mas_prev_sibling() - Find the previous node with the same parent. + * @mas: the maple state + * + * Return: True if there is a previous sibling, false otherwise. + */ +static inline bool mas_prev_sibling(struct ma_state *mas) +{ + unsigned int p_slot = mte_parent_slot(mas->node); + + if (mte_is_root(mas->node)) + return false; + + if (!p_slot) + return false; + + mas_ascend(mas); + mas->offset = p_slot - 1; + mas_descend(mas); + return true; +} + +/* + * mas_next_sibling() - Find the next node with the same parent. + * @mas: the maple state + * + * Return: true if there is a next sibling, false otherwise. + */ +static inline bool mas_next_sibling(struct ma_state *mas) +{ + MA_STATE(parent, mas->tree, mas->index, mas->last); + + if (mte_is_root(mas->node)) + return false; + + parent = *mas; + mas_ascend(&parent); + parent.offset = mte_parent_slot(mas->node) + 1; + if (parent.offset > mas_data_end(&parent)) + return false; + + *mas = parent; + mas_descend(mas); + return true; +} + +/* + * mte_node_or_node() - Return the encoded node or MAS_NONE. + * @enode: The encoded maple node. 
+ * + * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state. + * + * Return: @enode or MAS_NONE + */ +static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) +{ + if (enode) + return enode; + + return ma_enode_ptr(MAS_NONE); +} + +/* + * mas_wr_node_walk() - Find the correct offset for the index in the @mas. + * @wr_mas: The maple write state + * + * Uses mas_slot_locked() and does not need to worry about dead nodes. + */ +static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + unsigned char count; + unsigned char offset; + unsigned long index, min, max; + + if (unlikely(ma_is_dense(wr_mas->type))) { + wr_mas->r_max = wr_mas->r_min = mas->index; + mas->offset = mas->index = mas->min; + return; + } + + wr_mas->node = mas_mn(wr_mas->mas); + wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); + count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, + wr_mas->pivots, mas->max); + offset = mas->offset; + min = mas_safe_min(mas, wr_mas->pivots, offset); + max = wr_mas->pivots[offset]; + if (unlikely(offset == count)) + goto max; /* may have been set to zero */ + + index = mas->index; + if (unlikely(index <= max)) + goto done; + + if (unlikely(!max && offset)) + goto max; + + min = max + 1; + while (++offset < count) { + max = wr_mas->pivots[offset]; + if (index <= max) + goto done; + else if (unlikely(!max)) + break; + + min = max + 1; + } + +max: + max = mas->max; +done: + wr_mas->r_max = max; + wr_mas->r_min = min; + wr_mas->offset_end = mas->offset = offset; +} + +/* + * mas_topiary_range() - Add a range of slots to the topiary. + * @mas: The maple state + * @destroy: The topiary to add the slots (usually destroy) + * @start: The starting slot inclusively + * @end: The end slot inclusively + */ +static inline void mas_topiary_range(struct ma_state *mas, + struct ma_topiary *destroy, unsigned char start, unsigned char end) +{ + void __rcu **slots; + unsigned offset; + + MT_BUG_ON(mas->tree, mte_is_leaf(mas->node)); + slots = ma_slots(mas_mn(mas), mte_node_type(mas->node)); + for (offset = start; offset <= end; offset++) { + struct maple_enode *enode = mas_slot_locked(mas, slots, offset); + + if (mte_dead_node(enode)) + continue; + + mat_add(destroy, enode); + } +} + +/* + * mast_topiary() - Add the portions of the tree to the removal list; either to + * be freed or discarded (destroy walk). + * @mast: The maple_subtree_state. 
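+ *
+ * Nodes that sit between the left and right walk paths are added to
+ * @mast->destroy so they can be walked and freed once the replacement
+ * sub-tree is live; slots that already hold dead nodes are skipped.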
+ */ +static inline void mast_topiary(struct maple_subtree_state *mast) +{ + MA_WR_STATE(wr_mas, mast->orig_l, NULL); + unsigned char r_start, r_end; + unsigned char l_start, l_end; + void **l_slots, **r_slots; + + wr_mas.type = mte_node_type(mast->orig_l->node); + mast->orig_l->index = mast->orig_l->last; + mas_wr_node_walk(&wr_mas); + l_start = mast->orig_l->offset + 1; + l_end = mas_data_end(mast->orig_l); + r_start = 0; + r_end = mast->orig_r->offset; + + if (r_end) + r_end--; + + l_slots = ma_slots(mas_mn(mast->orig_l), + mte_node_type(mast->orig_l->node)); + + r_slots = ma_slots(mas_mn(mast->orig_r), + mte_node_type(mast->orig_r->node)); + + if ((l_start < l_end) && + mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) { + l_start++; + } + + if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) { + if (r_end) + r_end--; + } + + if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node)) + return; + + /* At the node where left and right sides meet, add the parts between */ + if (mast->orig_l->node == mast->orig_r->node) { + return mas_topiary_range(mast->orig_l, mast->destroy, + l_start, r_end); + } + + /* mast->orig_r is different and consumed. */ + if (mte_is_leaf(mast->orig_r->node)) + return; + + if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end))) + l_end--; + + + if (l_start <= l_end) + mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end); + + if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start))) + r_start++; + + if (r_start <= r_end) + mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end); +} + +/* + * mast_rebalance_next() - Rebalance against the next node + * @mast: The maple subtree state + * @old_r: The encoded maple node to the right (next node). + */ +static inline void mast_rebalance_next(struct maple_subtree_state *mast) +{ + unsigned char b_end = mast->bn->b_end; + + mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node), + mast->bn, b_end); + mast->orig_r->last = mast->orig_r->max; +} + +/* + * mast_rebalance_prev() - Rebalance against the previous node + * @mast: The maple subtree state + * @old_l: The encoded maple node to the left (previous node) + */ +static inline void mast_rebalance_prev(struct maple_subtree_state *mast) +{ + unsigned char end = mas_data_end(mast->orig_l) + 1; + unsigned char b_end = mast->bn->b_end; + + mab_shift_right(mast->bn, end); + mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0); + mast->l->min = mast->orig_l->min; + mast->orig_l->index = mast->orig_l->min; + mast->bn->b_end = end + b_end; + mast->l->offset += end; +} + +/* + * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring + * the node to the right. Checking the nodes to the right then the left at each + * level upwards until root is reached. Free and destroy as needed. + * Data is copied into the @mast->bn. + * @mast: The maple_subtree_state. 
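+ *
+ * Return: true if data was pulled in from a neighbouring node, false if the
+ * walk reached the root without finding one.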
+ */ +static inline +bool mast_spanning_rebalance(struct maple_subtree_state *mast) +{ + struct ma_state r_tmp = *mast->orig_r; + struct ma_state l_tmp = *mast->orig_l; + struct maple_enode *ancestor = NULL; + unsigned char start, end; + unsigned char depth = 0; + + r_tmp = *mast->orig_r; + l_tmp = *mast->orig_l; + do { + mas_ascend(mast->orig_r); + mas_ascend(mast->orig_l); + depth++; + if (!ancestor && + (mast->orig_r->node == mast->orig_l->node)) { + ancestor = mast->orig_r->node; + end = mast->orig_r->offset - 1; + start = mast->orig_l->offset + 1; + } + + if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { + if (!ancestor) { + ancestor = mast->orig_r->node; + start = 0; + } + + mast->orig_r->offset++; + do { + mas_descend(mast->orig_r); + mast->orig_r->offset = 0; + depth--; + } while (depth); + + mast_rebalance_next(mast); + do { + unsigned char l_off = 0; + struct maple_enode *child = r_tmp.node; + + mas_ascend(&r_tmp); + if (ancestor == r_tmp.node) + l_off = start; + + if (r_tmp.offset) + r_tmp.offset--; + + if (l_off < r_tmp.offset) + mas_topiary_range(&r_tmp, mast->destroy, + l_off, r_tmp.offset); + + if (l_tmp.node != child) + mat_add(mast->free, child); + + } while (r_tmp.node != ancestor); + + *mast->orig_l = l_tmp; + return true; + + } else if (mast->orig_l->offset != 0) { + if (!ancestor) { + ancestor = mast->orig_l->node; + end = mas_data_end(mast->orig_l); + } + + mast->orig_l->offset--; + do { + mas_descend(mast->orig_l); + mast->orig_l->offset = + mas_data_end(mast->orig_l); + depth--; + } while (depth); + + mast_rebalance_prev(mast); + do { + unsigned char r_off; + struct maple_enode *child = l_tmp.node; + + mas_ascend(&l_tmp); + if (ancestor == l_tmp.node) + r_off = end; + else + r_off = mas_data_end(&l_tmp); + + if (l_tmp.offset < r_off) + l_tmp.offset++; + + if (l_tmp.offset < r_off) + mas_topiary_range(&l_tmp, mast->destroy, + l_tmp.offset, r_off); + + if (r_tmp.node != child) + mat_add(mast->free, child); + + } while (l_tmp.node != ancestor); + + *mast->orig_r = r_tmp; + return true; + } + } while (!mte_is_root(mast->orig_r->node)); + + *mast->orig_r = r_tmp; + *mast->orig_l = l_tmp; + return false; +} + +/* + * mast_ascend_free() - Add current original maple state nodes to the free list + * and ascend. + * @mast: the maple subtree state. + * + * Ascend the original left and right sides and add the previous nodes to the + * free list. Set the slots to point to the correct location in the new nodes. + */ +static inline void +mast_ascend_free(struct maple_subtree_state *mast) +{ + MA_WR_STATE(wr_mas, mast->orig_r, NULL); + struct maple_enode *left = mast->orig_l->node; + struct maple_enode *right = mast->orig_r->node; + + mas_ascend(mast->orig_l); + mas_ascend(mast->orig_r); + mat_add(mast->free, left); + + if (left != right) + mat_add(mast->free, right); + + mast->orig_r->offset = 0; + mast->orig_r->index = mast->r->max; + /* last should be larger than or equal to index */ + if (mast->orig_r->last < mast->orig_r->index) + mast->orig_r->last = mast->orig_r->index; + /* + * The node may not contain the value so set slot to ensure all + * of the nodes contents are freed or destroyed. 
+ */ + wr_mas.type = mte_node_type(mast->orig_r->node); + mas_wr_node_walk(&wr_mas); + /* Set up the left side of things */ + mast->orig_l->offset = 0; + mast->orig_l->index = mast->l->min; + wr_mas.mas = mast->orig_l; + wr_mas.type = mte_node_type(mast->orig_l->node); + mas_wr_node_walk(&wr_mas); + + mast->bn->type = wr_mas.type; +} + +/* + * mas_new_ma_node() - Create and return a new maple node. Helper function. + * @mas: the maple state with the allocations. + * @b_node: the maple_big_node with the type encoding. + * + * Use the node type from the maple_big_node to allocate a new node from the + * ma_state. This function exists mainly for code readability. + * + * Return: A new maple encoded node + */ +static inline struct maple_enode +*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node) +{ + return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type); +} + +/* + * mas_mab_to_node() - Set up right and middle nodes + * + * @mas: the maple state that contains the allocations. + * @b_node: the node which contains the data. + * @left: The pointer which will have the left node + * @right: The pointer which may have the right node + * @middle: the pointer which may have the middle node (rare) + * @mid_split: the split location for the middle node + * + * Return: the split of left. + */ +static inline unsigned char mas_mab_to_node(struct ma_state *mas, + struct maple_big_node *b_node, struct maple_enode **left, + struct maple_enode **right, struct maple_enode **middle, + unsigned char *mid_split, unsigned long min) +{ + unsigned char split = 0; + unsigned char slot_count = mt_slots[b_node->type]; + + *left = mas_new_ma_node(mas, b_node); + *right = NULL; + *middle = NULL; + *mid_split = 0; + + if (b_node->b_end < slot_count) { + split = b_node->b_end; + } else { + split = mab_calc_split(mas, b_node, mid_split, min); + *right = mas_new_ma_node(mas, b_node); + } + + if (*mid_split) + *middle = mas_new_ma_node(mas, b_node); + + return split; + +} + +/* + * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end + * pointer. + * @b_node - the big node to add the entry + * @mas - the maple state to get the pivot (mas->max) + * @entry - the entry to add, if NULL nothing happens. + */ +static inline void mab_set_b_end(struct maple_big_node *b_node, + struct ma_state *mas, + void *entry) +{ + if (!entry) + return; + + b_node->slot[b_node->b_end] = entry; + if (mt_is_alloc(mas->tree)) + b_node->gap[b_node->b_end] = mas_max_gap(mas); + b_node->pivot[b_node->b_end++] = mas->max; +} + +/* + * mas_set_split_parent() - combine_then_separate helper function. Sets the parent + * of @mas->node to either @left or @right, depending on @slot and @split + * + * @mas - the maple state with the node that needs a parent + * @left - possible parent 1 + * @right - possible parent 2 + * @slot - the slot the mas->node was placed + * @split - the split location between @left and @right + */ +static inline void mas_set_split_parent(struct ma_state *mas, + struct maple_enode *left, + struct maple_enode *right, + unsigned char *slot, unsigned char split) +{ + if (mas_is_none(mas)) + return; + + if ((*slot) <= split) + mte_set_parent(mas->node, left, *slot); + else if (right) + mte_set_parent(mas->node, right, (*slot) - split - 1); + + (*slot)++; +} + +/* + * mte_mid_split_check() - Check if the next node passes the mid-split + * @**l: Pointer to left encoded maple node. + * @**m: Pointer to middle encoded maple node. + * @**r: Pointer to right encoded maple node. 
+ * @slot: The offset + * @*split: The split location. + * @mid_split: The middle split. + */ +static inline void mte_mid_split_check(struct maple_enode **l, + struct maple_enode **r, + struct maple_enode *right, + unsigned char slot, + unsigned char *split, + unsigned char mid_split) +{ + if (*r == right) + return; + + if (slot < mid_split) + return; + + *l = *r; + *r = right; + *split = mid_split; +} + +/* + * mast_set_split_parents() - Helper function to set three nodes parents. Slot + * is taken from @mast->l. + * @mast - the maple subtree state + * @left - the left node + * @right - the right node + * @split - the split location. + */ +static inline void mast_set_split_parents(struct maple_subtree_state *mast, + struct maple_enode *left, + struct maple_enode *middle, + struct maple_enode *right, + unsigned char split, + unsigned char mid_split) +{ + unsigned char slot; + struct maple_enode *l = left; + struct maple_enode *r = right; + + if (mas_is_none(mast->l)) + return; + + if (middle) + r = middle; + + slot = mast->l->offset; + + mte_mid_split_check(&l, &r, right, slot, &split, mid_split); + mas_set_split_parent(mast->l, l, r, &slot, split); + + mte_mid_split_check(&l, &r, right, slot, &split, mid_split); + mas_set_split_parent(mast->m, l, r, &slot, split); + + mte_mid_split_check(&l, &r, right, slot, &split, mid_split); + mas_set_split_parent(mast->r, l, r, &slot, split); +} + +/* + * mas_wmb_replace() - Write memory barrier and replace + * @mas: The maple state + * @free: the maple topiary list of nodes to free + * @destroy: The maple topiary list of nodes to destroy (walk and free) + * + * Updates gap as necessary. + */ +static inline void mas_wmb_replace(struct ma_state *mas, + struct ma_topiary *free, + struct ma_topiary *destroy) +{ + /* All nodes must see old data as dead prior to replacing that data */ + smp_wmb(); /* Needed for RCU */ + + /* Insert the new data in the tree */ + mas_replace(mas, true); + + if (!mte_is_leaf(mas->node)) + mas_descend_adopt(mas); + + mas_mat_free(mas, free); + + if (destroy) + mas_mat_destroy(mas, destroy); + + if (mte_is_leaf(mas->node)) + return; + + mas_update_gap(mas); +} + +/* + * mast_new_root() - Set a new tree root during subtree creation + * @mast: The maple subtree state + * @mas: The maple state + */ +static inline void mast_new_root(struct maple_subtree_state *mast, + struct ma_state *mas) +{ + mas_mn(mast->l)->parent = + ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); + if (!mte_dead_node(mast->orig_l->node) && + !mte_is_root(mast->orig_l->node)) { + do { + mast_ascend_free(mast); + mast_topiary(mast); + } while (!mte_is_root(mast->orig_l->node)); + } + if ((mast->orig_l->node != mas->node) && + (mast->l->depth > mas_mt_height(mas))) { + mat_add(mast->free, mas->node); + } +} + +/* + * mast_cp_to_nodes() - Copy data out to nodes. + * @mast: The maple subtree state + * @left: The left encoded maple node + * @middle: The middle encoded maple node + * @right: The right encoded maple node + * @split: The location to split between left and (middle ? middle : right) + * @mid_split: The location to split between middle and right. 
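+ *
+ * Data from @mast->bn is copied into up to three nodes: offsets 0 to @split
+ * go to the left node, @split + 1 to @mid_split to the middle node (when one
+ * exists), and the remainder to the right node.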
+ */ +static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, + struct maple_enode *left, struct maple_enode *middle, + struct maple_enode *right, unsigned char split, unsigned char mid_split) +{ + bool new_lmax = true; + + mast->l->node = mte_node_or_none(left); + mast->m->node = mte_node_or_none(middle); + mast->r->node = mte_node_or_none(right); + + mast->l->min = mast->orig_l->min; + if (split == mast->bn->b_end) { + mast->l->max = mast->orig_r->max; + new_lmax = false; + } + + mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); + + if (middle) { + mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); + mast->m->min = mast->bn->pivot[split] + 1; + split = mid_split; + } + + mast->r->max = mast->orig_r->max; + if (right) { + mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); + mast->r->min = mast->bn->pivot[split] + 1; + } +} + +/* + * mast_combine_cp_left - Copy in the original left side of the tree into the + * combined data set in the maple subtree state big node. + * @mast: The maple subtree state + */ +static inline void mast_combine_cp_left(struct maple_subtree_state *mast) +{ + unsigned char l_slot = mast->orig_l->offset; + + if (!l_slot) + return; + + mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); +} + +/* + * mast_combine_cp_right: Copy in the original right side of the tree into the + * combined data set in the maple subtree state big node. + * @mast: The maple subtree state + */ +static inline void mast_combine_cp_right(struct maple_subtree_state *mast) +{ + if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) + return; + + mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, + mt_slot_count(mast->orig_r->node), mast->bn, + mast->bn->b_end); + mast->orig_r->last = mast->orig_r->max; +} + +/* + * mast_sufficient: Check if the maple subtree state has enough data in the big + * node to create at least one sufficient node + * @mast: the maple subtree state + */ +static inline bool mast_sufficient(struct maple_subtree_state *mast) +{ + if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) + return true; + + return false; +} + +/* + * mast_overflow: Check if there is too much data in the subtree state for a + * single node. 
+ * @mast: The maple subtree state + */ +static inline bool mast_overflow(struct maple_subtree_state *mast) +{ + if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) + return true; + + return false; +} + +static inline void *mtree_range_walk(struct ma_state *mas) +{ + unsigned long *pivots; + unsigned char offset; + struct maple_node *node; + struct maple_enode *next, *last; + enum maple_type type; + void __rcu **slots; + unsigned char end; + unsigned long max, min; + unsigned long prev_max, prev_min; + + last = next = mas->node; + prev_min = min = mas->min; + max = mas->max; + do { + offset = 0; + last = next; + node = mte_to_node(next); + type = mte_node_type(next); + pivots = ma_pivots(node, type); + end = ma_data_end(node, type, pivots, max); + if (unlikely(ma_dead_node(node))) + goto dead_node; + + if (pivots[offset] >= mas->index) { + prev_max = max; + prev_min = min; + max = pivots[offset]; + goto next; + } + + do { + offset++; + } while ((offset < end) && (pivots[offset] < mas->index)); + + prev_min = min; + min = pivots[offset - 1] + 1; + prev_max = max; + if (likely(offset < end && pivots[offset])) + max = pivots[offset]; + +next: + slots = ma_slots(node, type); + next = mt_slot(mas->tree, slots, offset); + if (unlikely(ma_dead_node(node))) + goto dead_node; + } while (!ma_is_leaf(type)); + + mas->offset = offset; + mas->index = min; + mas->last = max; + mas->min = prev_min; + mas->max = prev_max; + mas->node = last; + return (void *) next; + +dead_node: + mas_reset(mas); + return NULL; +} + +/* + * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. + * @mas: The starting maple state + * @mast: The maple_subtree_state, keeps track of 4 maple states. + * @count: The estimated count of iterations needed. + * + * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root + * is hit. First @b_node is split into two entries which are inserted into the + * next iteration of the loop. @b_node is returned populated with the final + * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the + * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last + * to account of what has been copied into the new sub-tree. The update of + * orig_l_mas->last is used in mas_consume to find the slots that will need to + * be either freed or destroyed. orig_l_mas->depth keeps track of the height of + * the new sub-tree in case the sub-tree becomes the full tree. + * + * Return: the number of elements in b_node during the last loop. + */ +static int mas_spanning_rebalance(struct ma_state *mas, + struct maple_subtree_state *mast, unsigned char count) +{ + unsigned char split, mid_split; + unsigned char slot = 0; + struct maple_enode *left = NULL, *middle = NULL, *right = NULL; + + MA_STATE(l_mas, mas->tree, mas->index, mas->index); + MA_STATE(r_mas, mas->tree, mas->index, mas->last); + MA_STATE(m_mas, mas->tree, mas->index, mas->index); + MA_TOPIARY(free, mas->tree); + MA_TOPIARY(destroy, mas->tree); + + /* + * The tree needs to be rebalanced and leaves need to be kept at the same level. + * Rebalancing is done by use of the ``struct maple_topiary``. 
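+	 *
+	 * Two topiary lists are used: the free list collects nodes that can
+	 * simply be returned to the allocator, while the destroy list collects
+	 * sub-trees that must still be walked before being freed.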
+ */ + mast->l = &l_mas; + mast->m = &m_mas; + mast->r = &r_mas; + mast->free = &free; + mast->destroy = &destroy; + l_mas.node = r_mas.node = m_mas.node = MAS_NONE; + if (!mas_is_root_limits(mast->orig_l) && + unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) + mast_spanning_rebalance(mast); + + mast->orig_l->depth = 0; + + /* + * Each level of the tree is examined and balanced, pushing data to the left or + * right, or rebalancing against left or right nodes is employed to avoid + * rippling up the tree to limit the amount of churn. Once a new sub-section of + * the tree is created, there may be a mix of new and old nodes. The old nodes + * will have the incorrect parent pointers and currently be in two trees: the + * original tree and the partially new tree. To remedy the parent pointers in + * the old tree, the new data is swapped into the active tree and a walk down + * the tree is performed and the parent pointers are updated. + * See mas_descend_adopt() for more information.. + */ + while (count--) { + mast->bn->b_end--; + mast->bn->type = mte_node_type(mast->orig_l->node); + split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, + &mid_split, mast->orig_l->min); + mast_set_split_parents(mast, left, middle, right, split, + mid_split); + mast_cp_to_nodes(mast, left, middle, right, split, mid_split); + + /* + * Copy data from next level in the tree to mast->bn from next + * iteration + */ + memset(mast->bn, 0, sizeof(struct maple_big_node)); + mast->bn->type = mte_node_type(left); + mast->orig_l->depth++; + + /* Root already stored in l->node. */ + if (mas_is_root_limits(mast->l)) + goto new_root; + + mast_ascend_free(mast); + mast_combine_cp_left(mast); + l_mas.offset = mast->bn->b_end; + mab_set_b_end(mast->bn, &l_mas, left); + mab_set_b_end(mast->bn, &m_mas, middle); + mab_set_b_end(mast->bn, &r_mas, right); + + /* Copy anything necessary out of the right node. */ + mast_combine_cp_right(mast); + mast_topiary(mast); + mast->orig_l->last = mast->orig_l->max; + + if (mast_sufficient(mast)) + continue; + + if (mast_overflow(mast)) + continue; + + /* May be a new root stored in mast->bn */ + if (mas_is_root_limits(mast->orig_l)) + break; + + if (!mast_spanning_rebalance(mast)) + break; + + /* rebalancing from other nodes may require another loop. */ + if (!count) + count++; + } + + l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), + mte_node_type(mast->orig_l->node)); + mast->orig_l->depth++; + mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); + mte_set_parent(left, l_mas.node, slot); + if (middle) + mte_set_parent(middle, l_mas.node, ++slot); + + if (right) + mte_set_parent(right, l_mas.node, ++slot); + + if (mas_is_root_limits(mast->l)) { +new_root: + mast_new_root(mast, mas); + } else { + mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; + } + + if (!mte_dead_node(mast->orig_l->node)) + mat_add(&free, mast->orig_l->node); + + mas->depth = mast->orig_l->depth; + *mast->orig_l = l_mas; + mte_set_node_dead(mas->node); + + /* Set up mas for insertion. */ + mast->orig_l->depth = mas->depth; + mast->orig_l->alloc = mas->alloc; + *mas = *mast->orig_l; + mas_wmb_replace(mas, &free, &destroy); + mtree_range_walk(mas); + return mast->bn->b_end; +} + +/* + * mas_rebalance() - Rebalance a given node. + * @mas: The maple state + * @b_node: The big maple node. + * + * Rebalance two nodes into a single node or two new nodes that are sufficient. + * Continue upwards until tree is sufficient. 
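+ * Enough nodes for the worst case (1 + tree height * 3) are preallocated so
+ * the rebalance does not need to allocate once it has started.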
+ * + * Return: the number of elements in b_node during the last loop. + */ +static inline int mas_rebalance(struct ma_state *mas, + struct maple_big_node *b_node) +{ + char empty_count = mas_mt_height(mas); + struct maple_subtree_state mast; + unsigned char shift, b_end = ++b_node->b_end; + + MA_STATE(l_mas, mas->tree, mas->index, mas->last); + MA_STATE(r_mas, mas->tree, mas->index, mas->last); + + trace_ma_op(__func__, mas); + + /* + * Rebalancing occurs if a node is insufficient. Data is rebalanced + * against the node to the right if it exists, otherwise the node to the + * left of this node is rebalanced against this node. If rebalancing + * causes just one node to be produced instead of two, then the parent + * is also examined and rebalanced if it is insufficient. Every level + * tries to combine the data in the same way. If one node contains the + * entire range of the tree, then that node is used as a new root node. + */ + mas_node_count(mas, 1 + empty_count * 3); + if (mas_is_err(mas)) + return 0; + + mast.orig_l = &l_mas; + mast.orig_r = &r_mas; + mast.bn = b_node; + mast.bn->type = mte_node_type(mas->node); + + l_mas = r_mas = *mas; + + if (mas_next_sibling(&r_mas)) { + mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); + r_mas.last = r_mas.index = r_mas.max; + } else { + mas_prev_sibling(&l_mas); + shift = mas_data_end(&l_mas) + 1; + mab_shift_right(b_node, shift); + mas->offset += shift; + mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); + b_node->b_end = shift + b_end; + l_mas.index = l_mas.last = l_mas.min; + } + + return mas_spanning_rebalance(mas, &mast, empty_count); +} + +/* + * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple + * state. + * @mas: The maple state + * @end: The end of the left-most node. + * + * During a mass-insert event (such as forking), it may be necessary to + * rebalance the left-most node when it is not sufficient. + */ +static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) +{ + enum maple_type mt = mte_node_type(mas->node); + struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; + struct maple_enode *eparent; + unsigned char offset, tmp, split = mt_slots[mt] / 2; + void __rcu **l_slots, **slots; + unsigned long *l_pivs, *pivs, gap; + bool in_rcu = mt_in_rcu(mas->tree); + + MA_STATE(l_mas, mas->tree, mas->index, mas->last); + + l_mas = *mas; + mas_prev_sibling(&l_mas); + + /* set up node. */ + if (in_rcu) { + /* Allocate for both left and right as well as parent. 
*/ + mas_node_count(mas, 3); + if (mas_is_err(mas)) + return; + + newnode = mas_pop_node(mas); + } else { + newnode = &reuse; + } + + node = mas_mn(mas); + newnode->parent = node->parent; + slots = ma_slots(newnode, mt); + pivs = ma_pivots(newnode, mt); + left = mas_mn(&l_mas); + l_slots = ma_slots(left, mt); + l_pivs = ma_pivots(left, mt); + if (!l_slots[split]) + split++; + tmp = mas_data_end(&l_mas) - split; + + memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); + memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); + pivs[tmp] = l_mas.max; + memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); + memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); + + l_mas.max = l_pivs[split]; + mas->min = l_mas.max + 1; + eparent = mt_mk_node(mte_parent(l_mas.node), + mas_parent_enum(&l_mas, l_mas.node)); + tmp += end; + if (!in_rcu) { + unsigned char max_p = mt_pivots[mt]; + unsigned char max_s = mt_slots[mt]; + + if (tmp < max_p) + memset(pivs + tmp, 0, + sizeof(unsigned long *) * (max_p - tmp)); + + if (tmp < mt_slots[mt]) + memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); + + memcpy(node, newnode, sizeof(struct maple_node)); + ma_set_meta(node, mt, 0, tmp - 1); + mte_set_pivot(eparent, mte_parent_slot(l_mas.node), + l_pivs[split]); + + /* Remove data from l_pivs. */ + tmp = split + 1; + memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); + memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); + ma_set_meta(left, mt, 0, split); + + goto done; + } + + /* RCU requires replacing both l_mas, mas, and parent. */ + mas->node = mt_mk_node(newnode, mt); + ma_set_meta(newnode, mt, 0, tmp); + + new_left = mas_pop_node(mas); + new_left->parent = left->parent; + mt = mte_node_type(l_mas.node); + slots = ma_slots(new_left, mt); + pivs = ma_pivots(new_left, mt); + memcpy(slots, l_slots, sizeof(void *) * split); + memcpy(pivs, l_pivs, sizeof(unsigned long) * split); + ma_set_meta(new_left, mt, 0, split); + l_mas.node = mt_mk_node(new_left, mt); + + /* replace parent. */ + offset = mte_parent_slot(mas->node); + mt = mas_parent_enum(&l_mas, l_mas.node); + parent = mas_pop_node(mas); + slots = ma_slots(parent, mt); + pivs = ma_pivots(parent, mt); + memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); + rcu_assign_pointer(slots[offset], mas->node); + rcu_assign_pointer(slots[offset - 1], l_mas.node); + pivs[offset - 1] = l_mas.max; + eparent = mt_mk_node(parent, mt); +done: + gap = mas_leaf_max_gap(mas); + mte_set_gap(eparent, mte_parent_slot(mas->node), gap); + gap = mas_leaf_max_gap(&l_mas); + mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); + mas_ascend(mas); + + if (in_rcu) + mas_replace(mas, false); + + mas_update_gap(mas); +} + +/* + * mas_split_final_node() - Split the final node in a subtree operation. + * @mast: the maple subtree state + * @mas: The maple state + * @height: The height of the tree in case it's a new root. + */ +static inline bool mas_split_final_node(struct maple_subtree_state *mast, + struct ma_state *mas, int height) +{ + struct maple_enode *ancestor; + + if (mte_is_root(mas->node)) { + if (mt_is_alloc(mas->tree)) + mast->bn->type = maple_arange_64; + else + mast->bn->type = maple_range_64; + mas->depth = height; + } + /* + * Only a single node is used here, could be root. + * The Big_node data should just fit in a single node. 
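+	 * The new ancestor takes over the old node's parent pointer and becomes
+	 * the parent of both halves before the big node data is copied into it.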
+ */ + ancestor = mas_new_ma_node(mas, mast->bn); + mte_set_parent(mast->l->node, ancestor, mast->l->offset); + mte_set_parent(mast->r->node, ancestor, mast->r->offset); + mte_to_node(ancestor)->parent = mas_mn(mas)->parent; + + mast->l->node = ancestor; + mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); + mas->offset = mast->bn->b_end - 1; + return true; +} + +/* + * mast_fill_bnode() - Copy data into the big node in the subtree state + * @mast: The maple subtree state + * @mas: the maple state + * @skip: The number of entries to skip for new nodes insertion. + */ +static inline void mast_fill_bnode(struct maple_subtree_state *mast, + struct ma_state *mas, + unsigned char skip) +{ + bool cp = true; + struct maple_enode *old = mas->node; + unsigned char split; + + memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); + memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); + memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); + mast->bn->b_end = 0; + + if (mte_is_root(mas->node)) { + cp = false; + } else { + mas_ascend(mas); + mat_add(mast->free, old); + mas->offset = mte_parent_slot(mas->node); + } + + if (cp && mast->l->offset) + mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); + + split = mast->bn->b_end; + mab_set_b_end(mast->bn, mast->l, mast->l->node); + mast->r->offset = mast->bn->b_end; + mab_set_b_end(mast->bn, mast->r, mast->r->node); + if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) + cp = false; + + if (cp) + mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, + mast->bn, mast->bn->b_end); + + mast->bn->b_end--; + mast->bn->type = mte_node_type(mas->node); +} + +/* + * mast_split_data() - Split the data in the subtree state big node into regular + * nodes. + * @mast: The maple subtree state + * @mas: The maple state + * @split: The location to split the big node + */ +static inline void mast_split_data(struct maple_subtree_state *mast, + struct ma_state *mas, unsigned char split) +{ + unsigned char p_slot; + + mab_mas_cp(mast->bn, 0, split, mast->l, true); + mte_set_pivot(mast->r->node, 0, mast->r->max); + mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); + mast->l->offset = mte_parent_slot(mas->node); + mast->l->max = mast->bn->pivot[split]; + mast->r->min = mast->l->max + 1; + if (mte_is_leaf(mas->node)) + return; + + p_slot = mast->orig_l->offset; + mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, + &p_slot, split); + mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, + &p_slot, split); +} + +/* + * mas_push_data() - Instead of splitting a node, it is beneficial to push the + * data to the right or left node if there is room. + * @mas: The maple state + * @height: The current height of the maple state + * @mast: The maple subtree state + * @left: Push left or not. + * + * Keeping the height of the tree low means faster lookups. + * + * Return: True if pushed, false otherwise. 
+ */ +static inline bool mas_push_data(struct ma_state *mas, int height, + struct maple_subtree_state *mast, bool left) +{ + unsigned char slot_total = mast->bn->b_end; + unsigned char end, space, split; + + MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); + tmp_mas = *mas; + tmp_mas.depth = mast->l->depth; + + if (left && !mas_prev_sibling(&tmp_mas)) + return false; + else if (!left && !mas_next_sibling(&tmp_mas)) + return false; + + end = mas_data_end(&tmp_mas); + slot_total += end; + space = 2 * mt_slot_count(mas->node) - 2; + /* -2 instead of -1 to ensure there isn't a triple split */ + if (ma_is_leaf(mast->bn->type)) + space--; + + if (mas->max == ULONG_MAX) + space--; + + if (slot_total >= space) + return false; + + /* Get the data; Fill mast->bn */ + mast->bn->b_end++; + if (left) { + mab_shift_right(mast->bn, end + 1); + mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); + mast->bn->b_end = slot_total + 1; + } else { + mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); + } + + /* Configure mast for splitting of mast->bn */ + split = mt_slots[mast->bn->type] - 2; + if (left) { + /* Switch mas to prev node */ + mat_add(mast->free, mas->node); + *mas = tmp_mas; + /* Start using mast->l for the left side. */ + tmp_mas.node = mast->l->node; + *mast->l = tmp_mas; + } else { + mat_add(mast->free, tmp_mas.node); + tmp_mas.node = mast->r->node; + *mast->r = tmp_mas; + split = slot_total - split; + } + split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); + /* Update parent slot for split calculation. */ + if (left) + mast->orig_l->offset += end + 1; + + mast_split_data(mast, mas, split); + mast_fill_bnode(mast, mas, 2); + mas_split_final_node(mast, mas, height + 1); + return true; +} + +/* + * mas_split() - Split data that is too big for one node into two. + * @mas: The maple state + * @b_node: The maple big node + * Return: 1 on success, 0 on failure. + */ +static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) +{ + + struct maple_subtree_state mast; + int height = 0; + unsigned char mid_split, split = 0; + + /* + * Splitting is handled differently from any other B-tree; the Maple + * Tree splits upwards. Splitting up means that the split operation + * occurs when the walk of the tree hits the leaves and not on the way + * down. The reason for splitting up is that it is impossible to know + * how much space will be needed until the leaf is (or leaves are) + * reached. Since overwriting data is allowed and a range could + * overwrite more than one range or result in changing one entry into 3 + * entries, it is impossible to know if a split is required until the + * data is examined. + * + * Splitting is a balancing act between keeping allocations to a minimum + * and avoiding a 'jitter' event where a tree is expanded to make room + * for an entry followed by a contraction when the entry is removed. To + * accomplish the balance, there are empty slots remaining in both left + * and right nodes after a split. + */ + MA_STATE(l_mas, mas->tree, mas->index, mas->last); + MA_STATE(r_mas, mas->tree, mas->index, mas->last); + MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); + MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); + MA_TOPIARY(mat, mas->tree); + + trace_ma_op(__func__, mas); + mas->depth = mas_mt_height(mas); + /* Allocation failures will happen early. 
*/ + mas_node_count(mas, 1 + mas->depth * 2); + if (mas_is_err(mas)) + return 0; + + mast.l = &l_mas; + mast.r = &r_mas; + mast.orig_l = &prev_l_mas; + mast.orig_r = &prev_r_mas; + mast.free = &mat; + mast.bn = b_node; + + while (height++ <= mas->depth) { + if (mt_slots[b_node->type] > b_node->b_end) { + mas_split_final_node(&mast, mas, height); + break; + } + + l_mas = r_mas = *mas; + l_mas.node = mas_new_ma_node(mas, b_node); + r_mas.node = mas_new_ma_node(mas, b_node); + /* + * Another way that 'jitter' is avoided is to terminate a split up early if the + * left or right node has space to spare. This is referred to as "pushing left" + * or "pushing right" and is similar to the B* tree, except the nodes left or + * right can rarely be reused due to RCU, but the ripple upwards is halted which + * is a significant savings. + */ + /* Try to push left. */ + if (mas_push_data(mas, height, &mast, true)) + break; + + /* Try to push right. */ + if (mas_push_data(mas, height, &mast, false)) + break; + + split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); + mast_split_data(&mast, mas, split); + /* + * Usually correct, mab_mas_cp in the above call overwrites + * r->max. + */ + mast.r->max = mas->max; + mast_fill_bnode(&mast, mas, 1); + prev_l_mas = *mast.l; + prev_r_mas = *mast.r; + } + + /* Set the original node as dead */ + mat_add(mast.free, mas->node); + mas->node = l_mas.node; + mas_wmb_replace(mas, mast.free, NULL); + mtree_range_walk(mas); + return 1; +} + +/* + * mas_reuse_node() - Reuse the node to store the data. + * @wr_mas: The maple write state + * @bn: The maple big node + * @end: The end of the data. + * + * Will always return false in RCU mode. + * + * Return: True if node was reused, false otherwise. + */ +static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, + struct maple_big_node *bn, unsigned char end) +{ + /* Need to be rcu safe. */ + if (mt_in_rcu(wr_mas->mas->tree)) + return false; + + if (end > bn->b_end) { + int clear = mt_slots[wr_mas->type] - bn->b_end; + + memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); + memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); + } + mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); + return true; +} + +/* + * mas_commit_b_node() - Commit the big node into the tree. + * @wr_mas: The maple write state + * @b_node: The maple big node + * @end: The end of the data. 
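+ *
+ * The big node is rebalanced if it holds too little data, split if it holds
+ * too much, and otherwise written back either by reusing the current node or
+ * by allocating a single replacement.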
+ */ +static inline int mas_commit_b_node(struct ma_wr_state *wr_mas, + struct maple_big_node *b_node, unsigned char end) +{ + struct maple_node *node; + unsigned char b_end = b_node->b_end; + enum maple_type b_type = b_node->type; + + if ((b_end < mt_min_slots[b_type]) && + (!mte_is_root(wr_mas->mas->node)) && + (mas_mt_height(wr_mas->mas) > 1)) + return mas_rebalance(wr_mas->mas, b_node); + + if (b_end >= mt_slots[b_type]) + return mas_split(wr_mas->mas, b_node); + + if (mas_reuse_node(wr_mas, b_node, end)) + goto reuse_node; + + mas_node_count(wr_mas->mas, 1); + if (mas_is_err(wr_mas->mas)) + return 0; + + node = mas_pop_node(wr_mas->mas); + node->parent = mas_mn(wr_mas->mas)->parent; + wr_mas->mas->node = mt_mk_node(node, b_type); + mab_mas_cp(b_node, 0, b_end, wr_mas->mas, true); + + mas_replace(wr_mas->mas, false); +reuse_node: + mas_update_gap(wr_mas->mas); + return 1; +} + +/* + * mas_root_expand() - Expand a root to a node + * @mas: The maple state + * @entry: The entry to store into the tree + */ +static inline int mas_root_expand(struct ma_state *mas, void *entry) +{ + void *contents = mas_root_locked(mas); + enum maple_type type = maple_leaf_64; + struct maple_node *node; + void __rcu **slots; + unsigned long *pivots; + int slot = 0; + + mas_node_count(mas, 1); + if (unlikely(mas_is_err(mas))) + return 0; + + node = mas_pop_node(mas); + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + node->parent = ma_parent_ptr( + ((unsigned long)mas->tree | MA_ROOT_PARENT)); + mas->node = mt_mk_node(node, type); + + if (mas->index) { + if (contents) { + rcu_assign_pointer(slots[slot], contents); + if (likely(mas->index > 1)) + slot++; + } + pivots[slot++] = mas->index - 1; + } + + rcu_assign_pointer(slots[slot], entry); + mas->offset = slot; + pivots[slot] = mas->last; + if (mas->last != ULONG_MAX) + slot++; + mas->depth = 1; + mas_set_height(mas); + + /* swap the new root into the tree */ + rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); + ma_set_meta(node, maple_leaf_64, 0, slot); + return slot; +} + +static inline void mas_store_root(struct ma_state *mas, void *entry) +{ + if (likely((mas->last != 0) || (mas->index != 0))) + mas_root_expand(mas, entry); + else if (((unsigned long) (entry) & 3) == 2) + mas_root_expand(mas, entry); + else { + rcu_assign_pointer(mas->tree->ma_root, entry); + mas->node = MAS_START; + } +} + +/* + * mas_is_span_wr() - Check if the write needs to be treated as a write that + * spans the node. + * @mas: The maple state + * @piv: The pivot value being written + * @type: The maple node type + * @entry: The data to write + * + * Spanning writes are writes that start in one node and end in another OR if + * the write of a %NULL will cause the node to end with a %NULL. + * + * Return: True if this is a spanning write, false otherwise. + */ +static bool mas_is_span_wr(struct ma_wr_state *wr_mas) +{ + unsigned long max; + unsigned long last = wr_mas->mas->last; + unsigned long piv = wr_mas->r_max; + enum maple_type type = wr_mas->type; + void *entry = wr_mas->entry; + + /* Contained in this pivot */ + if (piv > last) + return false; + + max = wr_mas->mas->max; + if (unlikely(ma_is_leaf(type))) { + /* Fits in the node, but may span slots. */ + if (last < max) + return false; + + /* Writes to the end of the node but not null. */ + if ((last == max) && entry) + return false; + + /* + * Writing ULONG_MAX is not a spanning write regardless of the + * value being written as long as the range fits in the node. 
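+		 * Without this check a NULL store ending at ULONG_MAX in the
+		 * right-most leaf would always be treated as spanning even
+		 * though it fits in the node.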
+ */ + if ((last == ULONG_MAX) && (last == max)) + return false; + } else if (piv == last) { + if (entry) + return false; + + /* Detect spanning store wr walk */ + if (last == ULONG_MAX) + return false; + } + + trace_ma_write(__func__, wr_mas->mas, piv, entry); + + return true; +} + +static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas) +{ + wr_mas->mas->depth++; + wr_mas->type = mte_node_type(wr_mas->mas->node); + mas_wr_node_walk(wr_mas); + wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type); +} + +static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas) +{ + wr_mas->mas->max = wr_mas->r_max; + wr_mas->mas->min = wr_mas->r_min; + wr_mas->mas->node = wr_mas->content; + wr_mas->mas->offset = 0; +} +/* + * mas_wr_walk() - Walk the tree for a write. + * @wr_mas: The maple write state + * + * Uses mas_slot_locked() and does not need to worry about dead nodes. + * + * Return: True if it's contained in a node, false on spanning write. + */ +static bool mas_wr_walk(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + + while (true) { + mas_wr_walk_descend(wr_mas); + if (unlikely(mas_is_span_wr(wr_mas))) + return false; + + wr_mas->content = mas_slot_locked(mas, wr_mas->slots, + mas->offset); + if (ma_is_leaf(wr_mas->type)) + return true; + + mas_wr_walk_traverse(wr_mas); + } + + return true; +} + +static bool mas_wr_walk_index(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + + while (true) { + mas_wr_walk_descend(wr_mas); + wr_mas->content = mas_slot_locked(mas, wr_mas->slots, + mas->offset); + if (ma_is_leaf(wr_mas->type)) + return true; + mas_wr_walk_traverse(wr_mas); + + } + return true; +} +/* + * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs. + * @l_wr_mas: The left maple write state + * @r_wr_mas: The right maple write state + */ +static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, + struct ma_wr_state *r_wr_mas) +{ + struct ma_state *r_mas = r_wr_mas->mas; + struct ma_state *l_mas = l_wr_mas->mas; + unsigned char l_slot; + + l_slot = l_mas->offset; + if (!l_wr_mas->content) + l_mas->index = l_wr_mas->r_min; + + if ((l_mas->index == l_wr_mas->r_min) && + (l_slot && + !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) { + if (l_slot > 1) + l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1; + else + l_mas->index = l_mas->min; + + l_mas->offset = l_slot - 1; + } + + if (!r_wr_mas->content) { + if (r_mas->last < r_wr_mas->r_max) + r_mas->last = r_wr_mas->r_max; + r_mas->offset++; + } else if ((r_mas->last == r_wr_mas->r_max) && + (r_mas->last < r_mas->max) && + !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) { + r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots, + r_wr_mas->type, r_mas->offset + 1); + r_mas->offset++; + } +} + +static inline void *mas_state_walk(struct ma_state *mas) +{ + void *entry; + + entry = mas_start(mas); + if (mas_is_none(mas)) + return NULL; + + if (mas_is_ptr(mas)) + return entry; + + return mtree_range_walk(mas); +} + +/* + * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up + * to date. + * + * @mas: The maple state. + * + * Note: Leaves mas in undesirable state. + * Return: The entry for @mas->index or %NULL on dead node. 
+ */ +static inline void *mtree_lookup_walk(struct ma_state *mas) +{ + unsigned long *pivots; + unsigned char offset; + struct maple_node *node; + struct maple_enode *next; + enum maple_type type; + void __rcu **slots; + unsigned char end; + unsigned long max; + + next = mas->node; + max = ULONG_MAX; + do { + offset = 0; + node = mte_to_node(next); + type = mte_node_type(next); + pivots = ma_pivots(node, type); + end = ma_data_end(node, type, pivots, max); + if (unlikely(ma_dead_node(node))) + goto dead_node; + + if (pivots[offset] >= mas->index) + goto next; + + do { + offset++; + } while ((offset < end) && (pivots[offset] < mas->index)); + + if (likely(offset > end)) + max = pivots[offset]; + +next: + slots = ma_slots(node, type); + next = mt_slot(mas->tree, slots, offset); + if (unlikely(ma_dead_node(node))) + goto dead_node; + } while (!ma_is_leaf(type)); + + return (void *) next; + +dead_node: + mas_reset(mas); + return NULL; +} + +/* + * mas_new_root() - Create a new root node that only contains the entry passed + * in. + * @mas: The maple state + * @entry: The entry to store. + * + * Only valid when the index == 0 and the last == ULONG_MAX + * + * Return 0 on error, 1 on success. + */ +static inline int mas_new_root(struct ma_state *mas, void *entry) +{ + struct maple_enode *root = mas_root_locked(mas); + enum maple_type type = maple_leaf_64; + struct maple_node *node; + void __rcu **slots; + unsigned long *pivots; + + if (!entry && !mas->index && mas->last == ULONG_MAX) { + mas->depth = 0; + mas_set_height(mas); + rcu_assign_pointer(mas->tree->ma_root, entry); + mas->node = MAS_START; + goto done; + } + + mas_node_count(mas, 1); + if (mas_is_err(mas)) + return 0; + + node = mas_pop_node(mas); + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + node->parent = ma_parent_ptr( + ((unsigned long)mas->tree | MA_ROOT_PARENT)); + mas->node = mt_mk_node(node, type); + rcu_assign_pointer(slots[0], entry); + pivots[0] = mas->last; + mas->depth = 1; + mas_set_height(mas); + rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); + +done: + if (xa_is_node(root)) + mte_destroy_walk(root, mas->tree); + + return 1; +} +/* + * mas_wr_spanning_store() - Create a subtree with the store operation completed + * and new nodes where necessary, then place the sub-tree in the actual tree. + * Note that mas is expected to point to the node which caused the store to + * span. + * @wr_mas: The maple write state + * + * Return: 0 on error, positive on success. + */ +static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) +{ + struct maple_subtree_state mast; + struct maple_big_node b_node; + struct ma_state *mas; + unsigned char height; + + /* Left and Right side of spanning store */ + MA_STATE(l_mas, NULL, 0, 0); + MA_STATE(r_mas, NULL, 0, 0); + + MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); + MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); + + /* + * A store operation that spans multiple nodes is called a spanning + * store and is handled early in the store call stack by the function + * mas_is_span_wr(). When a spanning store is identified, the maple + * state is duplicated. The first maple state walks the left tree path + * to ``index``, the duplicate walks the right tree path to ``last``. + * The data in the two nodes are combined into a single node, two nodes, + * or possibly three nodes (see the 3-way split above). 
A ``NULL`` + * written to the last entry of a node is considered a spanning store as + * a rebalance is required for the operation to complete and an overflow + * of data may happen. + */ + mas = wr_mas->mas; + trace_ma_op(__func__, mas); + + if (unlikely(!mas->index && mas->last == ULONG_MAX)) + return mas_new_root(mas, wr_mas->entry); + /* + * Node rebalancing may occur due to this store, so there may be two new + * entries per level plus a new root. + */ + height = mas_mt_height(mas); + mas_node_count(mas, 1 + height * 3); + if (mas_is_err(mas)) + return 0; + + /* + * Set up right side. Need to get to the next offset after the spanning + * store to ensure it's not NULL and to combine both the next node and + * the node with the start together. + */ + r_mas = *mas; + /* Avoid overflow, walk to next slot in the tree. */ + if (r_mas.last + 1) + r_mas.last++; + + r_mas.index = r_mas.last; + mas_wr_walk_index(&r_wr_mas); + r_mas.last = r_mas.index = mas->last; + + /* Set up left side. */ + l_mas = *mas; + mas_wr_walk_index(&l_wr_mas); + + if (!wr_mas->entry) { + mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); + mas->offset = l_mas.offset; + mas->index = l_mas.index; + mas->last = l_mas.last = r_mas.last; + } + + memset(&b_node, 0, sizeof(struct maple_big_node)); + /* Copy l_mas and store the value in b_node. */ + mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); + /* Copy r_mas into b_node. */ + if (r_mas.offset <= r_wr_mas.node_end) + mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, + &b_node, b_node.b_end + 1); + /* Stop spanning searches by searching for just index. */ + l_mas.index = l_mas.last = mas->index; + + mast.bn = &b_node; + mast.orig_l = &l_mas; + mast.orig_r = &r_mas; + /* Combine l_mas and r_mas and split them up evenly again. */ + return mas_spanning_rebalance(mas, &mast, height + 1); +} + +/* + * mas_wr_node_store() - Attempt to store the value in a node + * @wr_mas: The maple write state + * + * Attempts to reuse the node, but may allocate. + * + * Return: True if stored, false otherwise + */ +static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + void __rcu **dst_slots; + unsigned long *dst_pivots; + unsigned char dst_offset; + unsigned char new_end = wr_mas->node_end; + unsigned char offset; + unsigned char node_slots = mt_slots[wr_mas->type]; + struct maple_node reuse, *newnode; + unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; + bool in_rcu = mt_in_rcu(mas->tree); + + offset = mas->offset; + if (mas->last == wr_mas->r_max) { + /* runs right to the end of the node */ + if (mas->last == mas->max) + new_end = offset; + /* don't copy this offset */ + wr_mas->offset_end++; + } else if (mas->last < wr_mas->r_max) { + /* new range ends in this range */ + if (unlikely(wr_mas->r_max == ULONG_MAX)) + mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); + + new_end++; + } else { + if (wr_mas->end_piv == mas->last) + wr_mas->offset_end++; + + new_end -= wr_mas->offset_end - offset - 1; + } + + /* new range starts within a range */ + if (wr_mas->r_min < mas->index) + new_end++; + + /* Not enough room */ + if (new_end >= node_slots) + return false; + + /* Not enough data. */ + if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && + !(mas->mas_flags & MA_STATE_BULK)) + return false; + + /* set up node. 
*/ + if (in_rcu) { + mas_node_count(mas, 1); + if (mas_is_err(mas)) + return false; + + newnode = mas_pop_node(mas); + } else { + memset(&reuse, 0, sizeof(struct maple_node)); + newnode = &reuse; + } + + newnode->parent = mas_mn(mas)->parent; + dst_pivots = ma_pivots(newnode, wr_mas->type); + dst_slots = ma_slots(newnode, wr_mas->type); + /* Copy from start to insert point */ + memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); + memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); + dst_offset = offset; + + /* Handle insert of new range starting after old range */ + if (wr_mas->r_min < mas->index) { + mas->offset++; + rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); + dst_pivots[dst_offset++] = mas->index - 1; + } + + /* Store the new entry and range end. */ + if (dst_offset < max_piv) + dst_pivots[dst_offset] = mas->last; + mas->offset = dst_offset; + rcu_assign_pointer(dst_slots[dst_offset++], wr_mas->entry); + + /* this range wrote to the end of the node. */ + if (wr_mas->offset_end > wr_mas->node_end) + goto done; + + /* Copy to the end of node if necessary. */ + copy_size = wr_mas->node_end - wr_mas->offset_end + 1; + memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, + sizeof(void *) * copy_size); + if (dst_offset < max_piv) { + if (copy_size > max_piv - dst_offset) + copy_size = max_piv - dst_offset; + memcpy(dst_pivots + dst_offset, wr_mas->pivots + wr_mas->offset_end, + sizeof(unsigned long) * copy_size); + } + +done: + if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1)) + dst_pivots[new_end] = mas->max; + + mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); + if (in_rcu) { + mas->node = mt_mk_node(newnode, wr_mas->type); + mas_replace(mas, false); + } else { + memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); + } + trace_ma_write(__func__, mas, 0, wr_mas->entry); + mas_update_gap(mas); + return true; +} + +/* + * mas_wr_slot_store: Attempt to store a value in a slot. + * @wr_mas: the maple write state + * + * Return: True if stored, false otherwise + */ +static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + unsigned long lmax; /* Logical max. */ + unsigned char offset = mas->offset; + + if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) || + (offset != wr_mas->node_end))) + return false; + + if (offset == wr_mas->node_end - 1) + lmax = mas->max; + else + lmax = wr_mas->pivots[offset + 1]; + + /* going to overwrite too many slots. */ + if (lmax < mas->last) + return false; + + if (wr_mas->r_min == mas->index) { + /* overwriting two or more ranges with one. */ + if (lmax == mas->last) + return false; + + /* Overwriting all of offset and a portion of offset + 1. */ + rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); + wr_mas->pivots[offset] = mas->last; + goto done; + } + + /* Doesn't end on the next range end. */ + if (lmax != mas->last) + return false; + + /* Overwriting a portion of offset and all of offset + 1 */ + if ((offset + 1 < mt_pivots[wr_mas->type]) && + (wr_mas->entry || wr_mas->pivots[offset + 1])) + wr_mas->pivots[offset + 1] = mas->last; + + rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); + wr_mas->pivots[offset] = mas->index - 1; + mas->offset++; /* Keep mas accurate. 
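+ * The new entry now lives at offset + 1, so advancing the offset keeps
+ * the maple state pointing at the slot that was just written.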
*/ + +done: + trace_ma_write(__func__, mas, 0, wr_mas->entry); + mas_update_gap(mas); + return true; +} + +static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) +{ + while ((wr_mas->mas->last > wr_mas->end_piv) && + (wr_mas->offset_end < wr_mas->node_end)) + wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end]; + + if (wr_mas->mas->last > wr_mas->end_piv) + wr_mas->end_piv = wr_mas->mas->max; +} + +static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + + if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end]) + mas->last = wr_mas->end_piv; + + /* Check next slot(s) if we are overwriting the end */ + if ((mas->last == wr_mas->end_piv) && + (wr_mas->node_end != wr_mas->offset_end) && + !wr_mas->slots[wr_mas->offset_end + 1]) { + wr_mas->offset_end++; + if (wr_mas->offset_end == wr_mas->node_end) + mas->last = mas->max; + else + mas->last = wr_mas->pivots[wr_mas->offset_end]; + wr_mas->end_piv = mas->last; + } + + if (!wr_mas->content) { + /* If this one is null, the next and prev are not */ + mas->index = wr_mas->r_min; + } else { + /* Check prev slot if we are overwriting the start */ + if (mas->index == wr_mas->r_min && mas->offset && + !wr_mas->slots[mas->offset - 1]) { + mas->offset--; + wr_mas->r_min = mas->index = + mas_safe_min(mas, wr_mas->pivots, mas->offset); + wr_mas->r_max = wr_mas->pivots[mas->offset]; + } + } +} + +static inline bool mas_wr_append(struct ma_wr_state *wr_mas) +{ + unsigned char end = wr_mas->node_end; + unsigned char new_end = end + 1; + struct ma_state *mas = wr_mas->mas; + unsigned char node_pivots = mt_pivots[wr_mas->type]; + + if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) { + if (new_end < node_pivots) + wr_mas->pivots[new_end] = wr_mas->pivots[end]; + + if (new_end < node_pivots) + ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); + + rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry); + mas->offset = new_end; + wr_mas->pivots[end] = mas->index - 1; + + return true; + } + + if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) { + if (new_end < node_pivots) + wr_mas->pivots[new_end] = wr_mas->pivots[end]; + + rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); + if (new_end < node_pivots) + ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); + + wr_mas->pivots[end] = mas->last; + rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); + return true; + } + + return false; +} + +static inline void mas_wr_modify(struct ma_wr_state *wr_mas) +{ + unsigned char node_slots; + unsigned char node_size; + struct ma_state *mas = wr_mas->mas; + struct maple_big_node b_node; + + /* Direct replacement */ + if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { + rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); + if (!!wr_mas->entry ^ !!wr_mas->content) + mas_update_gap(mas); + return; + } + + /* Attempt to append */ + node_slots = mt_slots[wr_mas->type]; + node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2; + if (mas->max == ULONG_MAX) + node_size++; + + /* slot and node store will not fit, go to the slow path */ + if (unlikely(node_size >= node_slots)) + goto slow_path; + + if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) && + (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) { + if (!wr_mas->content || !wr_mas->entry) + mas_update_gap(mas); + return; + } + + if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas)) + return; + else if (mas_wr_node_store(wr_mas)) + 
return;
+
+ if (mas_is_err(mas))
+ return;
+
+slow_path:
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
+}
+
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ *
+ * Return: The contents that were stored at the index.
+ */
+static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ wr_mas->content = mas_start(mas);
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ if (unlikely(!mas_wr_walk(wr_mas))) {
+ mas_wr_spanning_store(wr_mas);
+ return wr_mas->content;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas->end_piv = wr_mas->r_max;
+ mas_wr_end_piv(wr_mas);
+
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+
+ /* New root for a single pointer */
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas_new_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ mas_wr_modify(wr_mas);
+ return wr_mas->content;
+}
+
+/**
+ * mas_insert() - Internal call to insert a value
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * Return: %NULL on success, or the contents that already exist at the
+ * requested index. The maple state needs to be checked for error conditions.
+ */
+static inline void *mas_insert(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ /*
+ * Inserting a new range inserts either 0, 1, or 2 pivots within the
+ * tree. If the insert fits exactly into an existing gap with a value
+ * of NULL, then the slot only needs to be written with the new value.
+ * If the range being inserted is adjacent to another range, then only a
+ * single pivot needs to be inserted (as well as writing the entry). If
+ * the new range is within a gap but does not touch any other ranges,
+ * then two pivots need to be inserted: the start - 1, and the end. As
+ * usual, the entry must be written. Most operations require a new node
+ * to be allocated and replace an existing node to ensure RCU safety,
+ * when in RCU mode. The exception to requiring a newly allocated node
+ * is when inserting at the end of a node (appending). When done
+ * carefully, appending can reuse the node in place.
+ */
+ wr_mas.content = mas_start(mas);
+ if (wr_mas.content)
+ goto exists;
+
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, entry);
+ return NULL;
+ }
+
+ /* spanning writes always overwrite something */
+ if (!mas_wr_walk(&wr_mas))
+ goto exists;
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
+
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+
+ if (!entry)
+ return NULL;
+
+ mas_wr_modify(&wr_mas);
+ return wr_mas.content;
+
+exists:
+ mas_set_err(mas, -EEXIST);
+ return wr_mas.content;
+}
+
+/*
+ * mas_prev_node() - Find the prev non-null entry at the same level in the
+ * tree.
+ * @mas: The maple state
+ * @min: The lower limit to search
+ *
+ * The prev node value will be mas->node[mas->offset] or MAS_NONE.
+ * Return: 1 if the node is dead, 0 otherwise.
+ */ +static inline int mas_prev_node(struct ma_state *mas, unsigned long min) +{ + enum maple_type mt; + int offset, level; + void __rcu **slots; + struct maple_node *node; + struct maple_enode *enode; + unsigned long *pivots; + + if (mas_is_none(mas)) + return 0; + + level = 0; + do { + node = mas_mn(mas); + if (ma_is_root(node)) + goto no_entry; + + /* Walk up. */ + if (unlikely(mas_ascend(mas))) + return 1; + offset = mas->offset; + level++; + } while (!offset); + + offset--; + mt = mte_node_type(mas->node); + node = mas_mn(mas); + slots = ma_slots(node, mt); + pivots = ma_pivots(node, mt); + mas->max = pivots[offset]; + if (offset) + mas->min = pivots[offset - 1] + 1; + if (unlikely(ma_dead_node(node))) + return 1; + + if (mas->max < min) + goto no_entry_min; + + while (level > 1) { + level--; + enode = mas_slot(mas, slots, offset); + if (unlikely(ma_dead_node(node))) + return 1; + + mas->node = enode; + mt = mte_node_type(mas->node); + node = mas_mn(mas); + slots = ma_slots(node, mt); + pivots = ma_pivots(node, mt); + offset = ma_data_end(node, mt, pivots, mas->max); + if (offset) + mas->min = pivots[offset - 1] + 1; + + if (offset < mt_pivots[mt]) + mas->max = pivots[offset]; + + if (mas->max < min) + goto no_entry; + } + + mas->node = mas_slot(mas, slots, offset); + if (unlikely(ma_dead_node(node))) + return 1; + + mas->offset = mas_data_end(mas); + if (unlikely(mte_dead_node(mas->node))) + return 1; + + return 0; + +no_entry_min: + mas->offset = offset; + if (offset) + mas->min = pivots[offset - 1] + 1; +no_entry: + if (unlikely(ma_dead_node(node))) + return 1; + + mas->node = MAS_NONE; + return 0; +} + +/* + * mas_next_node() - Get the next node at the same level in the tree. + * @mas: The maple state + * @max: The maximum pivot value to check. + * + * The next value will be mas->node[mas->offset] or MAS_NONE. + * Return: 1 on dead node, 0 otherwise. + */ +static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, + unsigned long max) +{ + unsigned long min, pivot; + unsigned long *pivots; + struct maple_enode *enode; + int level = 0; + unsigned char offset; + enum maple_type mt; + void __rcu **slots; + + if (mas->max >= max) + goto no_entry; + + level = 0; + do { + if (ma_is_root(node)) + goto no_entry; + + min = mas->max + 1; + if (min > max) + goto no_entry; + + if (unlikely(mas_ascend(mas))) + return 1; + + offset = mas->offset; + level++; + node = mas_mn(mas); + mt = mte_node_type(mas->node); + pivots = ma_pivots(node, mt); + } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max))); + + slots = ma_slots(node, mt); + pivot = mas_safe_pivot(mas, pivots, ++offset, mt); + while (unlikely(level > 1)) { + /* Descend, if necessary */ + enode = mas_slot(mas, slots, offset); + if (unlikely(ma_dead_node(node))) + return 1; + + mas->node = enode; + level--; + node = mas_mn(mas); + mt = mte_node_type(mas->node); + slots = ma_slots(node, mt); + pivots = ma_pivots(node, mt); + offset = 0; + pivot = pivots[0]; + } + + enode = mas_slot(mas, slots, offset); + if (unlikely(ma_dead_node(node))) + return 1; + + mas->node = enode; + mas->min = min; + mas->max = pivot; + return 0; + +no_entry: + if (unlikely(ma_dead_node(node))) + return 1; + + mas->node = MAS_NONE; + return 0; +} + +/* + * mas_next_nentry() - Get the next node entry + * @mas: The maple state + * @max: The maximum value to check + * @*range_start: Pointer to store the start of the range. + * + * Sets @mas->offset to the offset of the next node entry, @mas->last to the + * pivot of the entry. 
+ * + * Return: The next entry, %NULL otherwise + */ +static inline void *mas_next_nentry(struct ma_state *mas, + struct maple_node *node, unsigned long max, enum maple_type type) +{ + unsigned char count; + unsigned long pivot; + unsigned long *pivots; + void __rcu **slots; + void *entry; + + if (mas->last == mas->max) { + mas->index = mas->max; + return NULL; + } + + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + mas->index = mas_safe_min(mas, pivots, mas->offset); + if (ma_dead_node(node)) + return NULL; + + if (mas->index > max) + return NULL; + + count = ma_data_end(node, type, pivots, mas->max); + if (mas->offset > count) + return NULL; + + while (mas->offset < count) { + pivot = pivots[mas->offset]; + entry = mas_slot(mas, slots, mas->offset); + if (ma_dead_node(node)) + return NULL; + + if (entry) + goto found; + + if (pivot >= max) + return NULL; + + mas->index = pivot + 1; + mas->offset++; + } + + if (mas->index > mas->max) { + mas->index = mas->last; + return NULL; + } + + pivot = mas_safe_pivot(mas, pivots, mas->offset, type); + entry = mas_slot(mas, slots, mas->offset); + if (ma_dead_node(node)) + return NULL; + + if (!pivot) + return NULL; + + if (!entry) + return NULL; + +found: + mas->last = pivot; + return entry; +} + +static inline void mas_rewalk(struct ma_state *mas, unsigned long index) +{ + +retry: + mas_set(mas, index); + mas_state_walk(mas); + if (mas_is_start(mas)) + goto retry; + + return; + +} + +/* + * mas_next_entry() - Internal function to get the next entry. + * @mas: The maple state + * @limit: The maximum range start. + * + * Set the @mas->node to the next entry and the range_start to + * the beginning value for the entry. Does not check beyond @limit. + * Sets @mas->index and @mas->last to the limit if it is hit. + * Restarts on dead nodes. + * + * Return: the next entry or %NULL. + */ +static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) +{ + void *entry = NULL; + struct maple_enode *prev_node; + struct maple_node *node; + unsigned char offset; + unsigned long last; + enum maple_type mt; + + last = mas->last; +retry: + offset = mas->offset; + prev_node = mas->node; + node = mas_mn(mas); + mt = mte_node_type(mas->node); + mas->offset++; + if (unlikely(mas->offset >= mt_slots[mt])) { + mas->offset = mt_slots[mt] - 1; + goto next_node; + } + + while (!mas_is_none(mas)) { + entry = mas_next_nentry(mas, node, limit, mt); + if (unlikely(ma_dead_node(node))) { + mas_rewalk(mas, last); + goto retry; + } + + if (likely(entry)) + return entry; + + if (unlikely((mas->index > limit))) + break; + +next_node: + prev_node = mas->node; + offset = mas->offset; + if (unlikely(mas_next_node(mas, node, limit))) { + mas_rewalk(mas, last); + goto retry; + } + mas->offset = 0; + node = mas_mn(mas); + mt = mte_node_type(mas->node); + } + + mas->index = mas->last = limit; + mas->offset = offset; + mas->node = prev_node; + return NULL; +} + +/* + * mas_prev_nentry() - Get the previous node entry. + * @mas: The maple state. + * @limit: The lower limit to check for a value. + * + * Return: the entry, %NULL otherwise. 
+ */ +static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit, + unsigned long index) +{ + unsigned long pivot, min; + unsigned char offset; + struct maple_node *mn; + enum maple_type mt; + unsigned long *pivots; + void __rcu **slots; + void *entry; + +retry: + if (!mas->offset) + return NULL; + + mn = mas_mn(mas); + mt = mte_node_type(mas->node); + offset = mas->offset - 1; + if (offset >= mt_slots[mt]) + offset = mt_slots[mt] - 1; + + slots = ma_slots(mn, mt); + pivots = ma_pivots(mn, mt); + if (offset == mt_pivots[mt]) + pivot = mas->max; + else + pivot = pivots[offset]; + + if (unlikely(ma_dead_node(mn))) { + mas_rewalk(mas, index); + goto retry; + } + + while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) || + !pivot)) + pivot = pivots[--offset]; + + min = mas_safe_min(mas, pivots, offset); + entry = mas_slot(mas, slots, offset); + if (unlikely(ma_dead_node(mn))) { + mas_rewalk(mas, index); + goto retry; + } + + if (likely(entry)) { + mas->offset = offset; + mas->last = pivot; + mas->index = min; + } + return entry; +} + +static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min) +{ + void *entry; + +retry: + while (likely(!mas_is_none(mas))) { + entry = mas_prev_nentry(mas, min, mas->index); + if (unlikely(mas->last < min)) + goto not_found; + + if (likely(entry)) + return entry; + + if (unlikely(mas_prev_node(mas, min))) { + mas_rewalk(mas, mas->index); + goto retry; + } + + mas->offset++; + } + + mas->offset--; +not_found: + mas->index = mas->last = min; + return NULL; +} + +/* + * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the + * highest gap address of a given size in a given node and descend. + * @mas: The maple state + * @size: The needed size. + * + * Return: True if found in a leaf, false otherwise. + * + */ +static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) +{ + enum maple_type type = mte_node_type(mas->node); + struct maple_node *node = mas_mn(mas); + unsigned long *pivots, *gaps; + void __rcu **slots; + unsigned long gap = 0; + unsigned long max, min, index; + unsigned char offset; + + if (unlikely(mas_is_err(mas))) + return true; + + if (ma_is_dense(type)) { + /* dense nodes. */ + mas->offset = (unsigned char)(mas->index - mas->min); + return true; + } + + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + gaps = ma_gaps(node, type); + offset = mas->offset; + min = mas_safe_min(mas, pivots, offset); + /* Skip out of bounds. */ + while (mas->last < min) + min = mas_safe_min(mas, pivots, --offset); + + max = mas_safe_pivot(mas, pivots, offset, type); + index = mas->index; + while (index <= max) { + gap = 0; + if (gaps) + gap = gaps[offset]; + else if (!mas_slot(mas, slots, offset)) + gap = max - min + 1; + + if (gap) { + if ((size <= gap) && (size <= mas->last - min + 1)) + break; + + if (!gaps) { + /* Skip the next slot, it cannot be a gap. */ + if (offset < 2) + goto ascend; + + offset -= 2; + max = pivots[offset]; + min = mas_safe_min(mas, pivots, offset); + continue; + } + } + + if (!offset) + goto ascend; + + offset--; + max = min - 1; + min = mas_safe_min(mas, pivots, offset); + } + + if (unlikely(index > max)) { + mas_set_err(mas, -EBUSY); + return false; + } + + if (unlikely(ma_is_leaf(type))) { + mas->offset = offset; + mas->min = min; + mas->max = min + gap - 1; + return true; + } + + /* descend, only happens under lock. 
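+ * The gap found so far lies somewhere below this offset, so step into
+ * the child node and continue the reverse search from its last slot.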
*/ + mas->node = mas_slot(mas, slots, offset); + mas->min = min; + mas->max = max; + mas->offset = mas_data_end(mas); + return false; + +ascend: + if (mte_is_root(mas->node)) + mas_set_err(mas, -EBUSY); + + return false; +} + +static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) +{ + enum maple_type type = mte_node_type(mas->node); + unsigned long pivot, min, gap = 0; + unsigned char count, offset; + unsigned long *gaps = NULL, *pivots = ma_pivots(mas_mn(mas), type); + void __rcu **slots = ma_slots(mas_mn(mas), type); + bool found = false; + + if (ma_is_dense(type)) { + mas->offset = (unsigned char)(mas->index - mas->min); + return true; + } + + gaps = ma_gaps(mte_to_node(mas->node), type); + offset = mas->offset; + count = mt_slots[type]; + min = mas_safe_min(mas, pivots, offset); + for (; offset < count; offset++) { + pivot = mas_safe_pivot(mas, pivots, offset, type); + if (offset && !pivot) + break; + + /* Not within lower bounds */ + if (mas->index > pivot) + goto next_slot; + + if (gaps) + gap = gaps[offset]; + else if (!mas_slot(mas, slots, offset)) + gap = min(pivot, mas->last) - max(mas->index, min) + 1; + else + goto next_slot; + + if (gap >= size) { + if (ma_is_leaf(type)) { + found = true; + goto done; + } + if (mas->index <= pivot) { + mas->node = mas_slot(mas, slots, offset); + mas->min = min; + mas->max = pivot; + offset = 0; + type = mte_node_type(mas->node); + count = mt_slots[type]; + break; + } + } +next_slot: + min = pivot + 1; + if (mas->last < min) { + mas_set_err(mas, -EBUSY); + return true; + } + } + + if (mte_is_root(mas->node)) + found = true; +done: + mas->offset = offset; + return found; +} + +/** + * mas_walk() - Search for @mas->index in the tree. + * @mas: The maple state. + * + * mas->index and mas->last will be set to the range if there is a value. If + * mas->node is MAS_NONE, reset to MAS_START. + * + * Return: the entry at the location or %NULL. + */ +void *mas_walk(struct ma_state *mas) +{ + void *entry; + +retry: + entry = mas_state_walk(mas); + if (mas_is_start(mas)) + goto retry; + + if (mas_is_ptr(mas)) { + if (!mas->index) { + mas->last = 0; + } else { + mas->index = 1; + mas->last = ULONG_MAX; + } + return entry; + } + + if (mas_is_none(mas)) { + mas->index = 0; + mas->last = ULONG_MAX; + } + + return entry; +} + +static inline bool mas_rewind_node(struct ma_state *mas) +{ + unsigned char slot; + + do { + if (mte_is_root(mas->node)) { + slot = mas->offset; + if (!slot) + return false; + } else { + mas_ascend(mas); + slot = mas->offset; + } + } while (!slot); + + mas->offset = --slot; + return true; +} + +/* + * mas_skip_node() - Internal function. Skip over a node. + * @mas: The maple state. + * + * Return: true if there is another node, false otherwise. + */ +static inline bool mas_skip_node(struct ma_state *mas) +{ + unsigned char slot, slot_count; + unsigned long *pivots; + enum maple_type mt; + + mt = mte_node_type(mas->node); + slot_count = mt_slots[mt] - 1; + do { + if (mte_is_root(mas->node)) { + slot = mas->offset; + if (slot > slot_count) { + mas_set_err(mas, -EBUSY); + return false; + } + } else { + mas_ascend(mas); + slot = mas->offset; + mt = mte_node_type(mas->node); + slot_count = mt_slots[mt] - 1; + } + } while (slot > slot_count); + + mas->offset = ++slot; + pivots = ma_pivots(mas_mn(mas), mt); + if (slot > 0) + mas->min = pivots[slot - 1] + 1; + + if (slot <= slot_count) + mas->max = pivots[slot]; + + return true; +} + +/* + * mas_awalk() - Allocation walk. 
Search from low address to high, for a gap of + * @size + * @mas: The maple state + * @size: The size of the gap required + * + * Search between @mas->index and @mas->last for a gap of @size. + */ +static inline void mas_awalk(struct ma_state *mas, unsigned long size) +{ + struct maple_enode *last = NULL; + + /* + * There are 4 options: + * go to child (descend) + * go back to parent (ascend) + * no gap found. (return, slot == MAPLE_NODE_SLOTS) + * found the gap. (return, slot != MAPLE_NODE_SLOTS) + */ + while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { + if (last == mas->node) + mas_skip_node(mas); + else + last = mas->node; + } +} + +/* + * mas_fill_gap() - Fill a located gap with @entry. + * @mas: The maple state + * @entry: The value to store + * @slot: The offset into the node to store the @entry + * @size: The size of the entry + * @index: The start location + */ +static inline void mas_fill_gap(struct ma_state *mas, void *entry, + unsigned char slot, unsigned long size, unsigned long *index) +{ + MA_WR_STATE(wr_mas, mas, entry); + unsigned char pslot = mte_parent_slot(mas->node); + struct maple_enode *mn = mas->node; + unsigned long *pivots; + enum maple_type ptype; + /* + * mas->index is the start address for the search + * which may no longer be needed. + * mas->last is the end address for the search + */ + + *index = mas->index; + mas->last = mas->index + size - 1; + + /* + * It is possible that using mas->max and mas->min to correctly + * calculate the index and last will cause an issue in the gap + * calculation, so fix the ma_state here + */ + mas_ascend(mas); + ptype = mte_node_type(mas->node); + pivots = ma_pivots(mas_mn(mas), ptype); + mas->max = mas_safe_pivot(mas, pivots, pslot, ptype); + mas->min = mas_safe_min(mas, pivots, pslot); + mas->node = mn; + mas->offset = slot; + mas_wr_store_entry(&wr_mas); +} + +/* + * mas_sparse_area() - Internal function. Return upper or lower limit when + * searching for a gap in an empty tree. + * @mas: The maple state + * @min: the minimum range + * @max: The maximum range + * @size: The size of the gap + * @fwd: Searching forward or back + */ +static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, + unsigned long max, unsigned long size, bool fwd) +{ + unsigned long start = 0; + + if (!unlikely(mas_is_none(mas))) + start++; + /* mas_is_ptr */ + + if (start < min) + start = min; + + if (fwd) { + mas->index = start; + mas->last = start + size - 1; + return; + } + + mas->index = max; +} + +/* + * mas_empty_area() - Get the lowest address within the range that is + * sufficient for the size requested. 
+ * @mas: The maple state + * @min: The lowest value of the range + * @max: The highest value of the range + * @size: The size needed + */ +int mas_empty_area(struct ma_state *mas, unsigned long min, + unsigned long max, unsigned long size) +{ + unsigned char offset; + unsigned long *pivots; + enum maple_type mt; + + if (mas_is_start(mas)) + mas_start(mas); + else if (mas->offset >= 2) + mas->offset -= 2; + else if (!mas_skip_node(mas)) + return -EBUSY; + + /* Empty set */ + if (mas_is_none(mas) || mas_is_ptr(mas)) { + mas_sparse_area(mas, min, max, size, true); + return 0; + } + + /* The start of the window can only be within these values */ + mas->index = min; + mas->last = max; + mas_awalk(mas, size); + + if (unlikely(mas_is_err(mas))) + return xa_err(mas->node); + + offset = mas->offset; + if (unlikely(offset == MAPLE_NODE_SLOTS)) + return -EBUSY; + + mt = mte_node_type(mas->node); + pivots = ma_pivots(mas_mn(mas), mt); + if (offset) + mas->min = pivots[offset - 1] + 1; + + if (offset < mt_pivots[mt]) + mas->max = pivots[offset]; + + if (mas->index < mas->min) + mas->index = mas->min; + + mas->last = mas->index + size - 1; + return 0; +} + +/* + * mas_empty_area_rev() - Get the highest address within the range that is + * sufficient for the size requested. + * @mas: The maple state + * @min: The lowest value of the range + * @max: The highest value of the range + * @size: The size needed + */ +int mas_empty_area_rev(struct ma_state *mas, unsigned long min, + unsigned long max, unsigned long size) +{ + struct maple_enode *last = mas->node; + + if (mas_is_start(mas)) { + mas_start(mas); + mas->offset = mas_data_end(mas); + } else if (mas->offset >= 2) { + mas->offset -= 2; + } else if (!mas_rewind_node(mas)) { + return -EBUSY; + } + + /* Empty set. */ + if (mas_is_none(mas) || mas_is_ptr(mas)) { + mas_sparse_area(mas, min, max, size, false); + return 0; + } + + /* The start of the window can only be within these values. */ + mas->index = min; + mas->last = max; + + while (!mas_rev_awalk(mas, size)) { + if (last == mas->node) { + if (!mas_rewind_node(mas)) + return -EBUSY; + } else { + last = mas->node; + } + } + + if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) + return -EBUSY; + + /* + * mas_rev_awalk() has set mas->min and mas->max to the gap values. If + * the maximum is outside the window we are searching, then use the last + * location in the search. + * mas->max and mas->min is the range of the gap. + * mas->index and mas->last are currently set to the search range. + */ + + /* Trim the upper limit to the max. */ + if (mas->max <= mas->last) + mas->last = mas->max; + + mas->index = mas->last - size + 1; + return 0; +} + +static inline int mas_alloc(struct ma_state *mas, void *entry, + unsigned long size, unsigned long *index) +{ + unsigned long min; + + mas_start(mas); + if (mas_is_none(mas) || mas_is_ptr(mas)) { + mas_root_expand(mas, entry); + if (mas_is_err(mas)) + return xa_err(mas->node); + + if (!mas->index) + return mte_pivot(mas->node, 0); + return mte_pivot(mas->node, 1); + } + + /* Must be walking a tree. */ + mas_awalk(mas, size); + if (mas_is_err(mas)) + return xa_err(mas->node); + + if (mas->offset == MAPLE_NODE_SLOTS) + goto no_gap; + + /* + * At this point, mas->node points to the right node and we have an + * offset that has a sufficient gap. 
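+ * The pivot before this offset marks the end of the previous range, so
+ * the gap itself starts at that pivot + 1; raise mas->index to it if the
+ * requested minimum falls below the start of the gap.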
+ */ + min = mas->min; + if (mas->offset) + min = mte_pivot(mas->node, mas->offset - 1) + 1; + + if (mas->index < min) + mas->index = min; + + mas_fill_gap(mas, entry, mas->offset, size, index); + return 0; + +no_gap: + return -EBUSY; +} + +static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min, + unsigned long max, void *entry, + unsigned long size, unsigned long *index) +{ + int ret = 0; + + ret = mas_empty_area_rev(mas, min, max, size); + if (ret) + return ret; + + if (mas_is_err(mas)) + return xa_err(mas->node); + + if (mas->offset == MAPLE_NODE_SLOTS) + goto no_gap; + + mas_fill_gap(mas, entry, mas->offset, size, index); + return 0; + +no_gap: + return -EBUSY; +} + +/* + * mas_dead_leaves() - Mark all leaves of a node as dead. + * @mas: The maple state + * @slots: Pointer to the slot array + * + * Must hold the write lock. + * + * Return: The number of leaves marked as dead. + */ +static inline +unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots) +{ + struct maple_node *node; + enum maple_type type; + void *entry; + int offset; + + for (offset = 0; offset < mt_slot_count(mas->node); offset++) { + entry = mas_slot_locked(mas, slots, offset); + type = mte_node_type(entry); + node = mte_to_node(entry); + /* Use both node and type to catch LE & BE metadata */ + if (!node || !type) + break; + + mte_set_node_dead(entry); + smp_wmb(); /* Needed for RCU */ + node->type = type; + rcu_assign_pointer(slots[offset], node); + } + + return offset; +} + +static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset) +{ + struct maple_node *node, *next; + void __rcu **slots = NULL; + + next = mas_mn(mas); + do { + mas->node = ma_enode_ptr(next); + node = mas_mn(mas); + slots = ma_slots(node, node->type); + next = mas_slot_locked(mas, slots, offset); + offset = 0; + } while (!ma_is_leaf(next->type)); + + return slots; +} + +static void mt_free_walk(struct rcu_head *head) +{ + void __rcu **slots; + struct maple_node *node, *start; + struct maple_tree mt; + unsigned char offset; + enum maple_type type; + MA_STATE(mas, &mt, 0, 0); + + node = container_of(head, struct maple_node, rcu); + + if (ma_is_leaf(node->type)) + goto free_leaf; + + mt_init_flags(&mt, node->ma_flags); + mas_lock(&mas); + start = node; + mas.node = mt_mk_node(node, node->type); + slots = mas_dead_walk(&mas, 0); + node = mas_mn(&mas); + do { + mt_free_bulk(node->slot_len, slots); + offset = node->parent_slot + 1; + mas.node = node->piv_parent; + if (mas_mn(&mas) == node) + goto start_slots_free; + + type = mte_node_type(mas.node); + slots = ma_slots(mte_to_node(mas.node), type); + if ((offset < mt_slots[type]) && (slots[offset])) + slots = mas_dead_walk(&mas, offset); + + node = mas_mn(&mas); + } while ((node != start) || (node->slot_len < offset)); + + slots = ma_slots(node, node->type); + mt_free_bulk(node->slot_len, slots); + +start_slots_free: +free_leaf: + mt_free_rcu(&node->rcu); +} + +static inline void __rcu **mas_destroy_descend(struct ma_state *mas, + struct maple_enode *prev, unsigned char offset) +{ + struct maple_node *node; + struct maple_enode *next = mas->node; + void __rcu **slots = NULL; + + do { + mas->node = next; + node = mas_mn(mas); + slots = ma_slots(node, mte_node_type(mas->node)); + next = mas_slot_locked(mas, slots, 0); + if ((mte_dead_node(next))) + next = mas_slot_locked(mas, slots, 1); + + mte_set_node_dead(mas->node); + node->type = mte_node_type(mas->node); + node->piv_parent = prev; + node->parent_slot = offset; + offset = 0; + prev = mas->node; + } 
while (!mte_is_leaf(next)); + + return slots; +} + +static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags, + bool free) +{ + void __rcu **slots; + struct maple_node *node = mte_to_node(enode); + struct maple_enode *start; + struct maple_tree mt; + + MA_STATE(mas, &mt, 0, 0); + + if (mte_is_leaf(enode)) + goto free_leaf; + + mt_init_flags(&mt, ma_flags); + mas_lock(&mas); + + mas.node = start = enode; + slots = mas_destroy_descend(&mas, start, 0); + node = mas_mn(&mas); + do { + enum maple_type type; + unsigned char offset; + + node->slot_len = mas_dead_leaves(&mas, slots); + if (free) + mt_free_bulk(node->slot_len, slots); + offset = node->parent_slot + 1; + mas.node = node->piv_parent; + if (mas_mn(&mas) == node) + goto start_slots_free; + + type = mte_node_type(mas.node); + slots = ma_slots(mte_to_node(mas.node), type); + if ((offset < mt_slots[type]) && mte_node_type(slots[offset]) && + mte_to_node(slots[offset])) { + struct maple_enode *parent = mas.node; + + mas.node = mas_slot_locked(&mas, slots, offset); + slots = mas_destroy_descend(&mas, parent, offset); + } + node = mas_mn(&mas); + } while (start != mas.node); + + node = mas_mn(&mas); + node->slot_len = mas_dead_leaves(&mas, slots); + if (free) + mt_free_bulk(node->slot_len, slots); + +start_slots_free: + mas_unlock(&mas); + +free_leaf: + if (free) + mt_free_rcu(&node->rcu); +} + +/* + * mte_destroy_walk() - Free a tree or sub-tree. + * @enode - the encoded maple node (maple_enode) to start + * @mn - the tree to free - needed for node types. + * + * Must hold the write lock. + */ +static inline void mte_destroy_walk(struct maple_enode *enode, + struct maple_tree *mt) +{ + struct maple_node *node = mte_to_node(enode); + + if (mt_in_rcu(mt)) { + mt_destroy_walk(enode, mt->ma_flags, false); + call_rcu(&node->rcu, mt_free_walk); + } else { + mt_destroy_walk(enode, mt->ma_flags, true); + } +} + +static void mas_wr_store_setup(struct ma_wr_state *wr_mas) +{ + if (!mas_is_start(wr_mas->mas)) { + if (mas_is_none(wr_mas->mas)) { + mas_reset(wr_mas->mas); + } else { + wr_mas->r_max = wr_mas->mas->max; + wr_mas->type = mte_node_type(wr_mas->mas->node); + if (mas_is_span_wr(wr_mas)) + mas_reset(wr_mas->mas); + } + } + +} + +/* Interface */ + +/** + * mas_store() - Store an @entry. + * @mas: The maple state. + * @entry: The entry to store. + * + * The @mas->index and @mas->last is used to set the range for the @entry. + * Note: The @mas should have pre-allocated entries to ensure there is memory to + * store the entry. Please see mas_expected_entries()/mas_destroy() for more details. + * + * Return: the first entry between mas->index and mas->last or %NULL. + */ +void *mas_store(struct ma_state *mas, void *entry) +{ + MA_WR_STATE(wr_mas, mas, entry); + + trace_ma_write(__func__, mas, 0, entry); +#ifdef CONFIG_DEBUG_MAPLE_TREE + if (mas->index > mas->last) + pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry); + MT_BUG_ON(mas->tree, mas->index > mas->last); + if (mas->index > mas->last) { + mas_set_err(mas, -EINVAL); + return NULL; + } + +#endif + + /* + * Storing is the same operation as insert with the added caveat that it + * can overwrite entries. Although this seems simple enough, one may + * want to examine what happens if a single store operation was to + * overwrite multiple entries within a self-balancing B-Tree. + */ + mas_wr_store_setup(&wr_mas); + mas_wr_store_entry(&wr_mas); + return wr_mas.content; +} + +/** + * mas_store_gfp() - Store a value into the tree. 
+ * @mas: The maple state + * @entry: The entry to store + * @gfp: The GFP_FLAGS to use for allocations if necessary. + * + * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not + * be allocated. + */ +int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) +{ + MA_WR_STATE(wr_mas, mas, entry); + + mas_wr_store_setup(&wr_mas); + trace_ma_write(__func__, mas, 0, entry); +retry: + mas_wr_store_entry(&wr_mas); + if (unlikely(mas_nomem(mas, gfp))) + goto retry; + + if (unlikely(mas_is_err(mas))) + return xa_err(mas->node); + + return 0; +} + +/** + * mas_store_prealloc() - Store a value into the tree using memory + * preallocated in the maple state. + * @mas: The maple state + * @entry: The entry to store. + */ +void mas_store_prealloc(struct ma_state *mas, void *entry) +{ + MA_WR_STATE(wr_mas, mas, entry); + + mas_wr_store_setup(&wr_mas); + trace_ma_write(__func__, mas, 0, entry); + mas_wr_store_entry(&wr_mas); + BUG_ON(mas_is_err(mas)); + mas_destroy(mas); +} + +/** + * mas_preallocate() - Preallocate enough nodes for a store operation + * @mas: The maple state + * @entry: The entry that will be stored + * @gfp: The GFP_FLAGS to use for allocations. + * + * Return: 0 on success, -ENOMEM if memory could not be allocated. + */ +int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) +{ + int ret; + + mas_set_alloc_req(mas, 1 + mas_mt_height(mas) * 3); + mas_alloc_nodes(mas, gfp); + if (likely(!mas_is_err(mas))) + return 0; + + mas_set_alloc_req(mas, 0); + mas_destroy(mas); + ret = xa_err(mas->node); + mas->node = MAS_START; + return ret; +} + +/* + * mas_expected_entries() - Set the expected number of entries that will be inserted. + * @mas: The maple state + * @nr_entries: The number of expected entries. + * + * This will attempt to pre-allocate enough nodes to store the expected number + * of entries. The allocations will occur using the bulk allocator interface + * for speed. Please call mas_destroy() on the @mas after inserting the entries + * to ensure any unused nodes are freed. + * + * Return: 0 on success, -ENOMEM if memory could not be allocated. + */ +int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) +{ + int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; + struct maple_enode *enode = mas->node; + int nr_nodes; + int ret; + + /* + * Sometimes it is necessary to duplicate a tree to a new tree, such as + * forking a process and duplicating the VMAs from one tree to a new + * tree. When such a situation arises, it is known that the new tree is + * not going to be used until the entire tree is populated. For + * performance reasons, it is best to use a bulk load with RCU disabled. + * This allows for optimistic splitting that favours the left and reuse + * of nodes during the operation. + */ + + /* Optimize splitting for bulk insert in-order */ + mas->mas_flags |= MA_STATE_BULK; + + /* + * Avoid overflow, assume a gap between each entry and a trailing null. + * If this is wrong, it just means allocation can happen during + * insertion of entries. + */ + nr_nodes = max(nr_entries, nr_entries * 2 + 1); + + if (!mt_is_alloc(mas->tree)) + nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; + + /* Leaves */ + nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1); + /* Internal nodes */ + nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); + mas_node_count(mas, nr_nodes); + + if (!mas_is_err(mas)) + return 0; + + ret = xa_err(mas->node); + mas->node = enode; + return ret; + +} + +/* + * mas_destroy() - destroy a maple state. 
+ * @mas: The maple state + * + * Upon completion, check the left-most node and rebalance against the node to + * the right if necessary. Frees any allocated nodes associated with this maple + * state. + */ +void mas_destroy(struct ma_state *mas) +{ + struct maple_alloc *node; + + /* + * When using mas_for_each() to insert an expected number of elements, + * it is possible that the number inserted is less than the expected + * number. To fix an invalid final node, a check is performed here to + * rebalance the previous node with the final node. + */ + if (mas->mas_flags & MA_STATE_REBALANCE) { + unsigned char end; + + if (mas_is_start(mas)) + mas_start(mas); + + mtree_range_walk(mas); + end = mas_data_end(mas) + 1; + if (end < mt_min_slot_count(mas->node) - 1) + mas_destroy_rebalance(mas, end); + + mas->mas_flags &= ~MA_STATE_REBALANCE; + } + mas->mas_flags &= ~MA_STATE_BULK; + + while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) { + node = mas->alloc; + mas->alloc = node->slot[0]; + if (node->node_count > 0) + mt_free_bulk(node->node_count, + (void __rcu **)&node->slot[1]); + kmem_cache_free(maple_node_cache, node); + } + mas->alloc = NULL; +} + +/** + * mas_next() - Get the next entry. + * @mas: The maple state + * @max: The maximum index to check. + * + * Returns the next entry after @mas->index. + * Must hold rcu_read_lock or the write lock. + * Can return the zero entry. + * + * Return: The next entry or %NULL + */ +void *mas_next(struct ma_state *mas, unsigned long max) +{ + if (mas_is_none(mas) || mas_is_paused(mas)) + mas->node = MAS_START; + + if (mas_is_start(mas)) + mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + + if (mas_is_ptr(mas)) { + if (!mas->index) { + mas->index = 1; + mas->last = ULONG_MAX; + } + return NULL; + } + + if (mas->last == ULONG_MAX) + return NULL; + + /* Retries on dead nodes handled by mas_next_entry */ + return mas_next_entry(mas, max); +} +EXPORT_SYMBOL_GPL(mas_next); + +/** + * mt_next() - get the next value in the maple tree + * @mt: The maple tree + * @index: The start index + * @max: The maximum index to check + * + * Return: The entry at @index or higher, or %NULL if nothing is found. + */ +void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) +{ + void *entry = NULL; + MA_STATE(mas, mt, index, index); + + rcu_read_lock(); + entry = mas_next(&mas, max); + rcu_read_unlock(); + return entry; +} +EXPORT_SYMBOL_GPL(mt_next); + +/** + * mas_prev() - Get the previous entry + * @mas: The maple state + * @min: The minimum value to check. + * + * Must hold rcu_read_lock or the write lock. + * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not + * searchable nodes. + * + * Return: the previous value or %NULL. 
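+ *
+ * As a rough usage sketch only (the tree pointer "mt" below is illustrative
+ * and not taken from this file), walking the whole tree backwards looks like:
+ *
+ *      MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
+ *      void *entry;
+ *
+ *      rcu_read_lock();
+ *      while ((entry = mas_prev(&mas, 0)) != NULL)
+ *              pr_debug("%lu-%lu: %p\n", mas.index, mas.last, entry);
+ *      rcu_read_unlock();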
+ */ +void *mas_prev(struct ma_state *mas, unsigned long min) +{ + if (!mas->index) { + /* Nothing comes before 0 */ + mas->last = 0; + return NULL; + } + + if (unlikely(mas_is_ptr(mas))) + return NULL; + + if (mas_is_none(mas) || mas_is_paused(mas)) + mas->node = MAS_START; + + if (mas_is_start(mas)) { + mas_walk(mas); + if (!mas->index) + return NULL; + } + + if (mas_is_ptr(mas)) { + if (!mas->index) { + mas->last = 0; + return NULL; + } + + mas->index = mas->last = 0; + return mas_root_locked(mas); + } + return mas_prev_entry(mas, min); +} +EXPORT_SYMBOL_GPL(mas_prev); + +/** + * mt_prev() - get the previous value in the maple tree + * @mt: The maple tree + * @index: The start index + * @min: The minimum index to check + * + * Return: The entry at @index or lower, or %NULL if nothing is found. + */ +void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) +{ + void *entry = NULL; + MA_STATE(mas, mt, index, index); + + rcu_read_lock(); + entry = mas_prev(&mas, min); + rcu_read_unlock(); + return entry; +} +EXPORT_SYMBOL_GPL(mt_prev); + +/** + * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. + * @mas: The maple state to pause + * + * Some users need to pause a walk and drop the lock they're holding in + * order to yield to a higher priority thread or carry out an operation + * on an entry. Those users should call this function before they drop + * the lock. It resets the @mas to be suitable for the next iteration + * of the loop after the user has reacquired the lock. If most entries + * found during a walk require you to call mas_pause(), the mt_for_each() + * iterator may be more appropriate. + * + */ +void mas_pause(struct ma_state *mas) +{ + mas->node = MAS_PAUSE; +} +EXPORT_SYMBOL_GPL(mas_pause); + +/** + * mas_find() - On the first call, find the entry at or after mas->index up to + * %max. Otherwise, find the entry after mas->index. + * @mas: The maple state + * @max: The maximum value to check. + * + * Must hold rcu_read_lock or the write lock. + * If an entry exists, last and index are updated accordingly. + * May set @mas->node to MAS_NONE. + * + * Return: The entry or %NULL. + */ +void *mas_find(struct ma_state *mas, unsigned long max) +{ + if (unlikely(mas_is_paused(mas))) { + if (unlikely(mas->last == ULONG_MAX)) { + mas->node = MAS_NONE; + return NULL; + } + mas->node = MAS_START; + mas->index = ++mas->last; + } + + if (unlikely(mas_is_start(mas))) { + /* First run or continue */ + void *entry; + + if (mas->index > max) + return NULL; + + entry = mas_walk(mas); + if (entry) + return entry; + } + + if (unlikely(!mas_searchable(mas))) + return NULL; + + /* Retries on dead nodes handled by mas_next_entry */ + return mas_next_entry(mas, max); +} + +/** + * mas_find_rev: On the first call, find the first non-null entry at or below + * mas->index down to %min. Otherwise find the first non-null entry below + * mas->index down to %min. + * @mas: The maple state + * @min: The minimum value to check. + * + * Must hold rcu_read_lock or the write lock. + * If an entry exists, last and index are updated accordingly. + * May set @mas->node to MAS_NONE. + * + * Return: The entry or %NULL. 
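+ *
+ * A minimal sketch of the intent (the tree pointer "mt" and the bounds are
+ * illustrative only): find the last entry at or below index 1000.
+ *
+ *      MA_STATE(mas, mt, 1000, 1000);
+ *      void *entry;
+ *
+ *      rcu_read_lock();
+ *      entry = mas_find_rev(&mas, 0);
+ *      if (entry)
+ *              pr_debug("found %lu-%lu: %p\n", mas.index, mas.last, entry);
+ *      rcu_read_unlock();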
+ */ +void *mas_find_rev(struct ma_state *mas, unsigned long min) +{ + if (unlikely(mas_is_paused(mas))) { + if (unlikely(mas->last == ULONG_MAX)) { + mas->node = MAS_NONE; + return NULL; + } + mas->node = MAS_START; + mas->last = --mas->index; + } + + if (unlikely(mas_is_start(mas))) { + /* First run or continue */ + void *entry; + + if (mas->index < min) + return NULL; + + entry = mas_walk(mas); + if (entry) + return entry; + } + + if (unlikely(!mas_searchable(mas))) + return NULL; + + if (mas->index < min) + return NULL; + + /* Retries on dead nodes handled by mas_next_entry */ + return mas_prev_entry(mas, min); +} +EXPORT_SYMBOL_GPL(mas_find); + +/** + * mas_erase() - Find the range in which index resides and erase the entire + * range. + * @mas: The maple state + * + * Must hold the write lock. + * Searches for @mas->index, sets @mas->index and @mas->last to the range and + * erases that range. + * + * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated. + */ +void *mas_erase(struct ma_state *mas) +{ + void *entry; + MA_WR_STATE(wr_mas, mas, NULL); + + if (mas_is_none(mas) || mas_is_paused(mas)) + mas->node = MAS_START; + + /* Retry unnecessary when holding the write lock. */ + entry = mas_state_walk(mas); + if (!entry) + return NULL; + +write_retry: + /* Must reset to ensure spanning writes of last slot are detected */ + mas_reset(mas); + mas_wr_store_setup(&wr_mas); + mas_wr_store_entry(&wr_mas); + if (mas_nomem(mas, GFP_KERNEL)) + goto write_retry; + + return entry; +} +EXPORT_SYMBOL_GPL(mas_erase); + +/** + * mas_nomem() - Check if there was an error allocating and do the allocation + * if necessary If there are allocations, then free them. + * @mas: The maple state + * @gfp: The GFP_FLAGS to use for allocations + * Return: true on allocation, false otherwise. + */ +bool mas_nomem(struct ma_state *mas, gfp_t gfp) + __must_hold(mas->tree->lock) +{ + if (likely(mas->node != MA_ERROR(-ENOMEM))) { + mas_destroy(mas); + return false; + } + + if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) { + mtree_unlock(mas->tree); + mas_alloc_nodes(mas, gfp); + mtree_lock(mas->tree); + } else { + mas_alloc_nodes(mas, gfp); + } + + if (!mas_allocated(mas)) + return false; + + mas->node = MAS_START; + return true; +} + +void __init maple_tree_init(void) +{ + maple_node_cache = kmem_cache_create("maple_node", + sizeof(struct maple_node), sizeof(struct maple_node), + SLAB_PANIC, NULL); +} + +/** + * mtree_load() - Load a value stored in a maple tree + * @mt: The maple tree + * @index: The index to load + * + * Return: the entry or %NULL + */ +void *mtree_load(struct maple_tree *mt, unsigned long index) +{ + MA_STATE(mas, mt, index, index); + void *entry; + + trace_ma_read(__func__, &mas); + rcu_read_lock(); +retry: + entry = mas_start(&mas); + if (unlikely(mas_is_none(&mas))) + goto unlock; + + if (unlikely(mas_is_ptr(&mas))) { + if (index) + entry = NULL; + + goto unlock; + } + + entry = mtree_lookup_walk(&mas); + if (!entry && unlikely(mas_is_start(&mas))) + goto retry; +unlock: + rcu_read_unlock(); + if (xa_is_zero(entry)) + return NULL; + + return entry; +} +EXPORT_SYMBOL(mtree_load); + +/** + * mtree_store_range() - Store an entry at a given range. + * @mt: The maple tree + * @index: The start of the range + * @last: The end of the range + * @entry: The entry to store + * @gfp: The GFP_FLAGS to use for allocations + * + * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not + * be allocated. 
+ */ +int mtree_store_range(struct maple_tree *mt, unsigned long index, + unsigned long last, void *entry, gfp_t gfp) +{ + MA_STATE(mas, mt, index, last); + MA_WR_STATE(wr_mas, &mas, entry); + + trace_ma_write(__func__, &mas, 0, entry); + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + + if (index > last) + return -EINVAL; + + mtree_lock(mt); +retry: + mas_wr_store_entry(&wr_mas); + if (mas_nomem(&mas, gfp)) + goto retry; + + mtree_unlock(mt); + if (mas_is_err(&mas)) + return xa_err(mas.node); + + return 0; +} +EXPORT_SYMBOL(mtree_store_range); + +/** + * mtree_store() - Store an entry at a given index. + * @mt: The maple tree + * @index: The index to store the value + * @entry: The entry to store + * @gfp: The GFP_FLAGS to use for allocations + * + * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not + * be allocated. + */ +int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, + gfp_t gfp) +{ + return mtree_store_range(mt, index, index, entry, gfp); +} +EXPORT_SYMBOL(mtree_store); + +/** + * mtree_insert_range() - Insert an entry at a give range if there is no value. + * @mt: The maple tree + * @first: The start of the range + * @last: The end of the range + * @entry: The entry to store + * @gfp: The GFP_FLAGS to use for allocations. + * + * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid + * request, -ENOMEM if memory could not be allocated. + */ +int mtree_insert_range(struct maple_tree *mt, unsigned long first, + unsigned long last, void *entry, gfp_t gfp) +{ + MA_STATE(ms, mt, first, last); + + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + + if (first > last) + return -EINVAL; + + mtree_lock(mt); +retry: + mas_insert(&ms, entry); + if (mas_nomem(&ms, gfp)) + goto retry; + + mtree_unlock(mt); + if (mas_is_err(&ms)) + return xa_err(ms.node); + + return 0; +} +EXPORT_SYMBOL(mtree_insert_range); + +/** + * mtree_insert() - Insert an entry at a give index if there is no value. + * @mt: The maple tree + * @index : The index to store the value + * @entry: The entry to store + * @gfp: The FGP_FLAGS to use for allocations. + * + * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid + * request, -ENOMEM if memory could not be allocated. 
+ */ +int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, + gfp_t gfp) +{ + return mtree_insert_range(mt, index, index, entry, gfp); +} +EXPORT_SYMBOL(mtree_insert); + +int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long size, unsigned long min, + unsigned long max, gfp_t gfp) +{ + int ret = 0; + + MA_STATE(mas, mt, min, max - size); + if (!mt_is_alloc(mt)) + return -EINVAL; + + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; + + if (min > max) + return -EINVAL; + + if (max < size) + return -EINVAL; + + if (!size) + return -EINVAL; + + mtree_lock(mt); +retry: + mas.offset = 0; + mas.index = min; + mas.last = max - size; + ret = mas_alloc(&mas, entry, size, startp); + if (mas_nomem(&mas, gfp)) + goto retry; + + mtree_unlock(mt); + return ret; +} +EXPORT_SYMBOL(mtree_alloc_range); + +int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long size, unsigned long min, + unsigned long max, gfp_t gfp) +{ + int ret = 0; + + MA_STATE(mas, mt, min, max - size); + if (!mt_is_alloc(mt)) + return -EINVAL; + + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; + + if (min >= max) + return -EINVAL; + + if (max < size - 1) + return -EINVAL; + + if (!size) + return -EINVAL; + + mtree_lock(mt); +retry: + ret = mas_rev_alloc(&mas, min, max, entry, size, startp); + if (mas_nomem(&mas, gfp)) + goto retry; + + mtree_unlock(mt); + return ret; +} +EXPORT_SYMBOL(mtree_alloc_rrange); + +/** + * mtree_erase() - Find an index and erase the entire range. + * @mt: The maple tree + * @index: The index to erase + * + * Erasing is the same as a walk to an entry then a store of a NULL to that + * ENTIRE range. In fact, it is implemented as such using the advanced API. + * + * Return: The entry stored at the @index or %NULL + */ +void *mtree_erase(struct maple_tree *mt, unsigned long index) +{ + void *entry = NULL; + + MA_STATE(mas, mt, index, index); + trace_ma_op(__func__, &mas); + + mtree_lock(mt); + entry = mas_erase(&mas); + mtree_unlock(mt); + + return entry; +} +EXPORT_SYMBOL(mtree_erase); + +/** + * __mt_destroy() - Walk and free all nodes of a locked maple tree. + * @mt: The maple tree + * + * Note: Does not handle locking. + */ +void __mt_destroy(struct maple_tree *mt) +{ + void *root = mt_root_locked(mt); + + rcu_assign_pointer(mt->ma_root, NULL); + if (xa_is_node(root)) + mte_destroy_walk(root, mt); + + mt->ma_flags = 0; +} +EXPORT_SYMBOL_GPL(__mt_destroy); + +/** + * mtree_destroy() - Destroy a maple tree + * @mt: The maple tree + * + * Frees all resources used by the tree. Handles locking. + */ +void mtree_destroy(struct maple_tree *mt) +{ + mtree_lock(mt); + __mt_destroy(mt); + mtree_unlock(mt); +} +EXPORT_SYMBOL(mtree_destroy); + +/** + * mt_find() - Search from the start up until an entry is found. + * @mt: The maple tree + * @index: Pointer which contains the start location of the search + * @max: The maximum value to check + * + * Handles locking. @index will be incremented to one beyond the range. 
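+ *
+ * The usual calling pattern, and roughly what the mt_for_each() helper
+ * expands to ("mt" and "do_something" are illustrative only):
+ *
+ *      unsigned long index = 0;
+ *      void *entry;
+ *
+ *      entry = mt_find(mt, &index, ULONG_MAX);
+ *      while (entry) {
+ *              do_something(entry);
+ *              entry = mt_find_after(mt, &index, ULONG_MAX);
+ *      }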
+ * + * Return: The entry at or after the @index or %NULL + */ +void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) +{ + MA_STATE(mas, mt, *index, *index); + void *entry; +#ifdef CONFIG_DEBUG_MAPLE_TREE + unsigned long copy = *index; +#endif + + trace_ma_read(__func__, &mas); + + if ((*index) > max) + return NULL; + + rcu_read_lock(); +retry: + entry = mas_state_walk(&mas); + if (mas_is_start(&mas)) + goto retry; + + if (unlikely(xa_is_zero(entry))) + entry = NULL; + + if (entry) + goto unlock; + + while (mas_searchable(&mas) && (mas.index < max)) { + entry = mas_next_entry(&mas, max); + if (likely(entry && !xa_is_zero(entry))) + break; + } + + if (unlikely(xa_is_zero(entry))) + entry = NULL; +unlock: + rcu_read_unlock(); + if (likely(entry)) { + *index = mas.last + 1; +#ifdef CONFIG_DEBUG_MAPLE_TREE + if ((*index) && (*index) <= copy) + pr_err("index not increased! %lx <= %lx\n", + *index, copy); + MT_BUG_ON(mt, (*index) && ((*index) <= copy)); +#endif + } + + return entry; +} +EXPORT_SYMBOL(mt_find); + +/** + * mt_find_after() - Search from the start up until an entry is found. + * @mt: The maple tree + * @index: Pointer which contains the start location of the search + * @max: The maximum value to check + * + * Handles locking, detects wrapping on index == 0 + * + * Return: The entry at or after the @index or %NULL + */ +void *mt_find_after(struct maple_tree *mt, unsigned long *index, + unsigned long max) +{ + if (!(*index)) + return NULL; + + return mt_find(mt, index, max); +} +EXPORT_SYMBOL(mt_find_after); + +#ifdef CONFIG_DEBUG_MAPLE_TREE +atomic_t maple_tree_tests_run; +EXPORT_SYMBOL_GPL(maple_tree_tests_run); +atomic_t maple_tree_tests_passed; +EXPORT_SYMBOL_GPL(maple_tree_tests_passed); + +#ifndef __KERNEL__ +extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); +void mt_set_non_kernel(unsigned int val) +{ + kmem_cache_set_non_kernel(maple_node_cache, val); +} + +extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); +unsigned long mt_get_alloc_size(void) +{ + return kmem_cache_get_alloc(maple_node_cache); +} + +extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *); +void mt_zero_nr_tallocated(void) +{ + kmem_cache_zero_nr_tallocated(maple_node_cache); +} + +extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *); +unsigned int mt_nr_tallocated(void) +{ + return kmem_cache_nr_tallocated(maple_node_cache); +} + +extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *); +unsigned int mt_nr_allocated(void) +{ + return kmem_cache_nr_allocated(maple_node_cache); +} + +/* + * mas_dead_node() - Check if the maple state is pointing to a dead node. + * @mas: The maple state + * @index: The index to restore in @mas. + * + * Used in test code. + * Return: 1 if @mas has been reset to MAS_START, 0 otherwise. + */ +static inline int mas_dead_node(struct ma_state *mas, unsigned long index) +{ + if (unlikely(!mas_searchable(mas) || mas_is_start(mas))) + return 0; + + if (likely(!mte_dead_node(mas->node))) + return 0; + + mas_rewalk(mas, index); + return 1; +} +#endif /* not defined __KERNEL__ */ + +/* + * mas_get_slot() - Get the entry in the maple state node stored at @offset. + * @mas: The maple state + * @offset: The offset into the slot array to fetch. + * + * Return: The entry stored at @offset. 
+ */ +static inline struct maple_enode *mas_get_slot(struct ma_state *mas, + unsigned char offset) +{ + return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)), + offset); +} + + +/* + * mas_first_entry() - Go the first leaf and find the first entry. + * @mas: the maple state. + * @limit: the maximum index to check. + * @*r_start: Pointer to set to the range start. + * + * Sets mas->offset to the offset of the entry, r_start to the range minimum. + * + * Return: The first entry or MAS_NONE. + */ +static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, + unsigned long limit, enum maple_type mt) + +{ + unsigned long max; + unsigned long *pivots; + void __rcu **slots; + void *entry = NULL; + + mas->index = mas->min; + if (mas->index > limit) + goto none; + + max = mas->max; + mas->offset = 0; + while (likely(!ma_is_leaf(mt))) { + slots = ma_slots(mn, mt); + pivots = ma_pivots(mn, mt); + max = pivots[0]; + entry = mas_slot(mas, slots, 0); + if (unlikely(ma_dead_node(mn))) + return NULL; + mas->node = entry; + mn = mas_mn(mas); + mt = mte_node_type(mas->node); + } + + mas->max = max; + slots = ma_slots(mn, mt); + entry = mas_slot(mas, slots, 0); + if (unlikely(ma_dead_node(mn))) + return NULL; + + /* Slot 0 or 1 must be set */ + if (mas->index > limit) + goto none; + + if (likely(entry)) + return entry; + + pivots = ma_pivots(mn, mt); + mas->index = pivots[0] + 1; + mas->offset = 1; + entry = mas_slot(mas, slots, 1); + if (unlikely(ma_dead_node(mn))) + return NULL; + + if (mas->index > limit) + goto none; + + if (likely(entry)) + return entry; + +none: + if (likely(!ma_dead_node(mn))) + mas->node = MAS_NONE; + return NULL; +} + +/* Depth first search, post-order */ +static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) +{ + + struct maple_enode *p = MAS_NONE, *mn = mas->node; + unsigned long p_min, p_max; + + mas_next_node(mas, mas_mn(mas), max); + if (!mas_is_none(mas)) + return; + + if (mte_is_root(mn)) + return; + + mas->node = mn; + mas_ascend(mas); + while (mas->node != MAS_NONE) { + p = mas->node; + p_min = mas->min; + p_max = mas->max; + mas_prev_node(mas, 0); + } + + if (p == MAS_NONE) + return; + + mas->node = p; + mas->max = p_max; + mas->min = p_min; +} + +/* Tree validations */ +static void mt_dump_node(const struct maple_tree *mt, void *entry, + unsigned long min, unsigned long max, unsigned int depth); +static void mt_dump_range(unsigned long min, unsigned long max, + unsigned int depth) +{ + static const char spaces[] = " "; + + if (min == max) + pr_info("%.*s%lu: ", depth * 2, spaces, min); + else + pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); +} + +static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, + unsigned int depth) +{ + mt_dump_range(min, max, depth); + + if (xa_is_value(entry)) + pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), + xa_to_value(entry), entry); + else if (xa_is_zero(entry)) + pr_cont("zero (%ld)\n", xa_to_internal(entry)); + else if (mt_is_reserved(entry)) + pr_cont("UNKNOWN ENTRY (%p)\n", entry); + else + pr_cont("%p\n", entry); +} + +static void mt_dump_range64(const struct maple_tree *mt, void *entry, + unsigned long min, unsigned long max, unsigned int depth) +{ + struct maple_range_64 *node = &mte_to_node(entry)->mr64; + bool leaf = mte_is_leaf(entry); + unsigned long first = min; + int i; + + pr_cont(" contents: "); + for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) + pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + pr_cont("%p\n", node->slot[i]); + for (i 
= 0; i < MAPLE_RANGE64_SLOTS; i++) { + unsigned long last = max; + + if (i < (MAPLE_RANGE64_SLOTS - 1)) + last = node->pivot[i]; + else if (!node->slot[i] && max != mt_max[mte_node_type(entry)]) + break; + if (last == 0 && i > 0) + break; + if (leaf) + mt_dump_entry(mt_slot(mt, node->slot, i), + first, last, depth + 1); + else if (node->slot[i]) + mt_dump_node(mt, mt_slot(mt, node->slot, i), + first, last, depth + 1); + + if (last == max) + break; + if (last > max) { + pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + node, last, max, i); + break; + } + first = last + 1; + } +} + +static void mt_dump_arange64(const struct maple_tree *mt, void *entry, + unsigned long min, unsigned long max, unsigned int depth) +{ + struct maple_arange_64 *node = &mte_to_node(entry)->ma64; + bool leaf = mte_is_leaf(entry); + unsigned long first = min; + int i; + + pr_cont(" contents: "); + for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) + pr_cont("%lu ", node->gap[i]); + pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); + for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) + pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + pr_cont("%p\n", node->slot[i]); + for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { + unsigned long last = max; + + if (i < (MAPLE_ARANGE64_SLOTS - 1)) + last = node->pivot[i]; + else if (!node->slot[i]) + break; + if (last == 0 && i > 0) + break; + if (leaf) + mt_dump_entry(mt_slot(mt, node->slot, i), + first, last, depth + 1); + else if (node->slot[i]) + mt_dump_node(mt, mt_slot(mt, node->slot, i), + first, last, depth + 1); + + if (last == max) + break; + if (last > max) { + pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + node, last, max, i); + break; + } + first = last + 1; + } +} + +static void mt_dump_node(const struct maple_tree *mt, void *entry, + unsigned long min, unsigned long max, unsigned int depth) +{ + struct maple_node *node = mte_to_node(entry); + unsigned int type = mte_node_type(entry); + unsigned int i; + + mt_dump_range(min, max, depth); + + pr_cont("node %p depth %d type %d parent %p", node, depth, type, + node ? node->parent : NULL); + switch (type) { + case maple_dense: + pr_cont("\n"); + for (i = 0; i < MAPLE_NODE_SLOTS; i++) { + if (min + i > max) + pr_cont("OUT OF RANGE: "); + mt_dump_entry(mt_slot(mt, node->slot, i), + min + i, min + i, depth); + } + break; + case maple_leaf_64: + case maple_range_64: + mt_dump_range64(mt, entry, min, max, depth); + break; + case maple_arange_64: + mt_dump_arange64(mt, entry, min, max, depth); + break; + + default: + pr_cont(" UNKNOWN TYPE\n"); + } +} + +void mt_dump(const struct maple_tree *mt) +{ + void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); + + pr_info("maple_tree(%p) flags %X, height %u root %p\n", + mt, mt->ma_flags, mt_height(mt), entry); + if (!xa_is_node(entry)) + mt_dump_entry(entry, 0, 0, 0); + else if (entry) + mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0); +} + +/* + * Calculate the maximum gap in a node and check if that's what is reported in + * the parent (unless root). 
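The dump helpers above, and the validators that follow, are normally driven from the test suite after a batch of mutations; a minimal debugging sketch (illustrative, and only meaningful with CONFIG_DEBUG_MAPLE_TREE, since that is what builds mt_dump() and mt_validate()):

static void example_dump_and_validate(struct maple_tree *mt)
{
        unsigned long i;

        for (i = 0; i <= 100; i++)
                mtree_store_range(mt, i * 10, i * 10 + 5, xa_mk_value(i),
                                  GFP_KERNEL);

        mt_dump(mt);            /* print every node, pivot and entry */
        mt_validate(mt);        /* MT_BUG_ON() on any broken invariant */
}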
+ */ +static void mas_validate_gaps(struct ma_state *mas) +{ + struct maple_enode *mte = mas->node; + struct maple_node *p_mn; + unsigned long gap = 0, max_gap = 0; + unsigned long p_end, p_start = mas->min; + unsigned char p_slot; + unsigned long *gaps = NULL; + unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte)); + int i; + + if (ma_is_dense(mte_node_type(mte))) { + for (i = 0; i < mt_slot_count(mte); i++) { + if (mas_get_slot(mas, i)) { + if (gap > max_gap) + max_gap = gap; + gap = 0; + continue; + } + gap++; + } + goto counted; + } + + gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte)); + for (i = 0; i < mt_slot_count(mte); i++) { + p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte)); + + if (!gaps) { + if (mas_get_slot(mas, i)) { + gap = 0; + goto not_empty; + } + + gap += p_end - p_start + 1; + } else { + void *entry = mas_get_slot(mas, i); + + gap = gaps[i]; + if (!entry) { + if (gap != p_end - p_start + 1) { + pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", + mas_mn(mas), i, + mas_get_slot(mas, i), gap, + p_end, p_start); + mt_dump(mas->tree); + + MT_BUG_ON(mas->tree, + gap != p_end - p_start + 1); + } + } else { + if (gap > p_end - p_start + 1) { + pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", + mas_mn(mas), i, gap, p_end, p_start, + p_end - p_start + 1); + MT_BUG_ON(mas->tree, + gap > p_end - p_start + 1); + } + } + } + + if (gap > max_gap) + max_gap = gap; +not_empty: + p_start = p_end + 1; + if (p_end >= mas->max) + break; + } + +counted: + if (mte_is_root(mte)) + return; + + p_slot = mte_parent_slot(mas->node); + p_mn = mte_parent(mte); + MT_BUG_ON(mas->tree, max_gap > mas->max); + if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) { + pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); + mt_dump(mas->tree); + } + + MT_BUG_ON(mas->tree, + ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap); +} + +static void mas_validate_parent_slot(struct ma_state *mas) +{ + struct maple_node *parent; + struct maple_enode *node; + enum maple_type p_type = mas_parent_enum(mas, mas->node); + unsigned char p_slot = mte_parent_slot(mas->node); + void __rcu **slots; + int i; + + if (mte_is_root(mas->node)) + return; + + parent = mte_parent(mas->node); + slots = ma_slots(parent, p_type); + MT_BUG_ON(mas->tree, mas_mn(mas) == parent); + + /* Check prev/next parent slot for duplicate node entry */ + + for (i = 0; i < mt_slots[p_type]; i++) { + node = mas_slot(mas, slots, i); + if (i == p_slot) { + if (node != mas->node) + pr_err("parent %p[%u] does not have %p\n", + parent, i, mas_mn(mas)); + MT_BUG_ON(mas->tree, node != mas->node); + } else if (node == mas->node) { + pr_err("Invalid child %p at parent %p[%u] p_slot %u\n", + mas_mn(mas), parent, i, p_slot); + MT_BUG_ON(mas->tree, node == mas->node); + } + } +} + +static void mas_validate_child_slot(struct ma_state *mas) +{ + enum maple_type type = mte_node_type(mas->node); + void __rcu **slots = ma_slots(mte_to_node(mas->node), type); + unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type); + struct maple_enode *child; + unsigned char i; + + if (mte_is_leaf(mas->node)) + return; + + for (i = 0; i < mt_slots[type]; i++) { + child = mas_slot(mas, slots, i); + if (!pivots[i] || pivots[i] == mas->max) + break; + + if (!child) + break; + + if (mte_parent_slot(child) != i) { + pr_err("Slot error at %p[%u]: child %p has pslot %u\n", + mas_mn(mas), i, mte_to_node(child), + mte_parent_slot(child)); + MT_BUG_ON(mas->tree, 1); + } + + if (mte_parent(child) != mte_to_node(mas->node)) { + 
pr_err("child %p has parent %p not %p\n", + mte_to_node(child), mte_parent(child), + mte_to_node(mas->node)); + MT_BUG_ON(mas->tree, 1); + } + } +} + +/* + * Validate all pivots are within mas->min and mas->max. + */ +static void mas_validate_limits(struct ma_state *mas) +{ + int i; + unsigned long prev_piv = 0; + void __rcu **slots = ma_slots(mte_to_node(mas->node), + mte_node_type(mas->node)); + + /* all limits are fine here. */ + if (mte_is_root(mas->node)) + return; + + for (i = 0; i < mt_slot_count(mas->node); i++) { + enum maple_type type = mte_node_type(mas->node); + unsigned long *pivots = ma_pivots(mas_mn(mas), type); + unsigned long piv = mas_safe_pivot(mas, pivots, type, i); + + if (!piv) + break; + + if (!mte_is_leaf(mas->node)) { + void *entry = mas_slot(mas, slots, i); + + if (!entry) + pr_err("%p[%u] cannot be null\n", + mas_mn(mas), i); + + MT_BUG_ON(mas->tree, !entry); + } + + if (prev_piv > piv) { + pr_err("%p[%u] piv %lu < prev_piv %lu\n", + mas_mn(mas), i, piv, prev_piv); + MT_BUG_ON(mas->tree, piv < prev_piv); + } + + if (piv < mas->min) { + pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, + piv, mas->min); + MT_BUG_ON(mas->tree, piv < mas->min); + } + if (piv > mas->max) { + pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, + piv, mas->max); + MT_BUG_ON(mas->tree, piv > mas->max); + } + prev_piv = piv; + if (piv == mas->max) + break; + } +} + +static void mt_validate_nulls(struct maple_tree *mt) +{ + void *entry, *last = (void *)1; + unsigned char offset = 0; + void __rcu **slots; + MA_STATE(mas, mt, 0, 0); + + mas_start(&mas); + if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) + return; + + while (!mte_is_leaf(mas.node)) + mas_descend(&mas); + + slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); + do { + entry = mas_slot(&mas, slots, offset); + if (!last && !entry) { + pr_err("Sequential nulls end at %p[%u]\n", + mas_mn(&mas), offset); + } + MT_BUG_ON(mt, !last && !entry); + last = entry; + if (offset == mas_data_end(&mas)) { + mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); + if (mas_is_none(&mas)) + return; + offset = 0; + slots = ma_slots(mte_to_node(mas.node), + mte_node_type(mas.node)); + } else + offset++; + + } while (!mas_is_none(&mas)); +} + +/* + * validate a maple tree by checking: + * 1. The limits (pivots are within mas->min to mas->max) + * 2. 
The gap is correctly set in the parents + */ +void mt_validate(struct maple_tree *mt) +{ + unsigned char end; + + MA_STATE(mas, mt, 0, 0); + rcu_read_lock(); + mas_start(&mas); + if (!mas_searchable(&mas)) + goto done; + + mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node)); + while (!mas_is_none(&mas)) { + MT_BUG_ON(mas.tree, mte_dead_node(mas.node)); + if (!mte_is_root(mas.node)) { + end = mas_data_end(&mas); + if ((end < mt_min_slot_count(mas.node)) && + (mas.max != ULONG_MAX)) { + pr_err("Invalid size %u of %p\n", end, + mas_mn(&mas)); + MT_BUG_ON(mas.tree, 1); + } + + } + mas_validate_parent_slot(&mas); + mas_validate_child_slot(&mas); + mas_validate_limits(&mas); + if (mt_is_alloc(mt)) + mas_validate_gaps(&mas); + mas_dfs_postorder(&mas, ULONG_MAX); + } + mt_validate_nulls(mt); +done: + rcu_read_unlock(); + +} + +#endif /* CONFIG_DEBUG_MAPLE_TREE */ diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore index d971516401e68..c901d96dd013e 100644 --- a/tools/testing/radix-tree/.gitignore +++ b/tools/testing/radix-tree/.gitignore @@ -6,3 +6,5 @@ main multiorder radix-tree.c xarray +maple +ma_xa_benchmark diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/radix-tree/generated/autoconf.h index 2218b3cc184e4..e7da803502362 100644 --- a/tools/testing/radix-tree/generated/autoconf.h +++ b/tools/testing/radix-tree/generated/autoconf.h @@ -1 +1,2 @@ #define CONFIG_XARRAY_MULTI 1 +#define CONFIG_64BIT 1 diff --git a/tools/testing/radix-tree/linux/maple_tree.h b/tools/testing/radix-tree/linux/maple_tree.h new file mode 100644 index 0000000000000..7d8d1f445b899 --- /dev/null +++ b/tools/testing/radix-tree/linux/maple_tree.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#define atomic_t int32_t +#include "../../../../include/linux/maple_tree.h" +#define atomic_inc(x) uatomic_inc(x) +#define atomic_read(x) uatomic_read(x) +#define atomic_set(x, y) do {} while (0) +#define U8_MAX UCHAR_MAX diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c new file mode 100644 index 0000000000000..35082671928ad --- /dev/null +++ b/tools/testing/radix-tree/maple.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * maple_tree.c: Userspace shim for maple tree test-suite + * Copyright (c) 2018 Liam R. 
Howlett + */ + +#define CONFIG_DEBUG_MAPLE_TREE +#define CONFIG_MAPLE_SEARCH +#include "test.h" + +#define module_init(x) +#define module_exit(x) +#define MODULE_AUTHOR(x) +#define MODULE_LICENSE(x) +#define dump_stack() assert(0) + +#include "../../../lib/maple_tree.c" +#undef CONFIG_DEBUG_MAPLE_TREE +#include "../../../lib/test_maple_tree.c" + +void farmer_tests(void) +{ + struct maple_node *node; + DEFINE_MTREE(tree); + + mt_dump(&tree); + + tree.ma_root = xa_mk_value(0); + mt_dump(&tree); + + node = mt_alloc_one(GFP_KERNEL); + node->parent = (void *)((unsigned long)(&tree) | 1); + node->slot[0] = xa_mk_value(0); + node->slot[1] = xa_mk_value(1); + node->mr64.pivot[0] = 0; + node->mr64.pivot[1] = 1; + node->mr64.pivot[2] = 0; + tree.ma_root = mt_mk_node(node, maple_leaf_64); + mt_dump(&tree); + + ma_free_rcu(node); +} + +void maple_tree_tests(void) +{ + farmer_tests(); + maple_tree_seed(); + maple_tree_harvest(); +} + +int __weak main(void) +{ + maple_tree_init(); + maple_tree_tests(); + rcu_barrier(); + if (nr_allocated) + printf("nr_allocated = %d\n", nr_allocated); + return 0; +} diff --git a/tools/testing/radix-tree/trace/events/maple_tree.h b/tools/testing/radix-tree/trace/events/maple_tree.h new file mode 100644 index 0000000000000..97d0e1ddcf08e --- /dev/null +++ b/tools/testing/radix-tree/trace/events/maple_tree.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#define trace_ma_op(a, b) do {} while (0) +#define trace_ma_read(a, b) do {} while (0) +#define trace_ma_write(a, b, c, d) do {} while (0) From fc37260603a9f3a509ff397c79e08a4b12570f4c Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Sat, 25 Jun 2022 00:39:06 +0000 Subject: [PATCH 007/321] maple_tree: fix underflow in mas_spanning_rebalance() When rebalancing a spanning store that spans the entire address space, the big node end is not expanded the extra position that mas_spanning_rebalance() expects. Fix this by expanding on such cases. Also change mab_mas_cp() from assuming there are at least two entries to ensure the correct metadata is written. Link: https://lkml.kernel.org/r/20220625003854.1230114-2-Liam.Howlett@oracle.com Fixes: f8acc5e9581e (Maple Tree: add new data structure) Signed-off-by: Liam R. Howlett Reported-by: Yu Zhao Signed-off-by: Andrew Morton --- lib/maple_tree.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index a968948b3e3e5..d8457122ca5dc 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1977,7 +1977,6 @@ static inline void mab_mas_cp(struct maple_big_node *b_node, slots[mt_pivots[mt]] = NULL; i = mab_start; - pivots[j++] = b_node->pivot[i++]; do { pivots[j++] = b_node->pivot[i++]; } while (i <= mab_end && likely(b_node->pivot[i])); @@ -2970,7 +2969,7 @@ static int mas_spanning_rebalance(struct ma_state *mas, mast->free = &free; mast->destroy = &destroy; l_mas.node = r_mas.node = m_mas.node = MAS_NONE; - if (!mas_is_root_limits(mast->orig_l) && + if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) && unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) mast_spanning_rebalance(mast); @@ -4004,6 +4003,9 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) if (r_mas.offset <= r_wr_mas.node_end) mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, &b_node, b_node.b_end + 1); + else + b_node.b_end++; + /* Stop spanning searches by searching for just index. 
*/ l_mas.index = l_mas.last = mas->index; From 1876ee7a9daa4987dfb6dc41b77405af0daf0175 Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Wed, 29 Jun 2022 15:23:50 +0000 Subject: [PATCH 008/321] maple_tree: fix mas_spanning_rebalance() corner case When a node is insufficient during a spanning rebalance loop and mast_spanning_rebalance() is called to combine neighbouring nodes, the loop should not terminate regardless of if neighbours exist or not. This will allow the data to be stored in the correct node. Link: https://lkml.kernel.org/r/20220629152340.3451959-2-Liam.Howlett@oracle.com Fixes: 37a4d714b7d9 (maple_tree: fix underflow in mas_spanning_rebalance()) Signed-off-by: Liam R. Howlett Reported-by: Yu Zhao Signed-off-by: Andrew Morton --- lib/maple_tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index d8457122ca5dc..2cccd8f2153ff 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -3029,8 +3029,7 @@ static int mas_spanning_rebalance(struct ma_state *mas, if (mas_is_root_limits(mast->orig_l)) break; - if (!mast_spanning_rebalance(mast)) - break; + mast_spanning_rebalance(mast); /* rebalancing from other nodes may require another loop. */ if (!count) @@ -6521,6 +6520,7 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, max = mas->max; mas->offset = 0; while (likely(!ma_is_leaf(mt))) { + MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); slots = ma_slots(mn, mt); pivots = ma_pivots(mn, mt); max = pivots[0]; @@ -6531,6 +6531,7 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, mn = mas_mn(mas); mt = mte_node_type(mas->node); } + MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); mas->max = max; slots = ma_slots(mn, mt); From 93ea6dc7955aa035fb2a9ace827530585266984a Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Wed, 6 Jul 2022 02:05:37 +0000 Subject: [PATCH 009/321] maple_tree: Fix use of node for global range in mas_wr_spanning_store() When writing a range with a NULL which expands to 0 - ULONG_MAX, don't use a node to store this value. Instead, call mas_new_root() which will set the tree pointer to NULL and free all the nodes. Fix a comment for the allocations in mas_wr_spanning_store(). Add mas_node_count_gfp() and use it to clean up mas_preallocate(). Clean up mas_preallocate() and ensure the ma_state is safe on return. Update maple_tree.h to set alloc = NULL. Link: https://lkml.kernel.org/r/20220706020526.1869453-1-Liam.Howlett@oracle.com Fixes: d0aac5e48048 (Maple Tree: add new data structure) Signed-off-by: Liam R. Howlett Signed-off-by: Andrew Morton --- include/linux/maple_tree.h | 1 + lib/maple_tree.c | 34 +++++++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 3c66423589045..2c9dede989c72 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -434,6 +434,7 @@ struct ma_wr_state { .node = MAS_START, \ .min = 0, \ .max = ULONG_MAX, \ + .alloc = NULL, \ } #define MA_WR_STATE(name, ma_state, wr_entry) \ diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 2cccd8f2153ff..79e07c7dc3231 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1293,17 +1293,31 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used) * there is not enough nodes. 
* @mas: The maple state * @count: The number of nodes needed + * @gfp: the gfp flags */ -static void mas_node_count(struct ma_state *mas, int count) +static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp) { unsigned long allocated = mas_allocated(mas); if (allocated < count) { mas_set_alloc_req(mas, count - allocated); - mas_alloc_nodes(mas, GFP_NOWAIT | __GFP_NOWARN); + mas_alloc_nodes(mas, gfp); } } +/* + * mas_node_count() - Check if enough nodes are allocated and request more if + * there is not enough nodes. + * @mas: The maple state + * @count: The number of nodes needed + * + * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags. + */ +static void mas_node_count(struct ma_state *mas, int count) +{ + return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN); +} + /* * mas_start() - Sets up maple state for operations. * @mas: The maple state. @@ -3962,7 +3976,7 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) if (unlikely(!mas->index && mas->last == ULONG_MAX)) return mas_new_root(mas, wr_mas->entry); /* - * Node rebalancing may occur due to this store, so there may be two new + * Node rebalancing may occur due to this store, so there may be three new * entries per level plus a new root. */ height = mas_mt_height(mas); @@ -3995,6 +4009,12 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) mas->last = l_mas.last = r_mas.last; } + /* expanding NULLs may make this cover the entire range */ + if (!l_mas.index && r_mas.last == ULONG_MAX) { + mas_set_range(mas, 0, ULONG_MAX); + return mas_new_root(mas, wr_mas->entry); + } + memset(&b_node, 0, sizeof(struct maple_big_node)); /* Copy l_mas and store the value in b_node. */ mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); @@ -5657,15 +5677,15 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) { int ret; - mas_set_alloc_req(mas, 1 + mas_mt_height(mas) * 3); - mas_alloc_nodes(mas, gfp); + mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); if (likely(!mas_is_err(mas))) return 0; mas_set_alloc_req(mas, 0); - mas_destroy(mas); ret = xa_err(mas->node); - mas->node = MAS_START; + mas_reset(mas); + mas_destroy(mas); + mas_reset(mas); return ret; } From b6cd9665fe2fcbd47ad3714c22e6013852941c85 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:48 +0000 Subject: [PATCH 010/321] radix tree test suite: add pr_err define define pr_err to printk Link: https://lkml.kernel.org/r/20220404143501.2016403-2-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220504010716.661115-4-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-3-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- tools/testing/radix-tree/linux/kernel.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h index 39867fd80c8fa..c5c9d05f29da9 100644 --- a/tools/testing/radix-tree/linux/kernel.h +++ b/tools/testing/radix-tree/linux/kernel.h @@ -14,6 +14,7 @@ #include "../../../include/linux/kconfig.h" #define printk printf +#define pr_err printk #define pr_info printk #define pr_debug printk #define pr_cont printk From 29d12590670b5243ef017fb9e5676fcc28c0fe7c Mon Sep 17 00:00:00 2001 From: "Liam R. 
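The mas_preallocate() rework in [PATCH 009/321] above targets callers that must not sleep while holding a lock: the worst case of 1 + height * 3 nodes is taken up front with a sleeping allocation, and the store itself then cannot fail for lack of memory. A hedged sketch of that calling pattern; mas_store_prealloc() is assumed here as the companion store helper from the same API, and the tree, range and entry are invented:

static int example_prealloc_store(struct maple_tree *mt, unsigned long first,
                                  unsigned long last, void *entry)
{
        MA_STATE(mas, mt, first, last);
        int ret;

        /* May sleep: allocate the worst-case number of nodes up front. */
        ret = mas_preallocate(&mas, entry, GFP_KERNEL);
        if (ret)
                return ret;

        mtree_lock(mt);
        mas_store_prealloc(&mas, entry);        /* consumes the preallocation */
        mtree_unlock(mt);

        return 0;
}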
Howlett" Date: Tue, 21 Jun 2022 20:46:48 +0000 Subject: [PATCH 011/321] radix tree test suite: add kmem_cache_set_non_kernel() kmem_cache_set_non_kernel() is a mechanism to allow a certain number of kmem_cache_alloc requests to succeed even when GFP_KERNEL is not set in the flags. This functionality allows for testing different paths though the code. Link: https://lkml.kernel.org/r/20220504010716.661115-5-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-4-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Signed-off-by: Matthew Wilcox (Oracle) Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- tools/testing/radix-tree/linux.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index d5c1bcba86fe0..277aa8b70abce 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -23,15 +23,26 @@ struct kmem_cache { int nr_objs; void *objs; void (*ctor)(void *); + unsigned int non_kernel; }; +void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val) +{ + cachep->non_kernel = val; +} + void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, int gfp) + { void *p; - if (!(gfp & __GFP_DIRECT_RECLAIM)) - return NULL; + if (!(gfp & __GFP_DIRECT_RECLAIM)) { + if (!cachep->non_kernel) + return NULL; + + cachep->non_kernel--; + } pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs) { @@ -90,5 +101,6 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align, ret->nr_objs = 0; ret->objs = NULL; ret->ctor = ctor; + ret->non_kernel = 0; return ret; } From 811d02c0855d3e1b533bf1891a34c4791787d2ed Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:48 +0000 Subject: [PATCH 012/321] radix tree test suite: add allocation counts and size to kmem_cache Add functions to get the number of allocations, and total allocations from a kmem_cache. Also add a function to get the allocated size and a way to zero the total allocations. Link: https://lkml.kernel.org/r/20220504010716.661115-6-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-5-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- tools/testing/radix-tree/linux.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index 277aa8b70abce..f20529ae4dbef 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -24,6 +24,8 @@ struct kmem_cache { void *objs; void (*ctor)(void *); unsigned int non_kernel; + unsigned long nr_allocated; + unsigned long nr_tallocated; }; void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val) @@ -31,9 +33,28 @@ void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val) cachep->non_kernel = val; } +unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep) +{ + return cachep->size * cachep->nr_allocated; +} + +unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep) +{ + return cachep->nr_allocated; +} + +unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep) +{ + return cachep->nr_tallocated; +} + +void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep) +{ + cachep->nr_tallocated = 0; +} + void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, int gfp) - { void *p; @@ -64,7 +85,9 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, memset(p, 0, cachep->size); } + uatomic_inc(&cachep->nr_allocated); uatomic_inc(&nr_allocated); + uatomic_inc(&cachep->nr_tallocated); if (kmalloc_verbose) printf("Allocating %p from slab\n", p); return p; @@ -74,6 +97,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) { assert(objp); uatomic_dec(&nr_allocated); + uatomic_dec(&cachep->nr_allocated); if (kmalloc_verbose) printf("Freeing %p to slab\n", objp); pthread_mutex_lock(&cachep->lock); @@ -99,6 +123,8 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align, ret->size = size; ret->align = align; ret->nr_objs = 0; + ret->nr_allocated = 0; + ret->nr_tallocated = 0; ret->objs = NULL; ret->ctor = ctor; ret->non_kernel = 0; From 79080828800799ff49ea0af5a5aeb00dcf166e8e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:49 +0000 Subject: [PATCH 013/321] radix tree test suite: add support for slab bulk APIs Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the radix tree test suite. Link: https://lkml.kernel.org/r/20220504010716.661115-7-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-6-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
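The counters added in [PATCH 012/321] above are read back through the mt_*() wrappers declared earlier in the userspace shim; a sketch of the reporting pattern the tests use after building a tree (illustrative; mt_zero_nr_tallocated() resets the running total before a test starts):

static void example_report_usage(void)
{
        rcu_barrier();          /* let deferred node frees complete first */
        pr_info("%luK in %d active (%d total)\n", mt_get_alloc_size() / 1024,
                mt_nr_allocated(), mt_nr_tallocated());
}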
Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- tools/include/linux/slab.h | 4 ++ tools/testing/radix-tree/linux.c | 118 ++++++++++++++++++++++++++++++- 2 files changed, 120 insertions(+), 2 deletions(-) diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h index 0616409513eb7..311759ea25e92 100644 --- a/tools/include/linux/slab.h +++ b/tools/include/linux/slab.h @@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, unsigned int align, unsigned int flags, void (*ctor)(void *)); +void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list); +int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, + void **list); + #endif /* _TOOLS_SLAB_H */ diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index f20529ae4dbef..2048d12c31df3 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, return p; } -void kmem_cache_free(struct kmem_cache *cachep, void *objp) +void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp) { assert(objp); uatomic_dec(&nr_allocated); uatomic_dec(&cachep->nr_allocated); if (kmalloc_verbose) printf("Freeing %p to slab\n", objp); - pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs > 10 || cachep->align) { memset(objp, POISON_FREE, cachep->size); free(objp); @@ -110,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) node->parent = cachep->objs; cachep->objs = node; } +} + +void kmem_cache_free(struct kmem_cache *cachep, void *objp) +{ + pthread_mutex_lock(&cachep->lock); + kmem_cache_free_locked(cachep, objp); pthread_mutex_unlock(&cachep->lock); } +void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list) +{ + if (kmalloc_verbose) + pr_debug("Bulk free %p[0-%lu]\n", list, size - 1); + + pthread_mutex_lock(&cachep->lock); + for (int i = 0; i < size; i++) + kmem_cache_free_locked(cachep, list[i]); + pthread_mutex_unlock(&cachep->lock); +} + +int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, + void **p) +{ + size_t i; + + if (kmalloc_verbose) + pr_debug("Bulk alloc %lu\n", size); + + if (!(gfp & __GFP_DIRECT_RECLAIM)) { + if (cachep->non_kernel < size) + return 0; + + cachep->non_kernel -= size; + } + + pthread_mutex_lock(&cachep->lock); + if (cachep->nr_objs >= size) { + struct radix_tree_node *node; + + for (i = 0; i < size; i++) { + node = cachep->objs; + cachep->nr_objs--; + cachep->objs = node->parent; + p[i] = node; + node->parent = NULL; + } + pthread_mutex_unlock(&cachep->lock); + } else { + pthread_mutex_unlock(&cachep->lock); + for (i = 0; i < size; i++) { + if (cachep->align) { + posix_memalign(&p[i], cachep->align, + cachep->size * size); + } else { + p[i] = malloc(cachep->size * size); + } + if (cachep->ctor) + cachep->ctor(p[i]); + else if (gfp & __GFP_ZERO) + memset(p[i], 0, cachep->size); + } + } + + for (i = 0; i < size; i++) { + uatomic_inc(&nr_allocated); + uatomic_inc(&cachep->nr_allocated); + uatomic_inc(&cachep->nr_tallocated); + if (kmalloc_verbose) + printf("Allocating %p from slab\n", p[i]); + } + + return size; +} + struct kmem_cache * kmem_cache_create(const char *name, unsigned int size, unsigned int align, unsigned int flags, void (*ctor)(void *)) @@ -130,3 +200,47 @@ kmem_cache_create(const 
char *name, unsigned int size, unsigned int align, ret->non_kernel = 0; return ret; } + +/* + * Test the test infrastructure for kem_cache_alloc/free and bulk counterparts. + */ +void test_kmem_cache_bulk(void) +{ + int i; + void *list[12]; + static struct kmem_cache *test_cache, *test_cache2; + + /* + * Testing the bulk allocators without aligned kmem_cache to force the + * bulk alloc/free to reuse + */ + test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL); + + for (i = 0; i < 5; i++) + list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM); + + for (i = 0; i < 5; i++) + kmem_cache_free(test_cache, list[i]); + assert(test_cache->nr_objs == 5); + + kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list); + kmem_cache_free_bulk(test_cache, 5, list); + + for (i = 0; i < 12 ; i++) + list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM); + + for (i = 0; i < 12; i++) + kmem_cache_free(test_cache, list[i]); + + /* The last free will not be kept around */ + assert(test_cache->nr_objs == 11); + + /* Aligned caches will immediately free */ + test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL); + + kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list); + kmem_cache_free_bulk(test_cache2, 10, list); + assert(!test_cache2->nr_objs); + + +} From e2078534f76ba7fe9e63b1ee9d943f5c5df6649f Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:49 +0000 Subject: [PATCH 014/321] radix tree test suite: add lockdep_is_held to header maple tree uses lockdep_is_held, so define it as external in the header. Link: https://lkml.kernel.org/r/20220504010716.661115-8-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-7-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- tools/testing/radix-tree/linux/lockdep.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h index 016cff473cfc4..62473ab57f99c 100644 --- a/tools/testing/radix-tree/linux/lockdep.h +++ b/tools/testing/radix-tree/linux/lockdep.h @@ -11,4 +11,6 @@ static inline void lockdep_set_class(spinlock_t *lock, struct lock_class_key *key) { } + +extern int lockdep_is_held(const void *); #endif /* _LINUX_LOCKDEP_H */ From c6e1e8559dfeafda63b752f57aa4e4c3fbb451c8 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:53 +0000 Subject: [PATCH 015/321] lib/test_maple_tree: add testing for maple tree This is a test suite that uses the radix test infrastructure. It has been split into its own commit to allow for easier review of the maple tree code. Link: https://lkml.kernel.org/r/20220504010716.661115-9-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220511144304.1430851-3-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220615141921.417598-4-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-8-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- lib/test_maple_tree.c | 38186 ++++++++++++++++++++++++++++ tools/testing/radix-tree/Makefile | 9 +- 2 files changed, 38193 insertions(+), 2 deletions(-) create mode 100644 lib/test_maple_tree.c diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c new file mode 100644 index 0000000000000..9fc0618f4ae73 --- /dev/null +++ b/lib/test_maple_tree.c @@ -0,0 +1,38186 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * test_maple_tree.c: Test the maple tree API + * Copyright (c) 2018 Liam R. Howlett + * Author: Liam R. Howlett + */ + +#include +#include +#include +#include + +#define MTREE_ALLOC_MAX 0x2000000000000Ul +#define CONFIG_DEBUG_MAPLE_TREE +#define CONFIG_MAPLE_SEARCH +/* #define BENCH_SLOT_STORE */ +/* #define BENCH_NODE_STORE */ +/* #define BENCH_AWALK */ +/* #define BENCH_WALK */ +/* #define BENCH_MT_FOR_EACH */ +/* #define BENCH_FORK */ +static +int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp) +{ + return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp); +} + +static void mtree_erase_index(struct maple_tree *mt, unsigned long index) +{ + MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX)); + MT_BUG_ON(mt, mtree_load(mt, index) != NULL); +} + +static int mtree_test_insert(struct maple_tree *mt, unsigned long index, + void *ptr) +{ + return mtree_insert(mt, index, ptr, GFP_KERNEL); +} + +static int mtree_test_store_range(struct maple_tree *mt, unsigned long start, + unsigned long end, void *ptr) +{ + return mtree_store_range(mt, start, end, ptr, GFP_KERNEL); +} + +static int mtree_test_store(struct maple_tree *mt, unsigned long start, + void *ptr) +{ + return mtree_test_store_range(mt, start, start, ptr); +} + +static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start, + unsigned long end, void *ptr) +{ + return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL); +} + +static void *mtree_test_load(struct maple_tree *mt, unsigned long index) +{ + return mtree_load(mt, index); +} + +static void *mtree_test_erase(struct maple_tree *mt, unsigned long index) +{ + return mtree_erase(mt, index); +} + +static noinline void check_mtree_alloc_range(struct maple_tree *mt, + unsigned long start, unsigned long end, unsigned long size, + unsigned long expected, int eret, void *ptr) +{ + + unsigned long result = expected + 1; + int ret; + + ret = mtree_alloc_range(mt, &result, ptr, size, start, end, + GFP_KERNEL); + MT_BUG_ON(mt, ret != eret); + if (ret) + return; + + MT_BUG_ON(mt, result != expected); +} + +static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, + unsigned long start, unsigned long end, unsigned long size, + unsigned long expected, int eret, void *ptr) +{ + + unsigned long result = expected + 1; + int ret; + + ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1, + GFP_KERNEL); + MT_BUG_ON(mt, ret != eret); + if (ret) + return; + + MT_BUG_ON(mt, result != expected); +} + +static noinline void check_load(struct maple_tree *mt, unsigned long index, + void *ptr) +{ + void *ret = mtree_test_load(mt, index); + + if (ret != ptr) + pr_err("Load %lu returned %p expect %p\n", index, ret, ptr); + MT_BUG_ON(mt, ret != ptr); +} + +static noinline void check_store_range(struct maple_tree *mt, + unsigned long start, unsigned long end, void *ptr, int expected) +{ + int ret = -EINVAL; + unsigned long i; + + ret = 
mtree_test_store_range(mt, start, end, ptr); + MT_BUG_ON(mt, ret != expected); + + if (ret) + return; + + for (i = start; i <= end; i++) + check_load(mt, i, ptr); +} + +static noinline void check_insert_range(struct maple_tree *mt, + unsigned long start, unsigned long end, void *ptr, int expected) +{ + int ret = -EINVAL; + unsigned long i; + + ret = mtree_test_insert_range(mt, start, end, ptr); + MT_BUG_ON(mt, ret != expected); + + if (ret) + return; + + for (i = start; i <= end; i++) + check_load(mt, i, ptr); +} + +static noinline void check_insert(struct maple_tree *mt, unsigned long index, + void *ptr) +{ + int ret = -EINVAL; + + ret = mtree_test_insert(mt, index, ptr); + MT_BUG_ON(mt, ret != 0); +} + +static noinline void check_erase(struct maple_tree *mt, unsigned long index, + void *ptr) +{ + MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr); +} + +static noinline void check_dup_insert(struct maple_tree *mt, + unsigned long index, void *ptr) +{ + int ret = -EINVAL; + + ret = mtree_test_insert(mt, index, ptr); + MT_BUG_ON(mt, ret != -EEXIST); +} + + +static noinline +void check_index_load(struct maple_tree *mt, unsigned long index) +{ + return check_load(mt, index, xa_mk_value(index & LONG_MAX)); +} + +static noinline void check_nomem(struct maple_tree *mt) +{ + MA_STATE(ms, mt, 1, 1); + + MT_BUG_ON(mt, !mtree_empty(mt)); + /* Ensure no bypassing of allocation failures */ + mt_set_non_kernel(0); + + /* Storing something at 1 requires memory allocation */ + MT_BUG_ON(mt, mtree_insert(mt, 1, &ms, GFP_ATOMIC) != -ENOMEM); + /* Storing something at 0 does not */ + MT_BUG_ON(mt, mtree_insert(mt, 0, &ms, GFP_ATOMIC) != 0); + + /* + * Simulate two threads racing; the first one fails to allocate + * memory to insert an entry at 1, then the second one succeeds + * in allocating memory to insert an entry at 2. The first one + * then needs to free the node it allocated. LeakSanitizer will + * notice this, as will the 'nr_allocated' debugging aid in the + * userspace test suite. + */ + mtree_lock(mt); + mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */ + MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM)); + mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */ + MT_BUG_ON(mt, ms.node != MAS_START); + mtree_unlock(mt); + MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0); + mtree_lock(mt); + mas_store(&ms, &ms); /* insert 1 -> &ms */ + mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */ + mtree_unlock(mt); + mtree_destroy(mt); +} + +static inline int not_empty(struct maple_node *node) +{ + int i; + + if (node->parent) + return 1; + + for (i = 0; i < ARRAY_SIZE(node->slot); i++) + if (node->slot[i]) + return 1; + + return 0; +} + +static noinline void check_new_node(struct maple_tree *mt) +{ + + struct maple_node *mn, *mn2, *mn3; + struct maple_alloc *smn; + struct maple_node *nodes[100]; + int i, j, total; + + MA_STATE(mas, mt, 0, 0); + + /* Try allocating 3 nodes */ + mtree_lock(mt); + /* request 3 nodes to be allocated. */ + mas_node_count(&mas, 3); + /* Allocation request of 3. */ + MT_BUG_ON(mt, mas_alloc_req(&mas) != 3); + /* Allocate failed. 
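What this block exercises is the internal node request/retry idiom; a condensed sketch of that pattern (illustrative, mirroring the retry loop in mtree_alloc_range() earlier: the first pass allocates with GFP_NOWAIT, and on failure mas_nomem() drops the lock, allocates with the caller's gfp flags and asks for a retry):

static void example_node_request(struct maple_tree *mt)
{
        MA_STATE(mas, mt, 0, 0);

        mtree_lock(mt);
        do {
                mas_node_count(&mas, 3);        /* sets MA_ERROR(-ENOMEM) on failure */
        } while (mas_nomem(&mas, GFP_KERNEL)); /* unlock, allocate, retry */

        /* ... nodes are now available via mas_pop_node()/mas_push_node() ... */

        mas_destroy(&mas);                      /* hand back anything unused */
        mtree_unlock(mt);
}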
*/ + MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + + MT_BUG_ON(mt, mas_allocated(&mas) != 3); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, mas.alloc == NULL); + MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); + mas_push_node(&mas, mn); + mas_nomem(&mas, GFP_KERNEL); /* free */ + mtree_unlock(mt); + + + /* Try allocating 1 node, then 2 more */ + mtree_lock(mt); + /* Set allocation request to 1. */ + mas_set_alloc_req(&mas, 1); + /* Check Allocation request of 1. */ + MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); + mas_set_err(&mas, -ENOMEM); + /* Validate allocation request. */ + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + /* Eat the requested node. */ + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, mn->slot[0] != NULL); + MT_BUG_ON(mt, mn->slot[1] != NULL); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + + ma_free_rcu(mn); + mas.node = MAS_START; + mas_nomem(&mas, GFP_KERNEL); + /* Allocate 3 nodes, will fail. */ + mas_node_count(&mas, 3); + /* Drop the lock and allocate 3 nodes. */ + mas_nomem(&mas, GFP_KERNEL); + /* Ensure 3 are allocated. */ + MT_BUG_ON(mt, mas_allocated(&mas) != 3); + /* Allocation request of 0. */ + MT_BUG_ON(mt, mas_alloc_req(&mas) != 0); + + MT_BUG_ON(mt, mas.alloc == NULL); + MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); + MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); + /* Ensure we counted 3. */ + MT_BUG_ON(mt, mas_allocated(&mas) != 3); + /* Free. */ + mas_nomem(&mas, GFP_KERNEL); + + /* Set allocation request to 1. */ + mas_set_alloc_req(&mas, 1); + MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); + mas_set_err(&mas, -ENOMEM); + /* Validate allocation request. */ + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + MT_BUG_ON(mt, mas_allocated(&mas) != 1); + /* Check the node is only one node. */ + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, mn->slot[0] != NULL); + MT_BUG_ON(mt, mn->slot[1] != NULL); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + mas_push_node(&mas, mn); + MT_BUG_ON(mt, mas_allocated(&mas) != 1); + MT_BUG_ON(mt, mas.alloc->node_count); + + mas_set_alloc_req(&mas, 2); /* request 2 more. */ + MT_BUG_ON(mt, mas_alloc_req(&mas) != 2); + mas_set_err(&mas, -ENOMEM); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + MT_BUG_ON(mt, mas_allocated(&mas) != 3); + MT_BUG_ON(mt, mas.alloc == NULL); + MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); + MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); + for (i = 2; i >= 0; i--) { + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, mas_allocated(&mas) != i); + MT_BUG_ON(mt, !mn); + MT_BUG_ON(mt, not_empty(mn)); + ma_free_rcu(mn); + } + + total = 64; + mas_set_alloc_req(&mas, total); /* request 2 more. 
*/ + MT_BUG_ON(mt, mas_alloc_req(&mas) != total); + mas_set_err(&mas, -ENOMEM); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + for (i = total; i > 0; i--) { + unsigned int e = 0; /* expected node_count */ + + if (i >= 35) + e = i - 35; + else if (i >= 5) + e = i - 5; + else if (i >= 2) + e = i - 2; + MT_BUG_ON(mt, mas.alloc->node_count != e); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mas_allocated(&mas) != i - 1); + MT_BUG_ON(mt, !mn); + ma_free_rcu(mn); + } + + total = 100; + for (i = 1; i < total; i++) { + mas_set_alloc_req(&mas, i); + mas_set_err(&mas, -ENOMEM); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + for (j = i; j > 0; j--) { + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); + MT_BUG_ON(mt, !mn); + MT_BUG_ON(mt, not_empty(mn)); + mas_push_node(&mas, mn); + MT_BUG_ON(mt, mas_allocated(&mas) != j); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); + ma_free_rcu(mn); + } + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + + mas_set_alloc_req(&mas, i); + mas_set_err(&mas, -ENOMEM); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + for (j = 0; j <= i/2; j++) { + MT_BUG_ON(mt, mas_allocated(&mas) != i - j); + nodes[j] = mas_pop_node(&mas); + MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); + } + + while (j) { + j--; + mas_push_node(&mas, nodes[j]); + MT_BUG_ON(mt, mas_allocated(&mas) != i - j); + } + MT_BUG_ON(mt, mas_allocated(&mas) != i); + for (j = 0; j <= i/2; j++) { + MT_BUG_ON(mt, mas_allocated(&mas) != i - j); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + ma_free_rcu(mn); + MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); + } + MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL)); + + } + + /* Set allocation request. */ + total = 500; + mas_node_count(&mas, total); + /* Drop the lock and allocate the nodes. */ + mas_nomem(&mas, GFP_KERNEL); + MT_BUG_ON(mt, !mas.alloc); + i = 1; + smn = mas.alloc; + while (i < total) { + for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) { + i++; + MT_BUG_ON(mt, !smn->slot[j]); + if (i == total) + break; + } + smn = smn->slot[0]; /* next. */ + } + MT_BUG_ON(mt, mas_allocated(&mas) != total); + mas_nomem(&mas, GFP_KERNEL); /* Free. */ + + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + for (i = 1; i < 128; i++) { + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ + for (j = i; j > 0; j--) { /*Free the requests */ + mn = mas_pop_node(&mas); /* get the next node. */ + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, not_empty(mn)); + ma_free_rcu(mn); + } + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + } + + for (i = 1; i < MAPLE_NODE_MASK + 1; i++) { + MA_STATE(mas2, mt, 0, 0); + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ + for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */ + mn = mas_pop_node(&mas); /* get the next node. */ + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, not_empty(mn)); + mas_push_node(&mas2, mn); + MT_BUG_ON(mt, mas_allocated(&mas2) != j); + } + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + MT_BUG_ON(mt, mas_allocated(&mas2) != i); + + for (j = i; j > 0; j--) { /*Free the requests */ + MT_BUG_ON(mt, mas_allocated(&mas2) != j); + mn = mas_pop_node(&mas2); /* get the next node. 
*/ + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, not_empty(mn)); + ma_free_rcu(mn); + } + MT_BUG_ON(mt, mas_allocated(&mas2) != 0); + } + + + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */ + MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); + MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); + + mn = mas_pop_node(&mas); /* get the next node. */ + MT_BUG_ON(mt, mn == NULL); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS); + MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2); + + mas_push_node(&mas, mn); + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); + MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); + + /* Check the limit of pop/push/pop */ + mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */ + MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); + MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); + MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); + MT_BUG_ON(mt, mas_alloc_req(&mas)); + MT_BUG_ON(mt, mas.alloc->node_count); + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); + MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); + mas_push_node(&mas, mn); + MT_BUG_ON(mt, mas.alloc->node_count); + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + ma_free_rcu(mn); + for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) { + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, not_empty(mn)); + ma_free_rcu(mn); + } + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + + + for (i = 3; i < MAPLE_NODE_MASK * 3; i++) { + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mn = mas_pop_node(&mas); /* get the next node. */ + mas_push_node(&mas, mn); /* put it back */ + mas_destroy(&mas); + + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mn = mas_pop_node(&mas); /* get the next node. */ + mn2 = mas_pop_node(&mas); /* get the next node. */ + mas_push_node(&mas, mn); /* put them back */ + mas_push_node(&mas, mn2); + mas_destroy(&mas); + + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mn = mas_pop_node(&mas); /* get the next node. */ + mn2 = mas_pop_node(&mas); /* get the next node. */ + mn3 = mas_pop_node(&mas); /* get the next node. */ + mas_push_node(&mas, mn); /* put them back */ + mas_push_node(&mas, mn2); + mas_push_node(&mas, mn3); + mas_destroy(&mas); + + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mn = mas_pop_node(&mas); /* get the next node. */ + ma_free_rcu(mn); + mas_destroy(&mas); + + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, i); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mn = mas_pop_node(&mas); /* get the next node. */ + ma_free_rcu(mn); + mn = mas_pop_node(&mas); /* get the next node. */ + ma_free_rcu(mn); + mn = mas_pop_node(&mas); /* get the next node. 
*/ + ma_free_rcu(mn); + mas_destroy(&mas); + } + + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, 5); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + MT_BUG_ON(mt, mas_allocated(&mas) != 5); + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, 10); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mas.node = MAS_START; + MT_BUG_ON(mt, mas_allocated(&mas) != 10); + mas_destroy(&mas); + + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1); + mas.node = MA_ERROR(-ENOMEM); + mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */ + mas_nomem(&mas, GFP_KERNEL); /* Fill request */ + mas.node = MAS_START; + MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1); + mas_destroy(&mas); + + mtree_unlock(mt); +} + +static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, + bool verbose) +{ + unsigned long i = max, j; + + MT_BUG_ON(mt, !mtree_empty(mt)); + + mt_zero_nr_tallocated(); + while (i) { + MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL)); + for (j = i; j <= max; j++) + check_index_load(mt, j); + + check_load(mt, i - 1, NULL); + mt_set_in_rcu(mt); + MT_BUG_ON(mt, !mt_height(mt)); + mt_clear_in_rcu(mt); + MT_BUG_ON(mt, !mt_height(mt)); + i--; + } + check_load(mt, max + 1, NULL); + + if (verbose) { + rcu_barrier(); + mt_dump(mt); + pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n", + __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(), + mt_nr_tallocated()); + } +} + +static noinline void check_seq(struct maple_tree *mt, unsigned long max, + bool verbose) +{ + unsigned long i, j; + + MT_BUG_ON(mt, !mtree_empty(mt)); + + mt_zero_nr_tallocated(); + for (i = 0; i <= max; i++) { + MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL)); + for (j = 0; j <= i; j++) + check_index_load(mt, j); + + if (i) + MT_BUG_ON(mt, !mt_height(mt)); + check_load(mt, i + 1, NULL); + } + if (verbose) { + rcu_barrier(); + mt_dump(mt); + pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n", + max, mt_get_alloc_size()/1024, mt_nr_allocated(), + mt_nr_tallocated()); + } +} + +static noinline void check_lb_not_empty(struct maple_tree *mt) +{ + unsigned long i, j; + unsigned long huge = 4000UL * 1000 * 1000; + + + i = huge; + while (i > 4096) { + check_insert(mt, i, (void *) i); + for (j = huge; j >= i; j /= 2) { + check_load(mt, j-1, NULL); + check_load(mt, j, (void *) j); + check_load(mt, j+1, NULL); + } + i /= 2; + } + mtree_destroy(mt); +} + +static noinline void check_lower_bound_split(struct maple_tree *mt) +{ + MT_BUG_ON(mt, !mtree_empty(mt)); + check_lb_not_empty(mt); +} + +static noinline void check_upper_bound_split(struct maple_tree *mt) +{ + unsigned long i, j; + unsigned long huge = 4000UL * 1000 * 1000; + + MT_BUG_ON(mt, !mtree_empty(mt)); + + i = 4096; + while (i < huge) { + check_insert(mt, i, (void *) i); + for (j = i; j >= huge; j *= 2) { + check_load(mt, j-1, NULL); + check_load(mt, j, (void *) j); + check_load(mt, j+1, NULL); + } + i *= 2; + } + mtree_destroy(mt); +} + +static noinline void check_mid_split(struct maple_tree *mt) +{ + unsigned long huge = 8000UL * 1000 * 1000; + + check_insert(mt, huge, (void *) huge); + check_insert(mt, 0, xa_mk_value(0)); + check_lb_not_empty(mt); +} + +static noinline void check_rev_find(struct maple_tree *mt) +{ + int i, nr_entries = 200; + void *val; + MA_STATE(mas, mt, 0, 0); + + for (i = 0; i <= nr_entries; i++) + 
mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + mas_set(&mas, 1000); + val = mas_find_rev(&mas, 1000); + MT_BUG_ON(mt, val != xa_mk_value(100)); + val = mas_find_rev(&mas, 1000); + MT_BUG_ON(mt, val != NULL); + + mas_set(&mas, 999); + val = mas_find_rev(&mas, 997); + MT_BUG_ON(mt, val != NULL); + + mas_set(&mas, 1000); + val = mas_find_rev(&mas, 900); + MT_BUG_ON(mt, val != xa_mk_value(100)); + val = mas_find_rev(&mas, 900); + MT_BUG_ON(mt, val != xa_mk_value(99)); + + mas_set(&mas, 20); + val = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(2)); + val = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(1)); + val = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(0)); + val = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, val != NULL); +} + +static noinline void check_find(struct maple_tree *mt) +{ + unsigned long val = 0; + unsigned long count = 20; + unsigned long max; + unsigned long last = 0, index = 0; + void *entry, *entry2; + + MA_STATE(mas, mt, 0, 0); + + /* Insert 0. */ + MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL)); + + for (int i = 0; i <= count; i++) { + if (val != 64) + MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL)); + else + MT_BUG_ON(mt, mtree_insert(mt, val, + XA_ZERO_ENTRY, GFP_KERNEL)); + + val <<= 2; + } + + val = 0; + mas_set(&mas, val); + mas_lock(&mas); + while ((entry = mas_find(&mas, 268435456)) != NULL) { + if (val != 64) + MT_BUG_ON(mt, xa_mk_value(val) != entry); + else + MT_BUG_ON(mt, entry != XA_ZERO_ENTRY); + + val <<= 2; + /* For zero check. */ + if (!val) + val = 1; + } + mas_unlock(&mas); + + val = 0; + mas_set(&mas, val); + mas_lock(&mas); + mas_for_each(&mas, entry, ULONG_MAX) { + if (val != 64) + MT_BUG_ON(mt, xa_mk_value(val) != entry); + else + MT_BUG_ON(mt, entry != XA_ZERO_ENTRY); + val <<= 2; + /* For zero check. */ + if (!val) + val = 1; + } + mas_unlock(&mas); + + /* Test mas_pause */ + val = 0; + mas_set(&mas, val); + mas_lock(&mas); + mas_for_each(&mas, entry, ULONG_MAX) { + if (val != 64) + MT_BUG_ON(mt, xa_mk_value(val) != entry); + else + MT_BUG_ON(mt, entry != XA_ZERO_ENTRY); + val <<= 2; + /* For zero check. */ + if (!val) + val = 1; + + mas_pause(&mas); + mas_unlock(&mas); + mas_lock(&mas); + } + mas_unlock(&mas); + + val = 0; + max = 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */ + mt_for_each(mt, entry, index, max) { + MT_BUG_ON(mt, xa_mk_value(val) != entry); + val <<= 2; + if (val == 64) /* Skip zero entry. */ + val <<= 2; + /* For zero check. */ + if (!val) + val = 1; + } + + val = 0; + max = 0; + index = 0; + MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL)); + mt_for_each(mt, entry, index, ULONG_MAX) { + if (val == 4398046511104) + MT_BUG_ON(mt, entry != + xa_mk_value(ULONG_MAX & LONG_MAX)); + else + MT_BUG_ON(mt, xa_mk_value(val) != entry); + val <<= 2; + if (val == 64) /* Skip zero entry. */ + val <<= 2; + /* For zero check. */ + if (!val) + val = 1; + max++; + MT_BUG_ON(mt, max > 25); + } + mtree_erase_index(mt, ULONG_MAX); + + mas_reset(&mas); + index = 17; + entry = mt_find(mt, &index, 512); + MT_BUG_ON(mt, xa_mk_value(256) != entry); + + mas_reset(&mas); + index = 17; + entry = mt_find(mt, &index, 20); + MT_BUG_ON(mt, entry != NULL); + + + /* Range check.. 
*/ + /* Insert ULONG_MAX */ + MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL)); + + val = 0; + mas_set(&mas, 0); + mas_lock(&mas); + mas_for_each(&mas, entry, ULONG_MAX) { + if (val == 64) + MT_BUG_ON(mt, entry != XA_ZERO_ENTRY); + else if (val == 4398046511104) + MT_BUG_ON(mt, entry != xa_mk_value(ULONG_MAX & LONG_MAX)); + else + MT_BUG_ON(mt, xa_mk_value(val) != entry); + val <<= 2; + + /* For zero check. */ + if (!val) + val = 1; + mas_pause(&mas); + mas_unlock(&mas); + mas_lock(&mas); + } + mas_unlock(&mas); + + mas_set(&mas, 1048576); + mas_lock(&mas); + entry = mas_find(&mas, 1048576); + mas_unlock(&mas); + MT_BUG_ON(mas.tree, entry == NULL); + + /* + * Find last value. + * 1. get the expected value, leveraging the existence of an end entry + * 2. delete end entry + * 3. find the last value but searching for ULONG_MAX and then using + * prev + */ + /* First, get the expected result. */ + mas_lock(&mas); + mas_reset(&mas); + mas.index = ULONG_MAX; /* start at max.. */ + entry = mas_find(&mas, ULONG_MAX); + entry = mas_prev(&mas, 0); + index = mas.index; + last = mas.last; + + /* Erase the last entry. */ + mas_reset(&mas); + mas.index = ULONG_MAX; + mas.last = ULONG_MAX; + mas_erase(&mas); + + /* Get the previous value from MAS_START */ + mas_reset(&mas); + entry2 = mas_prev(&mas, 0); + + /* Check results. */ + MT_BUG_ON(mt, entry != entry2); + MT_BUG_ON(mt, index != mas.index); + MT_BUG_ON(mt, last != mas.last); + + + mas.node = MAS_NONE; + mas.index = ULONG_MAX; + mas.last = ULONG_MAX; + entry2 = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != entry2); + + mas_set(&mas, 0); + MT_BUG_ON(mt, mas_prev(&mas, 0) != NULL); + + mas_unlock(&mas); + mtree_destroy(mt); +} + +static noinline void check_find_2(struct maple_tree *mt) +{ + unsigned long i, j; + void *entry; + + MA_STATE(mas, mt, 0, 0); + rcu_read_lock(); + mas_for_each(&mas, entry, ULONG_MAX) + MT_BUG_ON(mt, true); + rcu_read_unlock(); + + for (i = 0; i < 256; i++) { + mtree_insert_index(mt, i, GFP_KERNEL); + j = 0; + mas_set(&mas, 0); + rcu_read_lock(); + mas_for_each(&mas, entry, ULONG_MAX) { + MT_BUG_ON(mt, entry != xa_mk_value(j)); + j++; + } + rcu_read_unlock(); + MT_BUG_ON(mt, j != i + 1); + } + + for (i = 0; i < 256; i++) { + mtree_erase_index(mt, i); + j = i + 1; + mas_set(&mas, 0); + rcu_read_lock(); + mas_for_each(&mas, entry, ULONG_MAX) { + if (xa_is_zero(entry)) + continue; + + MT_BUG_ON(mt, entry != xa_mk_value(j)); + j++; + } + rcu_read_unlock(); + MT_BUG_ON(mt, j != 256); + } + + /*MT_BUG_ON(mt, !mtree_empty(mt)); */ +} + +#define erase_ptr(i) entry[i%2] +#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2]) +#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2]) +#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2]) + +static noinline void check_erase_testset(struct maple_tree *mt) +{ + unsigned long set[] = { 5015, 5014, 5017, 25, 1000, + 1001, 1002, 1003, 1005, 0, + 6003, 6002, 6008, 6012, 6015, + 7003, 7002, 7008, 7012, 7015, + 8003, 8002, 8008, 8012, 8015, + 9003, 9002, 9008, 9012, 9015, + 10003, 10002, 10008, 10012, 10015, + 11003, 11002, 11008, 11012, 11015, + 12003, 12002, 12008, 12012, 12015, + 13003, 13002, 13008, 13012, 13015, + 14003, 14002, 14008, 14012, 14015, + 15003, 15002, 15008, 15012, 15015, + }; + + + void *ptr = &set; + void *entry[2] = { ptr, mt }; + void *root_node; + + + rcu_register_thread(); + mt_set_in_rcu(mt); + for (int i = 0; i < 4; i++) + erase_check_insert(mt, i); + for (int i = 0; i < 4; i++) + erase_check_load(mt, i); + + 
mt_set_non_kernel(2); + erase_check_erase(mt, 1); + erase_check_load(mt, 0); + check_load(mt, set[1], NULL); + for (int i = 2; i < 4; i++) + erase_check_load(mt, i); + + + erase_check_erase(mt, 2); + erase_check_load(mt, 0); + check_load(mt, set[1], NULL); + check_load(mt, set[2], NULL); + + erase_check_insert(mt, 1); + erase_check_insert(mt, 2); + + for (int i = 0; i < 4; i++) + erase_check_load(mt, i); + + /* Check erase and load without an allocation. */ + erase_check_load(mt, 3); + erase_check_erase(mt, 1); + erase_check_load(mt, 0); + check_load(mt, set[1], NULL); + for (int i = 2; i < 4; i++) + erase_check_load(mt, i); + + /* + * Set the newly erased node. This will produce a different allocated + * node to avoid busy slots. + */ + root_node = mt->ma_root; + erase_check_insert(mt, 1); + + erase_check_load(mt, 0); + check_load(mt, 5016, NULL); + erase_check_load(mt, 1); + check_load(mt, 5013, NULL); + erase_check_load(mt, 2); + check_load(mt, 5018, NULL); + erase_check_load(mt, 3); + + erase_check_erase(mt, 2); /* erase 5017 to check append */ + erase_check_load(mt, 0); + check_load(mt, 5016, NULL); + erase_check_load(mt, 1); + check_load(mt, 5013, NULL); + check_load(mt, set[2], NULL); + check_load(mt, 5018, NULL); + + erase_check_load(mt, 3); + + root_node = mt->ma_root; + erase_check_insert(mt, 2); + + erase_check_load(mt, 0); + check_load(mt, 5016, NULL); + erase_check_load(mt, 1); + check_load(mt, 5013, NULL); + erase_check_load(mt, 2); + check_load(mt, 5018, NULL); + erase_check_load(mt, 3); + + mt_set_non_kernel(1); + erase_check_erase(mt, 2); /* erase 5017 to check append */ + erase_check_load(mt, 0); + check_load(mt, 5016, NULL); + check_load(mt, set[2], NULL); + erase_check_erase(mt, 0); /* erase 5015 to check append */ + check_load(mt, set[0], NULL); + check_load(mt, 5016, NULL); + erase_check_insert(mt, 4); /* 1000 < Should not split. */ + check_load(mt, set[0], NULL); + check_load(mt, 5016, NULL); + erase_check_load(mt, 1); + check_load(mt, 5013, NULL); + check_load(mt, set[2], NULL); + check_load(mt, 5018, NULL); + erase_check_load(mt, 4); + check_load(mt, 999, NULL); + check_load(mt, 1001, NULL); + erase_check_load(mt, 4); + if (mt_in_rcu(mt)) + MT_BUG_ON(mt, root_node == mt->ma_root); + else + MT_BUG_ON(mt, root_node != mt->ma_root); + + /* Should not have split. */ + MT_BUG_ON(mt, !mte_is_leaf(mt->ma_root)); + + + /* Coalesce testing */ + erase_check_insert(mt, 0); + erase_check_insert(mt, 2); + + for (int i = 5; i < 25; i++) { + erase_check_insert(mt, i); + for (int j = i; j >= 0; j--) + erase_check_load(mt, j); + } + + erase_check_erase(mt, 14); /*6015 */ + for (int i = 0; i < 25; i++) { + if (i == 14) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + erase_check_erase(mt, 16); /*7002 */ + for (int i = 0; i < 25; i++) { + if (i == 16 || i == 14) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + + mt_set_non_kernel(1); + erase_check_erase(mt, 13); /*6012 */ + for (int i = 0; i < 25; i++) { + if (i == 16 || i == 14 || i == 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + erase_check_erase(mt, 15); /*7003 */ + for (int i = 0; i < 25; i++) { + if (i <= 16 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + mt_set_non_kernel(2); + erase_check_erase(mt, 17); /*7008 *should* cause coalesce. 
*/ + for (int i = 0; i < 25; i++) { + if (i <= 17 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + erase_check_erase(mt, 18); /*7012 */ + for (int i = 0; i < 25; i++) { + if (i <= 18 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + mt_set_non_kernel(2); + erase_check_erase(mt, 19); /*7015 */ + for (int i = 0; i < 25; i++) { + if (i <= 19 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + erase_check_erase(mt, 20); /*8003 */ + for (int i = 0; i < 25; i++) { + if (i <= 20 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + erase_check_erase(mt, 21); /*8002 */ + for (int i = 0; i < 25; i++) { + if (i <= 21 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + mt_set_non_kernel(2); + erase_check_erase(mt, 22); /*8008 */ + for (int i = 0; i < 25; i++) { + if (i <= 22 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + for (int i = 23; i < 25; i++) + erase_check_erase(mt, i); + + for (int i = 0; i < 25; i++) { + if (i <= 25 && i >= 13) + check_load(mt, set[i], NULL); + else + erase_check_load(mt, i); + } + + /* Shrinking tree test. */ + + for (int i = 13; i < ARRAY_SIZE(set); i++) + erase_check_insert(mt, i); + + mt_set_non_kernel(99); + for (int i = 18; i < ARRAY_SIZE(set); i++) { + erase_check_erase(mt, i); + for (int j = 0; j < ARRAY_SIZE(set); j++) { + if (j < 18 || j > i) + erase_check_load(mt, j); + else + check_load(mt, set[j], NULL); + } + } + mt_set_non_kernel(35); + for (int i = 0; i < 18; i++) { + erase_check_erase(mt, i); + for (int j = 0; j < ARRAY_SIZE(set); j++) { + if (j < 18 && j > i) + erase_check_load(mt, j); + else + check_load(mt, set[j], NULL); + } + } + erase_check_insert(mt, 8); + erase_check_insert(mt, 9); + erase_check_erase(mt, 8); + rcu_unregister_thread(); +} + +#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, \ + a[(i)], a[(i + 1)], ptr) +#define STORE 1 +#define SNULL 2 +#define ERASE 3 +#define ec_type_str(x) \ + (((x) == STORE) ? \ + "STORE" : \ + (((x) == SNULL) ? \ + "SNULL" : "ERASE") \ + ) +#define check_erase2_debug 0 +void *mas_next(struct ma_state *mas, unsigned long max); + +/* Calculate the overwritten entries. */ +int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end, + void *s_entry, unsigned long s_min, + void *e_entry, unsigned long e_max, + unsigned long *set, int i, bool null_entry) +{ + int count = 0, span = 0; + unsigned long retry = 0; + void *entry; + struct ma_state tmp; + + + /* count slots */ + memcpy(&tmp, mas_start, sizeof(tmp)); + entry = mas_next(&tmp, mas_end->last); + while (entry) { + BUG_ON(retry > 50); /* stop infinite retry on testing. */ + if (xa_is_zero(s_entry)) { + retry++; + continue; + } + count++; + span++; + entry = mas_next(&tmp, mas_end->last); + } + + if (null_entry) { + /* Check splitting end. 
*/ + if (e_entry && (e_max > mas_end->last)) + count--; + + /* check overwrite of entire start */ + if (s_entry && (s_min == mas_start->index)) + count++; + } else { /* !null_entry (store) */ + bool esplit = e_max > mas_end->last; + bool ssplit = s_min != mas_start->index; + + if (s_entry && e_entry) { + if (esplit && ssplit) + count--; + else if (ssplit) + count--; + else if (esplit) { + if (span) + count--; + } + } else if (s_entry && !e_entry) { + if (ssplit) + count--; + } else if (!s_entry && e_entry) { + if (esplit) + count--; + count--; + } else { + count--; + } + } + return count; +} + +/* + * mas_node_walk() - Walk a maple node to offset of the index. + * @mas: The maple state + * @type: The maple node type + * @*range_min: Pointer to store the minimum range of the offset + * @*range_max: Pointer to store the maximum range of the offset + * + * The offset will be stored in the maple state. + * + */ +static inline void mas_node_walk(struct ma_state *mas, struct maple_node *node, + enum maple_type type, unsigned long *range_min, + unsigned long *range_max) + +{ + unsigned long *pivots; + unsigned char count; + unsigned long prev, max; + unsigned char offset; + unsigned long index; + + if (unlikely(ma_is_dense(type))) { + (*range_max) = (*range_min) = mas->index; + if (unlikely(ma_dead_node(node))) + return; + + mas->offset = mas->index = mas->min; + return; + } + + pivots = ma_pivots(node, type); + max = pivots[0]; + if (unlikely(ma_dead_node(node))) + return; + + offset = 0; + prev = mas->min; + index = mas->index; + if (unlikely(index <= max)) + goto offset_zero; + + count = mt_pivots[type]; + while (++offset < count) { + prev = max; + max = pivots[offset]; + if (unlikely(ma_dead_node(node))) + return; + + if (index <= max) + goto offset_found; + else if (unlikely(!max)) + goto mas_max; + } + + prev = max; +mas_max: + max = mas->max; +offset_found: + prev++; +offset_zero: + mas->offset = offset; + if (ma_is_leaf(type)) { + *range_max = max; + *range_min = prev; + } else { + mas->max = max; + mas->min = prev; + } +} + +/* + * mas_descend_walk(): Locates a value and sets the mas->node and slot + * accordingly. range_min and range_max are set to the range which the entry is + * valid. + * @mas: The maple state + * @*range_min: A pointer to store the minimum of the range + * @*range_max: A pointer to store the maximum of the range + * + * Check mas->node is still valid on return of any value. + * + * Return: true if pointing to a valid node and offset. False otherwise. + */ +static inline bool mas_descend_walk(struct ma_state *mas, + unsigned long *range_min, unsigned long *range_max) +{ + struct maple_enode *next; + struct maple_node *node; + enum maple_type type; + + next = mas->node; + while (true) { + node = mte_to_node(next); + type = mte_node_type(next); + mas_node_walk(mas, node, type, range_min, range_max); + next = mas_slot(mas, ma_slots(node, type), mas->offset); + if (unlikely(ma_dead_node(node))) + return false; + + if (unlikely(ma_is_leaf(type))) + return true; + + /* Descend. */ + mas->node = next; + } + return false; +} + +/* + * mas_tree_walk() - Walk to @mas->index and set the range values. + * @mas: The maple state. + * @*range_min: The minimum range to be set. + * @*range_max: The maximum range to be set. + * + * Ranges are only valid if there is a valid entry at @mas->index. + * + * Return: True if a value exists, false otherwise. 
+ */ +static inline bool mas_tree_walk(struct ma_state *mas, unsigned long *range_min, + unsigned long *range_max) +{ + bool ret; + +retry: + ret = false; + mas_start(mas); + if (mas_is_none(mas)) + goto not_found; + + if (mas_is_ptr(mas)) { + *range_min = *range_max = 0; + if (!mas->index) + return true; + + goto not_found; + } + + ret = mas_descend_walk(mas, range_min, range_max); + if (unlikely(mte_dead_node(mas->node))) { + mas->node = MAS_START; + goto retry; + } + + return ret; + +not_found: + mas->offset = MAPLE_NODE_SLOTS; + return false; +} + +static inline void *mas_range_load(struct ma_state *mas, + unsigned long *range_min, unsigned long *range_max) + +{ + void *entry = NULL; + unsigned long index = mas->index; + + if (mas_is_none(mas) || mas_is_paused(mas)) + mas->node = MAS_START; +retry: + if (mas_tree_walk(mas, range_min, range_max)) + if (unlikely(mas->node == MAS_ROOT)) + return mas_root(mas); + + if (likely(mas->offset != MAPLE_NODE_SLOTS)) + entry = mas_get_slot(mas, mas->offset); + + if (mas_dead_node(mas, index)) + goto retry; + + return entry; +} +static noinline void check_erase2_testset(struct maple_tree *mt, + unsigned long *set, unsigned long size) +{ + int entry_count = 0; + int check = 0; + void *foo; + unsigned long addr = 0; + void *s_entry = NULL, *e_entry = NULL; + + MA_STATE(mas, mt, 0, 0); + + for (int i = 0; i < size; i += 3) { + unsigned long s_min, s_max; + unsigned long e_min, e_max; + void *value = NULL; + + MA_STATE(mas_start, mt, set[i+1], set[i+1]); + MA_STATE(mas_end, mt, set[i+2], set[i+2]); + mt_set_non_kernel(127); +#if check_erase2_debug + pr_err("%s: %d %s %lu - %lu\n", __func__, i, + ec_type_str(set[i]), + set[i+1], set[i+2]); +#endif + s_entry = mas_range_load(&mas_start, &s_min, &s_max); + e_entry = mas_range_load(&mas_end, &e_min, &e_max); + + switch (set[i]) { + case SNULL: + if ((s_min == set[i+1]) && (s_max == set[i+2])) { + entry_count--; + } else if ((s_min != set[i+1]) && (s_max != set[i+2])) { + entry_count++; + } else if ((mas_start.node != mas_end.node) || + (mas_start.offset != mas_end.offset)) { + entry_count -= + mas_ce2_over_count(&mas_start, &mas_end, + s_entry, s_min, + e_entry, e_max, set, i, + true); + } + + + erase_check_store_range(mt, set, i + 1, value); + break; + case STORE: + value = xa_mk_value(set[i + 1]); + if (mas_start.offset > mt_slot_count(mas_start.node)) { + entry_count++; /* appending an entry. 
*/ + } else if ((s_min == e_min) && (s_max == e_max)) { + if (!entry_count) + entry_count++; + + else if (s_entry) { + if (e_max > mas_end.last) + entry_count++; + + if (s_min < mas_start.index) + entry_count++; + + } else { + entry_count++; + } + } else { + entry_count -= + mas_ce2_over_count(&mas_start, &mas_end, + s_entry, s_min, + e_entry, e_max, set, i, + false); + } + + erase_check_store_range(mt, set, i + 1, value); + break; + case ERASE: + if (!s_entry) + break; + check_erase(mt, set[i+1], xa_mk_value(set[i+1])); + entry_count--; + break; + } + mt_validate(mt); + if (entry_count) + MT_BUG_ON(mt, !mt_height(mt)); +#if check_erase2_debug > 1 + mt_dump(mt); +#endif +#if check_erase2_debug + pr_err("Done\n"); +#endif + + check = 0; + addr = 0; + mt_for_each(mt, foo, addr, ULONG_MAX) { + check++; +#if check_erase2_debug > 2 + pr_err("mt: %lu -> %p (%d)\n", addr+1, foo, check); +#endif + if (check > entry_count) + break; + } + +#if check_erase2_debug > 2 + pr_err("mt_for_each %d and count %d\n", check, entry_count); +#endif + + MT_BUG_ON(mt, check != entry_count); + + check = 0; + addr = 0; + mas_reset(&mas); + mas.index = 0; + rcu_read_lock(); + mas_for_each(&mas, foo, ULONG_MAX) { + if (xa_is_zero(foo)) { + if (addr == mas.index) { + mt_dump(mas.tree); + pr_err("retry failed %lu - %lu\n", + mas.index, mas.last); + MT_BUG_ON(mt, 1); + } + addr = mas.index; + continue; + } +#if check_erase2_debug > 2 + pr_err("mas: %lu -> %p\n", mas.index, foo); +#endif + check++; + if (check > entry_count) + break; + } + rcu_read_unlock(); +#if check_erase2_debug > 2 + pr_err("mas_for_each %d and count %d\n", check, entry_count); + mt_validate(mt); +#endif + + MT_BUG_ON(mt, check != entry_count); + + MT_BUG_ON(mt, mtree_load(mas.tree, 0) != NULL); + } +} + + +/* These tests were pulled from kvm tests. 
*/ +static noinline void check_erase2_sets(struct maple_tree *mt) +{ + void *entry; + unsigned long start = 0; + unsigned long set[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140721266458624, 140737488351231, +ERASE, 140721266458624, 140737488351231, +STORE, 140721266458624, 140721266462719, +STORE, 94735788949504, 94735789121535, +ERASE, 94735788949504, 94735789121535, +STORE, 94735788949504, 94735788965887, +STORE, 94735788965888, 94735789121535, +ERASE, 94735788965888, 94735789121535, +STORE, 94735788965888, 94735789068287, +STORE, 94735789068288, 94735789109247, +STORE, 94735789109248, 94735789121535, +STORE, 140253902692352, 140253902864383, +ERASE, 140253902692352, 140253902864383, +STORE, 140253902692352, 140253902696447, +STORE, 140253902696448, 140253902864383, + }; + unsigned long set2[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140735933583360, 140737488351231, +ERASE, 140735933583360, 140737488351231, +STORE, 140735933583360, 140735933587455, +STORE, 94811003260928, 94811003432959, +ERASE, 94811003260928, 94811003432959, +STORE, 94811003260928, 94811003277311, +STORE, 94811003277312, 94811003432959, +ERASE, 94811003277312, 94811003432959, +STORE, 94811003277312, 94811003379711, +STORE, 94811003379712, 94811003420671, +STORE, 94811003420672, 94811003432959, +STORE, 140277094653952, 140277094825983, +ERASE, 140277094653952, 140277094825983, +STORE, 140277094653952, 140277094658047, +STORE, 140277094658048, 140277094825983, +ERASE, 140277094658048, 140277094825983, +STORE, 140277094658048, 140277094780927, +STORE, 140277094780928, 140277094813695, +STORE, 140277094813696, 140277094821887, +STORE, 140277094821888, 140277094825983, +STORE, 140735933906944, 140735933911039, + }; + unsigned long set3[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140735790264320, 140737488351231, +ERASE, 140735790264320, 140737488351231, +STORE, 140735790264320, 140735790268415, +STORE, 94016597282816, 94016597454847, +ERASE, 94016597282816, 94016597454847, +STORE, 94016597282816, 94016597299199, +STORE, 94016597299200, 94016597454847, +ERASE, 94016597299200, 94016597454847, +STORE, 94016597299200, 94016597401599, +STORE, 94016597401600, 94016597442559, +STORE, 94016597442560, 94016597454847, +STORE, 140496959283200, 140496959455231, +ERASE, 140496959283200, 140496959455231, +STORE, 140496959283200, 140496959287295, +STORE, 140496959287296, 140496959455231, +ERASE, 140496959287296, 140496959455231, +STORE, 140496959287296, 140496959410175, +STORE, 140496959410176, 140496959442943, +STORE, 140496959442944, 140496959451135, +STORE, 140496959451136, 140496959455231, +STORE, 140735791718400, 140735791722495, +STORE, 140735791706112, 140735791718399, +STORE, 47135835713536, 47135835721727, +STORE, 47135835721728, 47135835729919, +STORE, 47135835729920, 47135835893759, +ERASE, 47135835729920, 47135835893759, +STORE, 47135835729920, 47135835742207, +STORE, 47135835742208, 47135835893759, +STORE, 47135835840512, 47135835893759, +STORE, 47135835742208, 47135835840511, +ERASE, 47135835742208, 47135835840511, +STORE, 47135835742208, 47135835840511, +STORE, 47135835885568, 47135835893759, +STORE, 47135835840512, 47135835885567, +ERASE, 47135835840512, 47135835885567, +STORE, 47135835840512, 47135835893759, +ERASE, 47135835840512, 47135835893759, +STORE, 47135835840512, 47135835885567, +STORE, 47135835885568, 47135835893759, + }; + + unsigned long set4[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140728251703296, 140737488351231, +ERASE, 140728251703296, 140737488351231, 
+STORE, 140728251703296, 140728251707391, +STORE, 94668429205504, 94668429377535, +ERASE, 94668429205504, 94668429377535, +STORE, 94668429205504, 94668429221887, +STORE, 94668429221888, 94668429377535, +ERASE, 94668429221888, 94668429377535, +STORE, 94668429221888, 94668429324287, +STORE, 94668429324288, 94668429365247, +STORE, 94668429365248, 94668429377535, +STORE, 47646523273216, 47646523445247, +ERASE, 47646523273216, 47646523445247, +STORE, 47646523273216, 47646523277311, +STORE, 47646523277312, 47646523445247, +ERASE, 47646523277312, 47646523445247, +STORE, 47646523277312, 47646523400191, + }; + + unsigned long set5[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140726874062848, 140737488351231, +ERASE, 140726874062848, 140737488351231, +STORE, 140726874062848, 140726874066943, +STORE, 94248892870656, 94248893042687, +ERASE, 94248892870656, 94248893042687, +STORE, 94248892870656, 94248892887039, +STORE, 94248892887040, 94248893042687, +ERASE, 94248892887040, 94248893042687, +STORE, 94248892887040, 94248892989439, +STORE, 94248892989440, 94248893030399, +STORE, 94248893030400, 94248893042687, +STORE, 47884786266112, 47884786438143, +ERASE, 47884786266112, 47884786438143, +STORE, 47884786266112, 47884786270207, +STORE, 47884786270208, 47884786438143, +ERASE, 47884786270208, 47884786438143, +STORE, 47884786270208, 47884786393087, +STORE, 47884786393088, 47884786425855, +STORE, 47884786425856, 47884786434047, +STORE, 47884786434048, 47884786438143, +STORE, 140726874513408, 140726874517503, +STORE, 140726874501120, 140726874513407, +STORE, 47884786438144, 47884786446335, +STORE, 47884786446336, 47884786454527, +STORE, 47884786454528, 47884786618367, +ERASE, 47884786454528, 47884786618367, +STORE, 47884786454528, 47884786466815, +STORE, 47884786466816, 47884786618367, +STORE, 47884786565120, 47884786618367, +STORE, 47884786466816, 47884786565119, +ERASE, 47884786466816, 47884786565119, +STORE, 47884786466816, 47884786565119, +STORE, 47884786610176, 47884786618367, +STORE, 47884786565120, 47884786610175, +ERASE, 47884786565120, 47884786610175, +STORE, 47884786565120, 47884786618367, +ERASE, 47884786565120, 47884786618367, +STORE, 47884786565120, 47884786610175, +STORE, 47884786610176, 47884786618367, +ERASE, 47884786610176, 47884786618367, +STORE, 47884786610176, 47884786618367, +STORE, 47884786618368, 47884789669887, +STORE, 47884787163136, 47884789669887, +STORE, 47884786618368, 47884787163135, +ERASE, 47884787163136, 47884789669887, +STORE, 47884787163136, 47884789448703, +STORE, 47884789448704, 47884789669887, +STORE, 47884788858880, 47884789448703, +STORE, 47884787163136, 47884788858879, +ERASE, 47884787163136, 47884788858879, +STORE, 47884787163136, 47884788858879, +STORE, 47884789444608, 47884789448703, +STORE, 47884788858880, 47884789444607, +ERASE, 47884788858880, 47884789444607, +STORE, 47884788858880, 47884789444607, +STORE, 47884789653504, 47884789669887, +STORE, 47884789448704, 47884789653503, +ERASE, 47884789448704, 47884789653503, +STORE, 47884789448704, 47884789653503, +ERASE, 47884789653504, 47884789669887, +STORE, 47884789653504, 47884789669887, +STORE, 47884789669888, 47884791508991, +STORE, 47884789809152, 47884791508991, +STORE, 47884789669888, 47884789809151, +ERASE, 47884789809152, 47884791508991, +STORE, 47884789809152, 47884791468031, +STORE, 47884791468032, 47884791508991, +STORE, 47884791152640, 47884791468031, +STORE, 47884789809152, 47884791152639, +ERASE, 47884789809152, 47884791152639, +STORE, 47884789809152, 47884791152639, +STORE, 47884791463936, 
47884791468031, +STORE, 47884791152640, 47884791463935, +ERASE, 47884791152640, 47884791463935, +STORE, 47884791152640, 47884791463935, +STORE, 47884791492608, 47884791508991, +STORE, 47884791468032, 47884791492607, +ERASE, 47884791468032, 47884791492607, +STORE, 47884791468032, 47884791492607, +ERASE, 47884791492608, 47884791508991, +STORE, 47884791492608, 47884791508991, +STORE, 47884791508992, 47884791644159, +ERASE, 47884791508992, 47884791644159, +STORE, 47884791508992, 47884791533567, +STORE, 47884791533568, 47884791644159, +STORE, 47884791595008, 47884791644159, +STORE, 47884791533568, 47884791595007, +ERASE, 47884791533568, 47884791595007, +STORE, 47884791533568, 47884791595007, +STORE, 47884791619584, 47884791644159, +STORE, 47884791595008, 47884791619583, +ERASE, 47884791595008, 47884791619583, +STORE, 47884791595008, 47884791644159, +ERASE, 47884791595008, 47884791644159, +STORE, 47884791595008, 47884791619583, +STORE, 47884791619584, 47884791644159, +STORE, 47884791627776, 47884791644159, +STORE, 47884791619584, 47884791627775, +ERASE, 47884791619584, 47884791627775, +STORE, 47884791619584, 47884791627775, +ERASE, 47884791627776, 47884791644159, +STORE, 47884791627776, 47884791644159, +STORE, 47884791644160, 47884791664639, +ERASE, 47884791644160, 47884791664639, +STORE, 47884791644160, 47884791648255, +STORE, 47884791648256, 47884791664639, +STORE, 47884791652352, 47884791664639, +STORE, 47884791648256, 47884791652351, +ERASE, 47884791648256, 47884791652351, +STORE, 47884791648256, 47884791652351, +STORE, 47884791656448, 47884791664639, +STORE, 47884791652352, 47884791656447, +ERASE, 47884791652352, 47884791656447, +STORE, 47884791652352, 47884791664639, +ERASE, 47884791652352, 47884791664639, +STORE, 47884791652352, 47884791656447, +STORE, 47884791656448, 47884791664639, +ERASE, 47884791656448, 47884791664639, +STORE, 47884791656448, 47884791664639, +STORE, 47884791664640, 47884791672831, +ERASE, 47884791468032, 47884791492607, +STORE, 47884791468032, 47884791484415, +STORE, 47884791484416, 47884791492607, +ERASE, 47884791656448, 47884791664639, +STORE, 47884791656448, 47884791660543, +STORE, 47884791660544, 47884791664639, +ERASE, 47884791619584, 47884791627775, +STORE, 47884791619584, 47884791623679, +STORE, 47884791623680, 47884791627775, + }; + + unsigned long set6[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140722999021568, 140737488351231, +ERASE, 140722999021568, 140737488351231, +STORE, 140722999021568, 140722999025663, +STORE, 94901500268544, 94901500440575, +ERASE, 94901500268544, 94901500440575, +STORE, 94901500268544, 94901500284927, +STORE, 94901500284928, 94901500440575, +ERASE, 94901500284928, 94901500440575, +STORE, 94901500284928, 94901500387327, +STORE, 94901500387328, 94901500428287, +STORE, 94901500428288, 94901500440575, +STORE, 47430426660864, 47430426832895, +ERASE, 47430426660864, 47430426832895, +STORE, 47430426660864, 47430426664959, +STORE, 47430426664960, 47430426832895, +ERASE, 47430426664960, 47430426832895, +STORE, 47430426664960, 47430426787839, +STORE, 47430426787840, 47430426820607, +STORE, 47430426820608, 47430426828799, +STORE, 47430426828800, 47430426832895, +STORE, 140722999115776, 140722999119871, +STORE, 140722999103488, 140722999115775, +STORE, 47430426832896, 47430426841087, +STORE, 47430426841088, 47430426849279, +STORE, 47430426849280, 47430427013119, +ERASE, 47430426849280, 47430427013119, +STORE, 47430426849280, 47430426861567, +STORE, 47430426861568, 47430427013119, +STORE, 47430426959872, 47430427013119, +STORE, 
47430426861568, 47430426959871, +ERASE, 47430426861568, 47430426959871, +STORE, 47430426861568, 47430426959871, +STORE, 47430427004928, 47430427013119, +STORE, 47430426959872, 47430427004927, +ERASE, 47430426959872, 47430427004927, +STORE, 47430426959872, 47430427013119, +ERASE, 47430426959872, 47430427013119, +STORE, 47430426959872, 47430427004927, +STORE, 47430427004928, 47430427013119, +ERASE, 47430427004928, 47430427013119, +STORE, 47430427004928, 47430427013119, +STORE, 47430427013120, 47430430064639, +STORE, 47430427557888, 47430430064639, +STORE, 47430427013120, 47430427557887, +ERASE, 47430427557888, 47430430064639, +STORE, 47430427557888, 47430429843455, +STORE, 47430429843456, 47430430064639, +STORE, 47430429253632, 47430429843455, +STORE, 47430427557888, 47430429253631, +ERASE, 47430427557888, 47430429253631, +STORE, 47430427557888, 47430429253631, +STORE, 47430429839360, 47430429843455, +STORE, 47430429253632, 47430429839359, +ERASE, 47430429253632, 47430429839359, +STORE, 47430429253632, 47430429839359, +STORE, 47430430048256, 47430430064639, +STORE, 47430429843456, 47430430048255, +ERASE, 47430429843456, 47430430048255, +STORE, 47430429843456, 47430430048255, +ERASE, 47430430048256, 47430430064639, +STORE, 47430430048256, 47430430064639, +STORE, 47430430064640, 47430431903743, +STORE, 47430430203904, 47430431903743, +STORE, 47430430064640, 47430430203903, +ERASE, 47430430203904, 47430431903743, +STORE, 47430430203904, 47430431862783, +STORE, 47430431862784, 47430431903743, +STORE, 47430431547392, 47430431862783, +STORE, 47430430203904, 47430431547391, +ERASE, 47430430203904, 47430431547391, +STORE, 47430430203904, 47430431547391, +STORE, 47430431858688, 47430431862783, +STORE, 47430431547392, 47430431858687, +ERASE, 47430431547392, 47430431858687, +STORE, 47430431547392, 47430431858687, +STORE, 47430431887360, 47430431903743, +STORE, 47430431862784, 47430431887359, +ERASE, 47430431862784, 47430431887359, +STORE, 47430431862784, 47430431887359, +ERASE, 47430431887360, 47430431903743, +STORE, 47430431887360, 47430431903743, +STORE, 47430431903744, 47430432038911, +ERASE, 47430431903744, 47430432038911, +STORE, 47430431903744, 47430431928319, +STORE, 47430431928320, 47430432038911, +STORE, 47430431989760, 47430432038911, +STORE, 47430431928320, 47430431989759, +ERASE, 47430431928320, 47430431989759, +STORE, 47430431928320, 47430431989759, +STORE, 47430432014336, 47430432038911, +STORE, 47430431989760, 47430432014335, +ERASE, 47430431989760, 47430432014335, +STORE, 47430431989760, 47430432038911, +ERASE, 47430431989760, 47430432038911, +STORE, 47430431989760, 47430432014335, +STORE, 47430432014336, 47430432038911, +STORE, 47430432022528, 47430432038911, +STORE, 47430432014336, 47430432022527, +ERASE, 47430432014336, 47430432022527, +STORE, 47430432014336, 47430432022527, +ERASE, 47430432022528, 47430432038911, +STORE, 47430432022528, 47430432038911, +STORE, 47430432038912, 47430432059391, +ERASE, 47430432038912, 47430432059391, +STORE, 47430432038912, 47430432043007, +STORE, 47430432043008, 47430432059391, +STORE, 47430432047104, 47430432059391, +STORE, 47430432043008, 47430432047103, +ERASE, 47430432043008, 47430432047103, +STORE, 47430432043008, 47430432047103, +STORE, 47430432051200, 47430432059391, +STORE, 47430432047104, 47430432051199, +ERASE, 47430432047104, 47430432051199, +STORE, 47430432047104, 47430432059391, +ERASE, 47430432047104, 47430432059391, +STORE, 47430432047104, 47430432051199, +STORE, 47430432051200, 47430432059391, +ERASE, 47430432051200, 47430432059391, 
+STORE, 47430432051200, 47430432059391, +STORE, 47430432059392, 47430432067583, +ERASE, 47430431862784, 47430431887359, +STORE, 47430431862784, 47430431879167, +STORE, 47430431879168, 47430431887359, +ERASE, 47430432051200, 47430432059391, +STORE, 47430432051200, 47430432055295, +STORE, 47430432055296, 47430432059391, +ERASE, 47430432014336, 47430432022527, +STORE, 47430432014336, 47430432018431, +STORE, 47430432018432, 47430432022527, + }; + unsigned long set7[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140729808330752, 140737488351231, +ERASE, 140729808330752, 140737488351231, +STORE, 140729808330752, 140729808334847, +STORE, 94629632020480, 94629632192511, +ERASE, 94629632020480, 94629632192511, +STORE, 94629632020480, 94629632036863, +STORE, 94629632036864, 94629632192511, +ERASE, 94629632036864, 94629632192511, +STORE, 94629632036864, 94629632139263, +STORE, 94629632139264, 94629632180223, +STORE, 94629632180224, 94629632192511, +STORE, 47439981776896, 47439981948927, +ERASE, 47439981776896, 47439981948927, +STORE, 47439981776896, 47439981780991, +STORE, 47439981780992, 47439981948927, +ERASE, 47439981780992, 47439981948927, +STORE, 47439981780992, 47439981903871, +STORE, 47439981903872, 47439981936639, +STORE, 47439981936640, 47439981944831, +STORE, 47439981944832, 47439981948927, +STORE, 140729808474112, 140729808478207, +STORE, 140729808461824, 140729808474111, +STORE, 47439981948928, 47439981957119, +STORE, 47439981957120, 47439981965311, +STORE, 47439981965312, 47439982129151, +ERASE, 47439981965312, 47439982129151, +STORE, 47439981965312, 47439981977599, +STORE, 47439981977600, 47439982129151, +STORE, 47439982075904, 47439982129151, +STORE, 47439981977600, 47439982075903, +ERASE, 47439981977600, 47439982075903, +STORE, 47439981977600, 47439982075903, +STORE, 47439982120960, 47439982129151, +STORE, 47439982075904, 47439982120959, +ERASE, 47439982075904, 47439982120959, +STORE, 47439982075904, 47439982129151, +ERASE, 47439982075904, 47439982129151, +STORE, 47439982075904, 47439982120959, +STORE, 47439982120960, 47439982129151, +ERASE, 47439982120960, 47439982129151, +STORE, 47439982120960, 47439982129151, +STORE, 47439982129152, 47439985180671, +STORE, 47439982673920, 47439985180671, +STORE, 47439982129152, 47439982673919, +ERASE, 47439982673920, 47439985180671, +STORE, 47439982673920, 47439984959487, +STORE, 47439984959488, 47439985180671, +STORE, 47439984369664, 47439984959487, +STORE, 47439982673920, 47439984369663, +ERASE, 47439982673920, 47439984369663, +STORE, 47439982673920, 47439984369663, +STORE, 47439984955392, 47439984959487, +STORE, 47439984369664, 47439984955391, +ERASE, 47439984369664, 47439984955391, +STORE, 47439984369664, 47439984955391, +STORE, 47439985164288, 47439985180671, +STORE, 47439984959488, 47439985164287, +ERASE, 47439984959488, 47439985164287, +STORE, 47439984959488, 47439985164287, +ERASE, 47439985164288, 47439985180671, +STORE, 47439985164288, 47439985180671, +STORE, 47439985180672, 47439987019775, +STORE, 47439985319936, 47439987019775, +STORE, 47439985180672, 47439985319935, +ERASE, 47439985319936, 47439987019775, +STORE, 47439985319936, 47439986978815, +STORE, 47439986978816, 47439987019775, +STORE, 47439986663424, 47439986978815, +STORE, 47439985319936, 47439986663423, +ERASE, 47439985319936, 47439986663423, +STORE, 47439985319936, 47439986663423, +STORE, 47439986974720, 47439986978815, +STORE, 47439986663424, 47439986974719, +ERASE, 47439986663424, 47439986974719, +STORE, 47439986663424, 47439986974719, +STORE, 47439987003392, 
47439987019775, +STORE, 47439986978816, 47439987003391, +ERASE, 47439986978816, 47439987003391, +STORE, 47439986978816, 47439987003391, +ERASE, 47439987003392, 47439987019775, +STORE, 47439987003392, 47439987019775, +STORE, 47439987019776, 47439987154943, +ERASE, 47439987019776, 47439987154943, +STORE, 47439987019776, 47439987044351, +STORE, 47439987044352, 47439987154943, +STORE, 47439987105792, 47439987154943, +STORE, 47439987044352, 47439987105791, +ERASE, 47439987044352, 47439987105791, +STORE, 47439987044352, 47439987105791, +STORE, 47439987130368, 47439987154943, +STORE, 47439987105792, 47439987130367, +ERASE, 47439987105792, 47439987130367, +STORE, 47439987105792, 47439987154943, +ERASE, 47439987105792, 47439987154943, +STORE, 47439987105792, 47439987130367, +STORE, 47439987130368, 47439987154943, +STORE, 47439987138560, 47439987154943, +STORE, 47439987130368, 47439987138559, +ERASE, 47439987130368, 47439987138559, +STORE, 47439987130368, 47439987138559, +ERASE, 47439987138560, 47439987154943, +STORE, 47439987138560, 47439987154943, +STORE, 47439987154944, 47439987175423, +ERASE, 47439987154944, 47439987175423, +STORE, 47439987154944, 47439987159039, +STORE, 47439987159040, 47439987175423, +STORE, 47439987163136, 47439987175423, +STORE, 47439987159040, 47439987163135, +ERASE, 47439987159040, 47439987163135, +STORE, 47439987159040, 47439987163135, +STORE, 47439987167232, 47439987175423, +STORE, 47439987163136, 47439987167231, +ERASE, 47439987163136, 47439987167231, +STORE, 47439987163136, 47439987175423, +ERASE, 47439987163136, 47439987175423, +STORE, 47439987163136, 47439987167231, +STORE, 47439987167232, 47439987175423, +ERASE, 47439987167232, 47439987175423, +STORE, 47439987167232, 47439987175423, +STORE, 47439987175424, 47439987183615, +ERASE, 47439986978816, 47439987003391, +STORE, 47439986978816, 47439986995199, +STORE, 47439986995200, 47439987003391, +ERASE, 47439987167232, 47439987175423, +STORE, 47439987167232, 47439987171327, +STORE, 47439987171328, 47439987175423, +ERASE, 47439987130368, 47439987138559, +STORE, 47439987130368, 47439987134463, +STORE, 47439987134464, 47439987138559, + }; + unsigned long set8[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140722482974720, 140737488351231, +ERASE, 140722482974720, 140737488351231, +STORE, 140722482974720, 140722482978815, +STORE, 94121505034240, 94121505206271, +ERASE, 94121505034240, 94121505206271, +STORE, 94121505034240, 94121505050623, +STORE, 94121505050624, 94121505206271, +ERASE, 94121505050624, 94121505206271, +STORE, 94121505050624, 94121505153023, +STORE, 94121505153024, 94121505193983, +STORE, 94121505193984, 94121505206271, +STORE, 47708483284992, 47708483457023, +ERASE, 47708483284992, 47708483457023, +STORE, 47708483284992, 47708483289087, +STORE, 47708483289088, 47708483457023, +ERASE, 47708483289088, 47708483457023, +STORE, 47708483289088, 47708483411967, +STORE, 47708483411968, 47708483444735, +STORE, 47708483444736, 47708483452927, +STORE, 47708483452928, 47708483457023, +STORE, 140722483142656, 140722483146751, +STORE, 140722483130368, 140722483142655, +STORE, 47708483457024, 47708483465215, +STORE, 47708483465216, 47708483473407, +STORE, 47708483473408, 47708483637247, +ERASE, 47708483473408, 47708483637247, +STORE, 47708483473408, 47708483485695, +STORE, 47708483485696, 47708483637247, +STORE, 47708483584000, 47708483637247, +STORE, 47708483485696, 47708483583999, +ERASE, 47708483485696, 47708483583999, +STORE, 47708483485696, 47708483583999, +STORE, 47708483629056, 47708483637247, +STORE, 
47708483584000, 47708483629055, +ERASE, 47708483584000, 47708483629055, +STORE, 47708483584000, 47708483637247, +ERASE, 47708483584000, 47708483637247, +STORE, 47708483584000, 47708483629055, +STORE, 47708483629056, 47708483637247, +ERASE, 47708483629056, 47708483637247, +STORE, 47708483629056, 47708483637247, +STORE, 47708483637248, 47708486688767, +STORE, 47708484182016, 47708486688767, +STORE, 47708483637248, 47708484182015, +ERASE, 47708484182016, 47708486688767, +STORE, 47708484182016, 47708486467583, +STORE, 47708486467584, 47708486688767, +STORE, 47708485877760, 47708486467583, +STORE, 47708484182016, 47708485877759, +ERASE, 47708484182016, 47708485877759, +STORE, 47708484182016, 47708485877759, +STORE, 47708486463488, 47708486467583, +STORE, 47708485877760, 47708486463487, +ERASE, 47708485877760, 47708486463487, +STORE, 47708485877760, 47708486463487, +STORE, 47708486672384, 47708486688767, +STORE, 47708486467584, 47708486672383, +ERASE, 47708486467584, 47708486672383, +STORE, 47708486467584, 47708486672383, +ERASE, 47708486672384, 47708486688767, +STORE, 47708486672384, 47708486688767, +STORE, 47708486688768, 47708488527871, +STORE, 47708486828032, 47708488527871, +STORE, 47708486688768, 47708486828031, +ERASE, 47708486828032, 47708488527871, +STORE, 47708486828032, 47708488486911, +STORE, 47708488486912, 47708488527871, +STORE, 47708488171520, 47708488486911, +STORE, 47708486828032, 47708488171519, +ERASE, 47708486828032, 47708488171519, +STORE, 47708486828032, 47708488171519, +STORE, 47708488482816, 47708488486911, +STORE, 47708488171520, 47708488482815, +ERASE, 47708488171520, 47708488482815, +STORE, 47708488171520, 47708488482815, +STORE, 47708488511488, 47708488527871, +STORE, 47708488486912, 47708488511487, +ERASE, 47708488486912, 47708488511487, +STORE, 47708488486912, 47708488511487, +ERASE, 47708488511488, 47708488527871, +STORE, 47708488511488, 47708488527871, +STORE, 47708488527872, 47708488663039, +ERASE, 47708488527872, 47708488663039, +STORE, 47708488527872, 47708488552447, +STORE, 47708488552448, 47708488663039, +STORE, 47708488613888, 47708488663039, +STORE, 47708488552448, 47708488613887, +ERASE, 47708488552448, 47708488613887, +STORE, 47708488552448, 47708488613887, +STORE, 47708488638464, 47708488663039, +STORE, 47708488613888, 47708488638463, +ERASE, 47708488613888, 47708488638463, +STORE, 47708488613888, 47708488663039, +ERASE, 47708488613888, 47708488663039, +STORE, 47708488613888, 47708488638463, +STORE, 47708488638464, 47708488663039, +STORE, 47708488646656, 47708488663039, +STORE, 47708488638464, 47708488646655, +ERASE, 47708488638464, 47708488646655, +STORE, 47708488638464, 47708488646655, +ERASE, 47708488646656, 47708488663039, +STORE, 47708488646656, 47708488663039, +STORE, 47708488663040, 47708488683519, +ERASE, 47708488663040, 47708488683519, +STORE, 47708488663040, 47708488667135, +STORE, 47708488667136, 47708488683519, +STORE, 47708488671232, 47708488683519, +STORE, 47708488667136, 47708488671231, +ERASE, 47708488667136, 47708488671231, +STORE, 47708488667136, 47708488671231, +STORE, 47708488675328, 47708488683519, +STORE, 47708488671232, 47708488675327, +ERASE, 47708488671232, 47708488675327, +STORE, 47708488671232, 47708488683519, +ERASE, 47708488671232, 47708488683519, +STORE, 47708488671232, 47708488675327, +STORE, 47708488675328, 47708488683519, +ERASE, 47708488675328, 47708488683519, +STORE, 47708488675328, 47708488683519, +STORE, 47708488683520, 47708488691711, +ERASE, 47708488486912, 47708488511487, +STORE, 47708488486912, 47708488503295, 
+STORE, 47708488503296, 47708488511487, +ERASE, 47708488675328, 47708488683519, +STORE, 47708488675328, 47708488679423, +STORE, 47708488679424, 47708488683519, +ERASE, 47708488638464, 47708488646655, +STORE, 47708488638464, 47708488642559, +STORE, 47708488642560, 47708488646655, + }; + + unsigned long set9[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140736427839488, 140737488351231, +ERASE, 140736427839488, 140736427839488, +STORE, 140736427839488, 140736427843583, +STORE, 94071213395968, 94071213567999, +ERASE, 94071213395968, 94071213395968, +STORE, 94071213395968, 94071213412351, +STORE, 94071213412352, 94071213567999, +ERASE, 94071213412352, 94071213412352, +STORE, 94071213412352, 94071213514751, +STORE, 94071213514752, 94071213555711, +STORE, 94071213555712, 94071213567999, +STORE, 139968410644480, 139968410816511, +ERASE, 139968410644480, 139968410644480, +STORE, 139968410644480, 139968410648575, +STORE, 139968410648576, 139968410816511, +ERASE, 139968410648576, 139968410648576, +STORE, 139968410648576, 139968410771455, +STORE, 139968410771456, 139968410804223, +STORE, 139968410804224, 139968410812415, +STORE, 139968410812416, 139968410816511, +STORE, 140736429277184, 140736429281279, +STORE, 140736429264896, 140736429277183, +STORE, 47664384352256, 47664384360447, +STORE, 47664384360448, 47664384368639, +STORE, 47664384368640, 47664384532479, +ERASE, 47664384368640, 47664384368640, +STORE, 47664384368640, 47664384380927, +STORE, 47664384380928, 47664384532479, +STORE, 47664384479232, 47664384532479, +STORE, 47664384380928, 47664384479231, +ERASE, 47664384380928, 47664384380928, +STORE, 47664384380928, 47664384479231, +STORE, 47664384524288, 47664384532479, +STORE, 47664384479232, 47664384524287, +ERASE, 47664384479232, 47664384479232, +STORE, 47664384479232, 47664384532479, +ERASE, 47664384479232, 47664384479232, +STORE, 47664384479232, 47664384524287, +STORE, 47664384524288, 47664384532479, +ERASE, 47664384524288, 47664384524288, +STORE, 47664384524288, 47664384532479, +STORE, 47664384532480, 47664387583999, +STORE, 47664385077248, 47664387583999, +STORE, 47664384532480, 47664385077247, +ERASE, 47664385077248, 47664385077248, +STORE, 47664385077248, 47664387362815, +STORE, 47664387362816, 47664387583999, +STORE, 47664386772992, 47664387362815, +STORE, 47664385077248, 47664386772991, +ERASE, 47664385077248, 47664385077248, +STORE, 47664385077248, 47664386772991, +STORE, 47664387358720, 47664387362815, +STORE, 47664386772992, 47664387358719, +ERASE, 47664386772992, 47664386772992, +STORE, 47664386772992, 47664387358719, +STORE, 47664387567616, 47664387583999, +STORE, 47664387362816, 47664387567615, +ERASE, 47664387362816, 47664387362816, +STORE, 47664387362816, 47664387567615, +ERASE, 47664387567616, 47664387567616, +STORE, 47664387567616, 47664387583999, +STORE, 47664387584000, 47664389423103, +STORE, 47664387723264, 47664389423103, +STORE, 47664387584000, 47664387723263, +ERASE, 47664387723264, 47664387723264, +STORE, 47664387723264, 47664389382143, +STORE, 47664389382144, 47664389423103, +STORE, 47664389066752, 47664389382143, +STORE, 47664387723264, 47664389066751, +ERASE, 47664387723264, 47664387723264, +STORE, 47664387723264, 47664389066751, +STORE, 47664389378048, 47664389382143, +STORE, 47664389066752, 47664389378047, +ERASE, 47664389066752, 47664389066752, +STORE, 47664389066752, 47664389378047, +STORE, 47664389406720, 47664389423103, +STORE, 47664389382144, 47664389406719, +ERASE, 47664389382144, 47664389382144, +STORE, 47664389382144, 47664389406719, +ERASE, 
47664389406720, 47664389406720, +STORE, 47664389406720, 47664389423103, +STORE, 47664389423104, 47664389558271, +ERASE, 47664389423104, 47664389423104, +STORE, 47664389423104, 47664389447679, +STORE, 47664389447680, 47664389558271, +STORE, 47664389509120, 47664389558271, +STORE, 47664389447680, 47664389509119, +ERASE, 47664389447680, 47664389447680, +STORE, 47664389447680, 47664389509119, +STORE, 47664389533696, 47664389558271, +STORE, 47664389509120, 47664389533695, +ERASE, 47664389509120, 47664389509120, +STORE, 47664389509120, 47664389558271, +ERASE, 47664389509120, 47664389509120, +STORE, 47664389509120, 47664389533695, +STORE, 47664389533696, 47664389558271, +STORE, 47664389541888, 47664389558271, +STORE, 47664389533696, 47664389541887, +ERASE, 47664389533696, 47664389533696, +STORE, 47664389533696, 47664389541887, +ERASE, 47664389541888, 47664389541888, +STORE, 47664389541888, 47664389558271, +STORE, 47664389558272, 47664389578751, +ERASE, 47664389558272, 47664389558272, +STORE, 47664389558272, 47664389562367, +STORE, 47664389562368, 47664389578751, +STORE, 47664389566464, 47664389578751, +STORE, 47664389562368, 47664389566463, +ERASE, 47664389562368, 47664389562368, +STORE, 47664389562368, 47664389566463, +STORE, 47664389570560, 47664389578751, +STORE, 47664389566464, 47664389570559, +ERASE, 47664389566464, 47664389566464, +STORE, 47664389566464, 47664389578751, +ERASE, 47664389566464, 47664389566464, +STORE, 47664389566464, 47664389570559, +STORE, 47664389570560, 47664389578751, +ERASE, 47664389570560, 47664389570560, +STORE, 47664389570560, 47664389578751, +STORE, 47664389578752, 47664389586943, +ERASE, 47664389382144, 47664389382144, +STORE, 47664389382144, 47664389398527, +STORE, 47664389398528, 47664389406719, +ERASE, 47664389570560, 47664389570560, +STORE, 47664389570560, 47664389574655, +STORE, 47664389574656, 47664389578751, +ERASE, 47664389533696, 47664389533696, +STORE, 47664389533696, 47664389537791, +STORE, 47664389537792, 47664389541887, +ERASE, 47664387362816, 47664387362816, +STORE, 47664387362816, 47664387559423, +STORE, 47664387559424, 47664387567615, +ERASE, 47664384524288, 47664384524288, +STORE, 47664384524288, 47664384528383, +STORE, 47664384528384, 47664384532479, +ERASE, 94071213555712, 94071213555712, +STORE, 94071213555712, 94071213563903, +STORE, 94071213563904, 94071213567999, +ERASE, 139968410804224, 139968410804224, +STORE, 139968410804224, 139968410808319, +STORE, 139968410808320, 139968410812415, +ERASE, 47664384352256, 47664384352256, +STORE, 94071244402688, 94071244537855, +STORE, 140737488347136, 140737488351231, +STORE, 140728271503360, 140737488351231, +ERASE, 140728271503360, 140728271503360, +STORE, 140728271503360, 140728271507455, +STORE, 94410361982976, 94410362155007, +ERASE, 94410361982976, 94410361982976, +STORE, 94410361982976, 94410361999359, +STORE, 94410361999360, 94410362155007, +ERASE, 94410361999360, 94410361999360, +STORE, 94410361999360, 94410362101759, +STORE, 94410362101760, 94410362142719, +STORE, 94410362142720, 94410362155007, +STORE, 140351953997824, 140351954169855, +ERASE, 140351953997824, 140351953997824, +STORE, 140351953997824, 140351954001919, +STORE, 140351954001920, 140351954169855, +ERASE, 140351954001920, 140351954001920, +STORE, 140351954001920, 140351954124799, +STORE, 140351954124800, 140351954157567, +STORE, 140351954157568, 140351954165759, +STORE, 140351954165760, 140351954169855, +STORE, 140728272429056, 140728272433151, +STORE, 140728272416768, 140728272429055, +STORE, 47280840998912, 47280841007103, 
+STORE, 47280841007104, 47280841015295, +STORE, 47280841015296, 47280841179135, +ERASE, 47280841015296, 47280841015296, +STORE, 47280841015296, 47280841027583, +STORE, 47280841027584, 47280841179135, +STORE, 47280841125888, 47280841179135, +STORE, 47280841027584, 47280841125887, +ERASE, 47280841027584, 47280841027584, +STORE, 47280841027584, 47280841125887, +STORE, 47280841170944, 47280841179135, +STORE, 47280841125888, 47280841170943, +ERASE, 47280841125888, 47280841125888, +STORE, 47280841125888, 47280841179135, +ERASE, 47280841125888, 47280841125888, +STORE, 47280841125888, 47280841170943, +STORE, 47280841170944, 47280841179135, +ERASE, 47280841170944, 47280841170944, +STORE, 47280841170944, 47280841179135, +STORE, 47280841179136, 47280844230655, +STORE, 47280841723904, 47280844230655, +STORE, 47280841179136, 47280841723903, +ERASE, 47280841723904, 47280841723904, +STORE, 47280841723904, 47280844009471, +STORE, 47280844009472, 47280844230655, +STORE, 47280843419648, 47280844009471, +STORE, 47280841723904, 47280843419647, +ERASE, 47280841723904, 47280841723904, +STORE, 47280841723904, 47280843419647, +STORE, 47280844005376, 47280844009471, +STORE, 47280843419648, 47280844005375, +ERASE, 47280843419648, 47280843419648, +STORE, 47280843419648, 47280844005375, +STORE, 47280844214272, 47280844230655, +STORE, 47280844009472, 47280844214271, +ERASE, 47280844009472, 47280844009472, +STORE, 47280844009472, 47280844214271, +ERASE, 47280844214272, 47280844214272, +STORE, 47280844214272, 47280844230655, +STORE, 47280844230656, 47280846069759, +STORE, 47280844369920, 47280846069759, +STORE, 47280844230656, 47280844369919, +ERASE, 47280844369920, 47280844369920, +STORE, 47280844369920, 47280846028799, +STORE, 47280846028800, 47280846069759, +STORE, 47280845713408, 47280846028799, +STORE, 47280844369920, 47280845713407, +ERASE, 47280844369920, 47280844369920, +STORE, 47280844369920, 47280845713407, +STORE, 47280846024704, 47280846028799, +STORE, 47280845713408, 47280846024703, +ERASE, 47280845713408, 47280845713408, +STORE, 47280845713408, 47280846024703, +STORE, 47280846053376, 47280846069759, +STORE, 47280846028800, 47280846053375, +ERASE, 47280846028800, 47280846028800, +STORE, 47280846028800, 47280846053375, +ERASE, 47280846053376, 47280846053376, +STORE, 47280846053376, 47280846069759, +STORE, 47280846069760, 47280846204927, +ERASE, 47280846069760, 47280846069760, +STORE, 47280846069760, 47280846094335, +STORE, 47280846094336, 47280846204927, +STORE, 47280846155776, 47280846204927, +STORE, 47280846094336, 47280846155775, +ERASE, 47280846094336, 47280846094336, +STORE, 47280846094336, 47280846155775, +STORE, 47280846180352, 47280846204927, +STORE, 47280846155776, 47280846180351, +ERASE, 47280846155776, 47280846155776, +STORE, 47280846155776, 47280846204927, +ERASE, 47280846155776, 47280846155776, +STORE, 47280846155776, 47280846180351, +STORE, 47280846180352, 47280846204927, +STORE, 47280846188544, 47280846204927, +STORE, 47280846180352, 47280846188543, +ERASE, 47280846180352, 47280846180352, +STORE, 47280846180352, 47280846188543, +ERASE, 47280846188544, 47280846188544, +STORE, 47280846188544, 47280846204927, +STORE, 47280846204928, 47280846225407, +ERASE, 47280846204928, 47280846204928, +STORE, 47280846204928, 47280846209023, +STORE, 47280846209024, 47280846225407, +STORE, 47280846213120, 47280846225407, +STORE, 47280846209024, 47280846213119, +ERASE, 47280846209024, 47280846209024, +STORE, 47280846209024, 47280846213119, +STORE, 47280846217216, 47280846225407, +STORE, 47280846213120, 
47280846217215, +ERASE, 47280846213120, 47280846213120, +STORE, 47280846213120, 47280846225407, +ERASE, 47280846213120, 47280846213120, +STORE, 47280846213120, 47280846217215, +STORE, 47280846217216, 47280846225407, +ERASE, 47280846217216, 47280846217216, +STORE, 47280846217216, 47280846225407, +STORE, 47280846225408, 47280846233599, +ERASE, 47280846028800, 47280846028800, +STORE, 47280846028800, 47280846045183, +STORE, 47280846045184, 47280846053375, +ERASE, 47280846217216, 47280846217216, +STORE, 47280846217216, 47280846221311, +STORE, 47280846221312, 47280846225407, +ERASE, 47280846180352, 47280846180352, +STORE, 47280846180352, 47280846184447, +STORE, 47280846184448, 47280846188543, +ERASE, 47280844009472, 47280844009472, +STORE, 47280844009472, 47280844206079, +STORE, 47280844206080, 47280844214271, +ERASE, 47280841170944, 47280841170944, +STORE, 47280841170944, 47280841175039, +STORE, 47280841175040, 47280841179135, +ERASE, 94410362142720, 94410362142720, +STORE, 94410362142720, 94410362150911, +STORE, 94410362150912, 94410362155007, +ERASE, 140351954157568, 140351954157568, +STORE, 140351954157568, 140351954161663, +STORE, 140351954161664, 140351954165759, +ERASE, 47280840998912, 47280840998912, +STORE, 94410379456512, 94410379591679, +STORE, 140737488347136, 140737488351231, +STORE, 140732946362368, 140737488351231, +ERASE, 140732946362368, 140732946362368, +STORE, 140732946362368, 140732946366463, +STORE, 94352937934848, 94352938106879, +ERASE, 94352937934848, 94352937934848, +STORE, 94352937934848, 94352937951231, +STORE, 94352937951232, 94352938106879, +ERASE, 94352937951232, 94352937951232, +STORE, 94352937951232, 94352938053631, +STORE, 94352938053632, 94352938094591, +STORE, 94352938094592, 94352938106879, +STORE, 140595518742528, 140595518914559, +ERASE, 140595518742528, 140595518742528, +STORE, 140595518742528, 140595518746623, +STORE, 140595518746624, 140595518914559, +ERASE, 140595518746624, 140595518746624, +STORE, 140595518746624, 140595518869503, +STORE, 140595518869504, 140595518902271, +STORE, 140595518902272, 140595518910463, +STORE, 140595518910464, 140595518914559, +STORE, 140732947468288, 140732947472383, +STORE, 140732947456000, 140732947468287, +STORE, 47037276254208, 47037276262399, +STORE, 47037276262400, 47037276270591, +STORE, 47037276270592, 47037276434431, +ERASE, 47037276270592, 47037276270592, +STORE, 47037276270592, 47037276282879, +STORE, 47037276282880, 47037276434431, +STORE, 47037276381184, 47037276434431, +STORE, 47037276282880, 47037276381183, +ERASE, 47037276282880, 47037276282880, +STORE, 47037276282880, 47037276381183, +STORE, 47037276426240, 47037276434431, +STORE, 47037276381184, 47037276426239, +ERASE, 47037276381184, 47037276381184, +STORE, 47037276381184, 47037276434431, +ERASE, 47037276381184, 47037276381184, +STORE, 47037276381184, 47037276426239, +STORE, 47037276426240, 47037276434431, +ERASE, 47037276426240, 47037276426240, +STORE, 47037276426240, 47037276434431, +STORE, 47037276434432, 47037279485951, +STORE, 47037276979200, 47037279485951, +STORE, 47037276434432, 47037276979199, +ERASE, 47037276979200, 47037276979200, +STORE, 47037276979200, 47037279264767, +STORE, 47037279264768, 47037279485951, +STORE, 47037278674944, 47037279264767, +STORE, 47037276979200, 47037278674943, +ERASE, 47037276979200, 47037276979200, +STORE, 47037276979200, 47037278674943, +STORE, 47037279260672, 47037279264767, +STORE, 47037278674944, 47037279260671, +ERASE, 47037278674944, 47037278674944, +STORE, 47037278674944, 47037279260671, +STORE, 
47037279469568, 47037279485951, +STORE, 47037279264768, 47037279469567, +ERASE, 47037279264768, 47037279264768, +STORE, 47037279264768, 47037279469567, +ERASE, 47037279469568, 47037279469568, +STORE, 47037279469568, 47037279485951, +STORE, 47037279485952, 47037281325055, +STORE, 47037279625216, 47037281325055, +STORE, 47037279485952, 47037279625215, +ERASE, 47037279625216, 47037279625216, +STORE, 47037279625216, 47037281284095, +STORE, 47037281284096, 47037281325055, +STORE, 47037280968704, 47037281284095, +STORE, 47037279625216, 47037280968703, +ERASE, 47037279625216, 47037279625216, +STORE, 47037279625216, 47037280968703, +STORE, 47037281280000, 47037281284095, +STORE, 47037280968704, 47037281279999, +ERASE, 47037280968704, 47037280968704, +STORE, 47037280968704, 47037281279999, +STORE, 47037281308672, 47037281325055, +STORE, 47037281284096, 47037281308671, +ERASE, 47037281284096, 47037281284096, +STORE, 47037281284096, 47037281308671, +ERASE, 47037281308672, 47037281308672, +STORE, 47037281308672, 47037281325055, +STORE, 47037281325056, 47037281460223, +ERASE, 47037281325056, 47037281325056, +STORE, 47037281325056, 47037281349631, +STORE, 47037281349632, 47037281460223, +STORE, 47037281411072, 47037281460223, +STORE, 47037281349632, 47037281411071, +ERASE, 47037281349632, 47037281349632, +STORE, 47037281349632, 47037281411071, +STORE, 47037281435648, 47037281460223, +STORE, 47037281411072, 47037281435647, +ERASE, 47037281411072, 47037281411072, +STORE, 47037281411072, 47037281460223, +ERASE, 47037281411072, 47037281411072, +STORE, 47037281411072, 47037281435647, +STORE, 47037281435648, 47037281460223, +STORE, 47037281443840, 47037281460223, +STORE, 47037281435648, 47037281443839, +ERASE, 47037281435648, 47037281435648, +STORE, 47037281435648, 47037281443839, +ERASE, 47037281443840, 47037281443840, +STORE, 47037281443840, 47037281460223, +STORE, 47037281460224, 47037281480703, +ERASE, 47037281460224, 47037281460224, +STORE, 47037281460224, 47037281464319, +STORE, 47037281464320, 47037281480703, +STORE, 47037281468416, 47037281480703, +STORE, 47037281464320, 47037281468415, +ERASE, 47037281464320, 47037281464320, +STORE, 47037281464320, 47037281468415, +STORE, 47037281472512, 47037281480703, +STORE, 47037281468416, 47037281472511, +ERASE, 47037281468416, 47037281468416, +STORE, 47037281468416, 47037281480703, +ERASE, 47037281468416, 47037281468416, +STORE, 47037281468416, 47037281472511, +STORE, 47037281472512, 47037281480703, +ERASE, 47037281472512, 47037281472512, +STORE, 47037281472512, 47037281480703, +STORE, 47037281480704, 47037281488895, +ERASE, 47037281284096, 47037281284096, +STORE, 47037281284096, 47037281300479, +STORE, 47037281300480, 47037281308671, +ERASE, 47037281472512, 47037281472512, +STORE, 47037281472512, 47037281476607, +STORE, 47037281476608, 47037281480703, +ERASE, 47037281435648, 47037281435648, +STORE, 47037281435648, 47037281439743, +STORE, 47037281439744, 47037281443839, +ERASE, 47037279264768, 47037279264768, +STORE, 47037279264768, 47037279461375, +STORE, 47037279461376, 47037279469567, +ERASE, 47037276426240, 47037276426240, +STORE, 47037276426240, 47037276430335, +STORE, 47037276430336, 47037276434431, +ERASE, 94352938094592, 94352938094592, +STORE, 94352938094592, 94352938102783, +STORE, 94352938102784, 94352938106879, +ERASE, 140595518902272, 140595518902272, +STORE, 140595518902272, 140595518906367, +STORE, 140595518906368, 140595518910463, +ERASE, 47037276254208, 47037276254208, +STORE, 94352938438656, 94352938573823, +STORE, 140737488347136, 
140737488351231, +STORE, 140733506027520, 140737488351231, +ERASE, 140733506027520, 140733506027520, +STORE, 140733506027520, 140733506031615, +STORE, 94150123073536, 94150123245567, +ERASE, 94150123073536, 94150123073536, +STORE, 94150123073536, 94150123089919, +STORE, 94150123089920, 94150123245567, +ERASE, 94150123089920, 94150123089920, +STORE, 94150123089920, 94150123192319, +STORE, 94150123192320, 94150123233279, +STORE, 94150123233280, 94150123245567, +STORE, 140081290375168, 140081290547199, +ERASE, 140081290375168, 140081290375168, +STORE, 140081290375168, 140081290379263, +STORE, 140081290379264, 140081290547199, +ERASE, 140081290379264, 140081290379264, +STORE, 140081290379264, 140081290502143, +STORE, 140081290502144, 140081290534911, +STORE, 140081290534912, 140081290543103, +STORE, 140081290543104, 140081290547199, +STORE, 140733506707456, 140733506711551, +STORE, 140733506695168, 140733506707455, +STORE, 47551504621568, 47551504629759, +STORE, 47551504629760, 47551504637951, +STORE, 47551504637952, 47551504801791, +ERASE, 47551504637952, 47551504637952, +STORE, 47551504637952, 47551504650239, +STORE, 47551504650240, 47551504801791, +STORE, 47551504748544, 47551504801791, +STORE, 47551504650240, 47551504748543, +ERASE, 47551504650240, 47551504650240, +STORE, 47551504650240, 47551504748543, +STORE, 47551504793600, 47551504801791, +STORE, 47551504748544, 47551504793599, +ERASE, 47551504748544, 47551504748544, +STORE, 47551504748544, 47551504801791, +ERASE, 47551504748544, 47551504748544, +STORE, 47551504748544, 47551504793599, +STORE, 47551504793600, 47551504801791, +ERASE, 47551504793600, 47551504793600, +STORE, 47551504793600, 47551504801791, +STORE, 47551504801792, 47551507853311, +STORE, 47551505346560, 47551507853311, +STORE, 47551504801792, 47551505346559, +ERASE, 47551505346560, 47551505346560, +STORE, 47551505346560, 47551507632127, +STORE, 47551507632128, 47551507853311, +STORE, 47551507042304, 47551507632127, +STORE, 47551505346560, 47551507042303, +ERASE, 47551505346560, 47551505346560, +STORE, 47551505346560, 47551507042303, +STORE, 47551507628032, 47551507632127, +STORE, 47551507042304, 47551507628031, +ERASE, 47551507042304, 47551507042304, +STORE, 47551507042304, 47551507628031, +STORE, 47551507836928, 47551507853311, +STORE, 47551507632128, 47551507836927, +ERASE, 47551507632128, 47551507632128, +STORE, 47551507632128, 47551507836927, +ERASE, 47551507836928, 47551507836928, +STORE, 47551507836928, 47551507853311, +STORE, 47551507853312, 47551509692415, +STORE, 47551507992576, 47551509692415, +STORE, 47551507853312, 47551507992575, +ERASE, 47551507992576, 47551507992576, +STORE, 47551507992576, 47551509651455, +STORE, 47551509651456, 47551509692415, +STORE, 47551509336064, 47551509651455, +STORE, 47551507992576, 47551509336063, +ERASE, 47551507992576, 47551507992576, +STORE, 47551507992576, 47551509336063, +STORE, 47551509647360, 47551509651455, +STORE, 47551509336064, 47551509647359, +ERASE, 47551509336064, 47551509336064, +STORE, 47551509336064, 47551509647359, +STORE, 47551509676032, 47551509692415, +STORE, 47551509651456, 47551509676031, +ERASE, 47551509651456, 47551509651456, +STORE, 47551509651456, 47551509676031, +ERASE, 47551509676032, 47551509676032, +STORE, 47551509676032, 47551509692415, +STORE, 47551509692416, 47551509827583, +ERASE, 47551509692416, 47551509692416, +STORE, 47551509692416, 47551509716991, +STORE, 47551509716992, 47551509827583, +STORE, 47551509778432, 47551509827583, +STORE, 47551509716992, 47551509778431, +ERASE, 47551509716992, 
47551509716992, +STORE, 47551509716992, 47551509778431, +STORE, 47551509803008, 47551509827583, +STORE, 47551509778432, 47551509803007, +ERASE, 47551509778432, 47551509778432, +STORE, 47551509778432, 47551509827583, +ERASE, 47551509778432, 47551509778432, +STORE, 47551509778432, 47551509803007, +STORE, 47551509803008, 47551509827583, +STORE, 47551509811200, 47551509827583, +STORE, 47551509803008, 47551509811199, +ERASE, 47551509803008, 47551509803008, +STORE, 47551509803008, 47551509811199, +ERASE, 47551509811200, 47551509811200, +STORE, 47551509811200, 47551509827583, +STORE, 47551509827584, 47551509848063, +ERASE, 47551509827584, 47551509827584, +STORE, 47551509827584, 47551509831679, +STORE, 47551509831680, 47551509848063, +STORE, 47551509835776, 47551509848063, +STORE, 47551509831680, 47551509835775, +ERASE, 47551509831680, 47551509831680, +STORE, 47551509831680, 47551509835775, +STORE, 47551509839872, 47551509848063, +STORE, 47551509835776, 47551509839871, +ERASE, 47551509835776, 47551509835776, +STORE, 47551509835776, 47551509848063, +ERASE, 47551509835776, 47551509835776, +STORE, 47551509835776, 47551509839871, +STORE, 47551509839872, 47551509848063, +ERASE, 47551509839872, 47551509839872, +STORE, 47551509839872, 47551509848063, +STORE, 47551509848064, 47551509856255, +ERASE, 47551509651456, 47551509651456, +STORE, 47551509651456, 47551509667839, +STORE, 47551509667840, 47551509676031, +ERASE, 47551509839872, 47551509839872, +STORE, 47551509839872, 47551509843967, +STORE, 47551509843968, 47551509848063, +ERASE, 47551509803008, 47551509803008, +STORE, 47551509803008, 47551509807103, +STORE, 47551509807104, 47551509811199, +ERASE, 47551507632128, 47551507632128, +STORE, 47551507632128, 47551507828735, +STORE, 47551507828736, 47551507836927, +ERASE, 47551504793600, 47551504793600, +STORE, 47551504793600, 47551504797695, +STORE, 47551504797696, 47551504801791, +ERASE, 94150123233280, 94150123233280, +STORE, 94150123233280, 94150123241471, +STORE, 94150123241472, 94150123245567, +ERASE, 140081290534912, 140081290534912, +STORE, 140081290534912, 140081290539007, +STORE, 140081290539008, 140081290543103, +ERASE, 47551504621568, 47551504621568, +STORE, 94150148112384, 94150148247551, +STORE, 140737488347136, 140737488351231, +STORE, 140734389334016, 140737488351231, +ERASE, 140734389334016, 140734389334016, +STORE, 140734389334016, 140734389338111, +STORE, 94844636606464, 94844636778495, +ERASE, 94844636606464, 94844636606464, +STORE, 94844636606464, 94844636622847, +STORE, 94844636622848, 94844636778495, +ERASE, 94844636622848, 94844636622848, +STORE, 94844636622848, 94844636725247, +STORE, 94844636725248, 94844636766207, +STORE, 94844636766208, 94844636778495, +STORE, 139922765217792, 139922765389823, +ERASE, 139922765217792, 139922765217792, +STORE, 139922765217792, 139922765221887, +STORE, 139922765221888, 139922765389823, +ERASE, 139922765221888, 139922765221888, +STORE, 139922765221888, 139922765344767, +STORE, 139922765344768, 139922765377535, +STORE, 139922765377536, 139922765385727, +STORE, 139922765385728, 139922765389823, +STORE, 140734389678080, 140734389682175, +STORE, 140734389665792, 140734389678079, +STORE, 47710029778944, 47710029787135, +STORE, 47710029787136, 47710029795327, +STORE, 47710029795328, 47710029959167, +ERASE, 47710029795328, 47710029795328, +STORE, 47710029795328, 47710029807615, +STORE, 47710029807616, 47710029959167, +STORE, 47710029905920, 47710029959167, +STORE, 47710029807616, 47710029905919, +ERASE, 47710029807616, 47710029807616, +STORE, 
47710029807616, 47710029905919, +STORE, 47710029950976, 47710029959167, +STORE, 47710029905920, 47710029950975, +ERASE, 47710029905920, 47710029905920, +STORE, 47710029905920, 47710029959167, +ERASE, 47710029905920, 47710029905920, +STORE, 47710029905920, 47710029950975, +STORE, 47710029950976, 47710029959167, +ERASE, 47710029950976, 47710029950976, +STORE, 47710029950976, 47710029959167, +STORE, 47710029959168, 47710033010687, +STORE, 47710030503936, 47710033010687, +STORE, 47710029959168, 47710030503935, +ERASE, 47710030503936, 47710030503936, +STORE, 47710030503936, 47710032789503, +STORE, 47710032789504, 47710033010687, +STORE, 47710032199680, 47710032789503, +STORE, 47710030503936, 47710032199679, +ERASE, 47710030503936, 47710030503936, +STORE, 47710030503936, 47710032199679, +STORE, 47710032785408, 47710032789503, +STORE, 47710032199680, 47710032785407, +ERASE, 47710032199680, 47710032199680, +STORE, 47710032199680, 47710032785407, +STORE, 47710032994304, 47710033010687, +STORE, 47710032789504, 47710032994303, +ERASE, 47710032789504, 47710032789504, +STORE, 47710032789504, 47710032994303, +ERASE, 47710032994304, 47710032994304, +STORE, 47710032994304, 47710033010687, +STORE, 47710033010688, 47710034849791, +STORE, 47710033149952, 47710034849791, +STORE, 47710033010688, 47710033149951, +ERASE, 47710033149952, 47710033149952, +STORE, 47710033149952, 47710034808831, +STORE, 47710034808832, 47710034849791, +STORE, 47710034493440, 47710034808831, +STORE, 47710033149952, 47710034493439, +ERASE, 47710033149952, 47710033149952, +STORE, 47710033149952, 47710034493439, +STORE, 47710034804736, 47710034808831, +STORE, 47710034493440, 47710034804735, +ERASE, 47710034493440, 47710034493440, +STORE, 47710034493440, 47710034804735, +STORE, 47710034833408, 47710034849791, +STORE, 47710034808832, 47710034833407, +ERASE, 47710034808832, 47710034808832, +STORE, 47710034808832, 47710034833407, +ERASE, 47710034833408, 47710034833408, +STORE, 47710034833408, 47710034849791, +STORE, 47710034849792, 47710034984959, +ERASE, 47710034849792, 47710034849792, +STORE, 47710034849792, 47710034874367, +STORE, 47710034874368, 47710034984959, +STORE, 47710034935808, 47710034984959, +STORE, 47710034874368, 47710034935807, +ERASE, 47710034874368, 47710034874368, +STORE, 47710034874368, 47710034935807, +STORE, 47710034960384, 47710034984959, +STORE, 47710034935808, 47710034960383, +ERASE, 47710034935808, 47710034935808, +STORE, 47710034935808, 47710034984959, +ERASE, 47710034935808, 47710034935808, +STORE, 47710034935808, 47710034960383, +STORE, 47710034960384, 47710034984959, +STORE, 47710034968576, 47710034984959, +STORE, 47710034960384, 47710034968575, +ERASE, 47710034960384, 47710034960384, +STORE, 47710034960384, 47710034968575, +ERASE, 47710034968576, 47710034968576, +STORE, 47710034968576, 47710034984959, +STORE, 47710034984960, 47710035005439, +ERASE, 47710034984960, 47710034984960, +STORE, 47710034984960, 47710034989055, +STORE, 47710034989056, 47710035005439, +STORE, 47710034993152, 47710035005439, +STORE, 47710034989056, 47710034993151, +ERASE, 47710034989056, 47710034989056, +STORE, 47710034989056, 47710034993151, +STORE, 47710034997248, 47710035005439, +STORE, 47710034993152, 47710034997247, +ERASE, 47710034993152, 47710034993152, +STORE, 47710034993152, 47710035005439, +ERASE, 47710034993152, 47710034993152, +STORE, 47710034993152, 47710034997247, +STORE, 47710034997248, 47710035005439, +ERASE, 47710034997248, 47710034997248, +STORE, 47710034997248, 47710035005439, +STORE, 47710035005440, 47710035013631, 
+ERASE, 47710034808832, 47710034808832, +STORE, 47710034808832, 47710034825215, +STORE, 47710034825216, 47710034833407, +ERASE, 47710034997248, 47710034997248, +STORE, 47710034997248, 47710035001343, +STORE, 47710035001344, 47710035005439, +ERASE, 47710034960384, 47710034960384, +STORE, 47710034960384, 47710034964479, +STORE, 47710034964480, 47710034968575, +ERASE, 47710032789504, 47710032789504, +STORE, 47710032789504, 47710032986111, +STORE, 47710032986112, 47710032994303, +ERASE, 47710029950976, 47710029950976, +STORE, 47710029950976, 47710029955071, +STORE, 47710029955072, 47710029959167, +ERASE, 94844636766208, 94844636766208, +STORE, 94844636766208, 94844636774399, +STORE, 94844636774400, 94844636778495, +ERASE, 139922765377536, 139922765377536, +STORE, 139922765377536, 139922765381631, +STORE, 139922765381632, 139922765385727, +ERASE, 47710029778944, 47710029778944, +STORE, 94844641775616, 94844641910783, +STORE, 140737488347136, 140737488351231, +STORE, 140732213886976, 140737488351231, +ERASE, 140732213886976, 140732213886976, +STORE, 140732213886976, 140732213891071, +STORE, 94240508887040, 94240509059071, +ERASE, 94240508887040, 94240508887040, +STORE, 94240508887040, 94240508903423, +STORE, 94240508903424, 94240509059071, +ERASE, 94240508903424, 94240508903424, +STORE, 94240508903424, 94240509005823, +STORE, 94240509005824, 94240509046783, +STORE, 94240509046784, 94240509059071, +STORE, 140275106516992, 140275106689023, +ERASE, 140275106516992, 140275106516992, +STORE, 140275106516992, 140275106521087, +STORE, 140275106521088, 140275106689023, +ERASE, 140275106521088, 140275106521088, +STORE, 140275106521088, 140275106643967, +STORE, 140275106643968, 140275106676735, +STORE, 140275106676736, 140275106684927, +STORE, 140275106684928, 140275106689023, +STORE, 140732213977088, 140732213981183, +STORE, 140732213964800, 140732213977087, +STORE, 47357688479744, 47357688487935, +STORE, 47357688487936, 47357688496127, +STORE, 47357688496128, 47357688659967, +ERASE, 47357688496128, 47357688496128, +STORE, 47357688496128, 47357688508415, +STORE, 47357688508416, 47357688659967, +STORE, 47357688606720, 47357688659967, +STORE, 47357688508416, 47357688606719, +ERASE, 47357688508416, 47357688508416, +STORE, 47357688508416, 47357688606719, +STORE, 47357688651776, 47357688659967, +STORE, 47357688606720, 47357688651775, +ERASE, 47357688606720, 47357688606720, +STORE, 47357688606720, 47357688659967, +ERASE, 47357688606720, 47357688606720, +STORE, 47357688606720, 47357688651775, +STORE, 47357688651776, 47357688659967, +ERASE, 47357688651776, 47357688651776, +STORE, 47357688651776, 47357688659967, +STORE, 47357688659968, 47357691711487, +STORE, 47357689204736, 47357691711487, +STORE, 47357688659968, 47357689204735, +ERASE, 47357689204736, 47357689204736, +STORE, 47357689204736, 47357691490303, +STORE, 47357691490304, 47357691711487, +STORE, 47357690900480, 47357691490303, +STORE, 47357689204736, 47357690900479, +ERASE, 47357689204736, 47357689204736, +STORE, 47357689204736, 47357690900479, +STORE, 47357691486208, 47357691490303, +STORE, 47357690900480, 47357691486207, +ERASE, 47357690900480, 47357690900480, +STORE, 47357690900480, 47357691486207, +STORE, 47357691695104, 47357691711487, +STORE, 47357691490304, 47357691695103, +ERASE, 47357691490304, 47357691490304, +STORE, 47357691490304, 47357691695103, +ERASE, 47357691695104, 47357691695104, +STORE, 47357691695104, 47357691711487, +STORE, 47357691711488, 47357693550591, +STORE, 47357691850752, 47357693550591, +STORE, 47357691711488, 
47357691850751, +ERASE, 47357691850752, 47357691850752, +STORE, 47357691850752, 47357693509631, +STORE, 47357693509632, 47357693550591, +STORE, 47357693194240, 47357693509631, +STORE, 47357691850752, 47357693194239, +ERASE, 47357691850752, 47357691850752, +STORE, 47357691850752, 47357693194239, +STORE, 47357693505536, 47357693509631, +STORE, 47357693194240, 47357693505535, +ERASE, 47357693194240, 47357693194240, +STORE, 47357693194240, 47357693505535, +STORE, 47357693534208, 47357693550591, +STORE, 47357693509632, 47357693534207, +ERASE, 47357693509632, 47357693509632, +STORE, 47357693509632, 47357693534207, +ERASE, 47357693534208, 47357693534208, +STORE, 47357693534208, 47357693550591, +STORE, 47357693550592, 47357693685759, +ERASE, 47357693550592, 47357693550592, +STORE, 47357693550592, 47357693575167, +STORE, 47357693575168, 47357693685759, +STORE, 47357693636608, 47357693685759, +STORE, 47357693575168, 47357693636607, +ERASE, 47357693575168, 47357693575168, +STORE, 47357693575168, 47357693636607, +STORE, 47357693661184, 47357693685759, +STORE, 47357693636608, 47357693661183, +ERASE, 47357693636608, 47357693636608, +STORE, 47357693636608, 47357693685759, +ERASE, 47357693636608, 47357693636608, +STORE, 47357693636608, 47357693661183, +STORE, 47357693661184, 47357693685759, +STORE, 47357693669376, 47357693685759, +STORE, 47357693661184, 47357693669375, +ERASE, 47357693661184, 47357693661184, +STORE, 47357693661184, 47357693669375, +ERASE, 47357693669376, 47357693669376, +STORE, 47357693669376, 47357693685759, +STORE, 47357693685760, 47357693706239, +ERASE, 47357693685760, 47357693685760, +STORE, 47357693685760, 47357693689855, +STORE, 47357693689856, 47357693706239, +STORE, 47357693693952, 47357693706239, +STORE, 47357693689856, 47357693693951, +ERASE, 47357693689856, 47357693689856, +STORE, 47357693689856, 47357693693951, +STORE, 47357693698048, 47357693706239, +STORE, 47357693693952, 47357693698047, +ERASE, 47357693693952, 47357693693952, +STORE, 47357693693952, 47357693706239, +ERASE, 47357693693952, 47357693693952, +STORE, 47357693693952, 47357693698047, +STORE, 47357693698048, 47357693706239, +ERASE, 47357693698048, 47357693698048, +STORE, 47357693698048, 47357693706239, +STORE, 47357693706240, 47357693714431, +ERASE, 47357693509632, 47357693509632, +STORE, 47357693509632, 47357693526015, +STORE, 47357693526016, 47357693534207, +ERASE, 47357693698048, 47357693698048, +STORE, 47357693698048, 47357693702143, +STORE, 47357693702144, 47357693706239, +ERASE, 47357693661184, 47357693661184, +STORE, 47357693661184, 47357693665279, +STORE, 47357693665280, 47357693669375, +ERASE, 47357691490304, 47357691490304, +STORE, 47357691490304, 47357691686911, +STORE, 47357691686912, 47357691695103, +ERASE, 47357688651776, 47357688651776, +STORE, 47357688651776, 47357688655871, +STORE, 47357688655872, 47357688659967, +ERASE, 94240509046784, 94240509046784, +STORE, 94240509046784, 94240509054975, +STORE, 94240509054976, 94240509059071, +ERASE, 140275106676736, 140275106676736, +STORE, 140275106676736, 140275106680831, +STORE, 140275106680832, 140275106684927, +ERASE, 47357688479744, 47357688479744, +STORE, 94240518361088, 94240518496255, +STORE, 140737488347136, 140737488351231, +STORE, 140732688277504, 140737488351231, +ERASE, 140732688277504, 140732688277504, +STORE, 140732688277504, 140732688281599, +STORE, 94629171351552, 94629172064255, +ERASE, 94629171351552, 94629171351552, +STORE, 94629171351552, 94629171400703, +STORE, 94629171400704, 94629172064255, +ERASE, 94629171400704, 94629171400704, 
+STORE, 94629171400704, 94629171945471, +STORE, 94629171945472, 94629172043775, +STORE, 94629172043776, 94629172064255, +STORE, 139770707644416, 139770707816447, +ERASE, 139770707644416, 139770707644416, +STORE, 139770707644416, 139770707648511, +STORE, 139770707648512, 139770707816447, +ERASE, 139770707648512, 139770707648512, +STORE, 139770707648512, 139770707771391, +STORE, 139770707771392, 139770707804159, +STORE, 139770707804160, 139770707812351, +STORE, 139770707812352, 139770707816447, +STORE, 140732689121280, 140732689125375, +STORE, 140732689108992, 140732689121279, +STORE, 47862087352320, 47862087360511, +STORE, 47862087360512, 47862087368703, +STORE, 47862087368704, 47862087475199, +STORE, 47862087385088, 47862087475199, +STORE, 47862087368704, 47862087385087, +ERASE, 47862087385088, 47862087385088, +STORE, 47862087385088, 47862087458815, +STORE, 47862087458816, 47862087475199, +STORE, 47862087438336, 47862087458815, +STORE, 47862087385088, 47862087438335, +ERASE, 47862087385088, 47862087385088, +STORE, 47862087385088, 47862087438335, +STORE, 47862087454720, 47862087458815, +STORE, 47862087438336, 47862087454719, +ERASE, 47862087438336, 47862087438336, +STORE, 47862087438336, 47862087454719, +STORE, 47862087467008, 47862087475199, +STORE, 47862087458816, 47862087467007, +ERASE, 47862087458816, 47862087458816, +STORE, 47862087458816, 47862087467007, +ERASE, 47862087467008, 47862087467008, +STORE, 47862087467008, 47862087475199, +STORE, 47862087475200, 47862089314303, +STORE, 47862087614464, 47862089314303, +STORE, 47862087475200, 47862087614463, +ERASE, 47862087614464, 47862087614464, +STORE, 47862087614464, 47862089273343, +STORE, 47862089273344, 47862089314303, +STORE, 47862088957952, 47862089273343, +STORE, 47862087614464, 47862088957951, +ERASE, 47862087614464, 47862087614464, +STORE, 47862087614464, 47862088957951, +STORE, 47862089269248, 47862089273343, +STORE, 47862088957952, 47862089269247, +ERASE, 47862088957952, 47862088957952, +STORE, 47862088957952, 47862089269247, +STORE, 47862089297920, 47862089314303, +STORE, 47862089273344, 47862089297919, +ERASE, 47862089273344, 47862089273344, +STORE, 47862089273344, 47862089297919, +ERASE, 47862089297920, 47862089297920, +STORE, 47862089297920, 47862089314303, +STORE, 47862089297920, 47862089326591, +ERASE, 47862089273344, 47862089273344, +STORE, 47862089273344, 47862089289727, +STORE, 47862089289728, 47862089297919, +ERASE, 47862087458816, 47862087458816, +STORE, 47862087458816, 47862087462911, +STORE, 47862087462912, 47862087467007, +ERASE, 94629172043776, 94629172043776, +STORE, 94629172043776, 94629172060159, +STORE, 94629172060160, 94629172064255, +ERASE, 139770707804160, 139770707804160, +STORE, 139770707804160, 139770707808255, +STORE, 139770707808256, 139770707812351, +ERASE, 47862087352320, 47862087352320, +STORE, 94629197533184, 94629197668351, +STORE, 140737488347136, 140737488351231, +STORE, 140727540711424, 140737488351231, +ERASE, 140727540711424, 140727540711424, +STORE, 140727540711424, 140727540715519, +STORE, 94299865313280, 94299866025983, +ERASE, 94299865313280, 94299865313280, +STORE, 94299865313280, 94299865362431, +STORE, 94299865362432, 94299866025983, +ERASE, 94299865362432, 94299865362432, +STORE, 94299865362432, 94299865907199, +STORE, 94299865907200, 94299866005503, +STORE, 94299866005504, 94299866025983, +STORE, 140680268763136, 140680268935167, +ERASE, 140680268763136, 140680268763136, +STORE, 140680268763136, 140680268767231, +STORE, 140680268767232, 140680268935167, +ERASE, 140680268767232, 
140680268767232, +STORE, 140680268767232, 140680268890111, +STORE, 140680268890112, 140680268922879, +STORE, 140680268922880, 140680268931071, +STORE, 140680268931072, 140680268935167, +STORE, 140727541424128, 140727541428223, +STORE, 140727541411840, 140727541424127, +STORE, 46952526233600, 46952526241791, +STORE, 46952526241792, 46952526249983, +STORE, 46952526249984, 46952526356479, +STORE, 46952526266368, 46952526356479, +STORE, 46952526249984, 46952526266367, +ERASE, 46952526266368, 46952526266368, +STORE, 46952526266368, 46952526340095, +STORE, 46952526340096, 46952526356479, +STORE, 46952526319616, 46952526340095, +STORE, 46952526266368, 46952526319615, +ERASE, 46952526266368, 46952526266368, +STORE, 46952526266368, 46952526319615, +STORE, 46952526336000, 46952526340095, +STORE, 46952526319616, 46952526335999, +ERASE, 46952526319616, 46952526319616, +STORE, 46952526319616, 46952526335999, +STORE, 46952526348288, 46952526356479, +STORE, 46952526340096, 46952526348287, +ERASE, 46952526340096, 46952526340096, +STORE, 46952526340096, 46952526348287, +ERASE, 46952526348288, 46952526348288, +STORE, 46952526348288, 46952526356479, +STORE, 46952526356480, 46952528195583, +STORE, 46952526495744, 46952528195583, +STORE, 46952526356480, 46952526495743, +ERASE, 46952526495744, 46952526495744, +STORE, 46952526495744, 46952528154623, +STORE, 46952528154624, 46952528195583, +STORE, 46952527839232, 46952528154623, +STORE, 46952526495744, 46952527839231, +ERASE, 46952526495744, 46952526495744, +STORE, 46952526495744, 46952527839231, +STORE, 46952528150528, 46952528154623, +STORE, 46952527839232, 46952528150527, +ERASE, 46952527839232, 46952527839232, +STORE, 46952527839232, 46952528150527, +STORE, 46952528179200, 46952528195583, +STORE, 46952528154624, 46952528179199, +ERASE, 46952528154624, 46952528154624, +STORE, 46952528154624, 46952528179199, +ERASE, 46952528179200, 46952528179200, +STORE, 46952528179200, 46952528195583, +STORE, 46952528179200, 46952528207871, +ERASE, 46952528154624, 46952528154624, +STORE, 46952528154624, 46952528171007, +STORE, 46952528171008, 46952528179199, +ERASE, 46952526340096, 46952526340096, +STORE, 46952526340096, 46952526344191, +STORE, 46952526344192, 46952526348287, +ERASE, 94299866005504, 94299866005504, +STORE, 94299866005504, 94299866021887, +STORE, 94299866021888, 94299866025983, +ERASE, 140680268922880, 140680268922880, +STORE, 140680268922880, 140680268926975, +STORE, 140680268926976, 140680268931071, +ERASE, 46952526233600, 46952526233600, +STORE, 140737488347136, 140737488351231, +STORE, 140722874793984, 140737488351231, +ERASE, 140722874793984, 140722874793984, +STORE, 140722874793984, 140722874798079, +STORE, 94448916213760, 94448916926463, +ERASE, 94448916213760, 94448916213760, +STORE, 94448916213760, 94448916262911, +STORE, 94448916262912, 94448916926463, +ERASE, 94448916262912, 94448916262912, +STORE, 94448916262912, 94448916807679, +STORE, 94448916807680, 94448916905983, +STORE, 94448916905984, 94448916926463, +STORE, 140389117046784, 140389117218815, +ERASE, 140389117046784, 140389117046784, +STORE, 140389117046784, 140389117050879, +STORE, 140389117050880, 140389117218815, +ERASE, 140389117050880, 140389117050880, +STORE, 140389117050880, 140389117173759, +STORE, 140389117173760, 140389117206527, +STORE, 140389117206528, 140389117214719, +STORE, 140389117214720, 140389117218815, +STORE, 140722875297792, 140722875301887, +STORE, 140722875285504, 140722875297791, +STORE, 47243677949952, 47243677958143, +STORE, 47243677958144, 47243677966335, +STORE, 
47243677966336, 47243678072831, +STORE, 47243677982720, 47243678072831, +STORE, 47243677966336, 47243677982719, +ERASE, 47243677982720, 47243677982720, +STORE, 47243677982720, 47243678056447, +STORE, 47243678056448, 47243678072831, +STORE, 47243678035968, 47243678056447, +STORE, 47243677982720, 47243678035967, +ERASE, 47243677982720, 47243677982720, +STORE, 47243677982720, 47243678035967, +STORE, 47243678052352, 47243678056447, +STORE, 47243678035968, 47243678052351, +ERASE, 47243678035968, 47243678035968, +STORE, 47243678035968, 47243678052351, +STORE, 47243678064640, 47243678072831, +STORE, 47243678056448, 47243678064639, +ERASE, 47243678056448, 47243678056448, +STORE, 47243678056448, 47243678064639, +ERASE, 47243678064640, 47243678064640, +STORE, 47243678064640, 47243678072831, +STORE, 47243678072832, 47243679911935, +STORE, 47243678212096, 47243679911935, +STORE, 47243678072832, 47243678212095, +ERASE, 47243678212096, 47243678212096, +STORE, 47243678212096, 47243679870975, +STORE, 47243679870976, 47243679911935, +STORE, 47243679555584, 47243679870975, +STORE, 47243678212096, 47243679555583, +ERASE, 47243678212096, 47243678212096, +STORE, 47243678212096, 47243679555583, +STORE, 47243679866880, 47243679870975, +STORE, 47243679555584, 47243679866879, +ERASE, 47243679555584, 47243679555584, +STORE, 47243679555584, 47243679866879, +STORE, 47243679895552, 47243679911935, +STORE, 47243679870976, 47243679895551, +ERASE, 47243679870976, 47243679870976, +STORE, 47243679870976, 47243679895551, +ERASE, 47243679895552, 47243679895552, +STORE, 47243679895552, 47243679911935, +STORE, 47243679895552, 47243679924223, +ERASE, 47243679870976, 47243679870976, +STORE, 47243679870976, 47243679887359, +STORE, 47243679887360, 47243679895551, +ERASE, 47243678056448, 47243678056448, +STORE, 47243678056448, 47243678060543, +STORE, 47243678060544, 47243678064639, +ERASE, 94448916905984, 94448916905984, +STORE, 94448916905984, 94448916922367, +STORE, 94448916922368, 94448916926463, +ERASE, 140389117206528, 140389117206528, +STORE, 140389117206528, 140389117210623, +STORE, 140389117210624, 140389117214719, +ERASE, 47243677949952, 47243677949952, +STORE, 140737488347136, 140737488351231, +STORE, 140733068505088, 140737488351231, +ERASE, 140733068505088, 140733068505088, +STORE, 140733068505088, 140733068509183, +STORE, 94207145750528, 94207146463231, +ERASE, 94207145750528, 94207145750528, +STORE, 94207145750528, 94207145799679, +STORE, 94207145799680, 94207146463231, +ERASE, 94207145799680, 94207145799680, +STORE, 94207145799680, 94207146344447, +STORE, 94207146344448, 94207146442751, +STORE, 94207146442752, 94207146463231, +STORE, 140684504911872, 140684505083903, +ERASE, 140684504911872, 140684504911872, +STORE, 140684504911872, 140684504915967, +STORE, 140684504915968, 140684505083903, +ERASE, 140684504915968, 140684504915968, +STORE, 140684504915968, 140684505038847, +STORE, 140684505038848, 140684505071615, +STORE, 140684505071616, 140684505079807, +STORE, 140684505079808, 140684505083903, +STORE, 140733068607488, 140733068611583, +STORE, 140733068595200, 140733068607487, +STORE, 46948290084864, 46948290093055, +STORE, 46948290093056, 46948290101247, +STORE, 46948290101248, 46948290207743, +STORE, 46948290117632, 46948290207743, +STORE, 46948290101248, 46948290117631, +ERASE, 46948290117632, 46948290117632, +STORE, 46948290117632, 46948290191359, +STORE, 46948290191360, 46948290207743, +STORE, 46948290170880, 46948290191359, +STORE, 46948290117632, 46948290170879, +ERASE, 46948290117632, 46948290117632, 
+STORE, 46948290117632, 46948290170879, +STORE, 46948290187264, 46948290191359, +STORE, 46948290170880, 46948290187263, +ERASE, 46948290170880, 46948290170880, +STORE, 46948290170880, 46948290187263, +STORE, 46948290199552, 46948290207743, +STORE, 46948290191360, 46948290199551, +ERASE, 46948290191360, 46948290191360, +STORE, 46948290191360, 46948290199551, +ERASE, 46948290199552, 46948290199552, +STORE, 46948290199552, 46948290207743, +STORE, 46948290207744, 46948292046847, +STORE, 46948290347008, 46948292046847, +STORE, 46948290207744, 46948290347007, +ERASE, 46948290347008, 46948290347008, +STORE, 46948290347008, 46948292005887, +STORE, 46948292005888, 46948292046847, +STORE, 46948291690496, 46948292005887, +STORE, 46948290347008, 46948291690495, +ERASE, 46948290347008, 46948290347008, +STORE, 46948290347008, 46948291690495, +STORE, 46948292001792, 46948292005887, +STORE, 46948291690496, 46948292001791, +ERASE, 46948291690496, 46948291690496, +STORE, 46948291690496, 46948292001791, +STORE, 46948292030464, 46948292046847, +STORE, 46948292005888, 46948292030463, +ERASE, 46948292005888, 46948292005888, +STORE, 46948292005888, 46948292030463, +ERASE, 46948292030464, 46948292030464, +STORE, 46948292030464, 46948292046847, +STORE, 46948292030464, 46948292059135, +ERASE, 46948292005888, 46948292005888, +STORE, 46948292005888, 46948292022271, +STORE, 46948292022272, 46948292030463, +ERASE, 46948290191360, 46948290191360, +STORE, 46948290191360, 46948290195455, +STORE, 46948290195456, 46948290199551, +ERASE, 94207146442752, 94207146442752, +STORE, 94207146442752, 94207146459135, +STORE, 94207146459136, 94207146463231, +ERASE, 140684505071616, 140684505071616, +STORE, 140684505071616, 140684505075711, +STORE, 140684505075712, 140684505079807, +ERASE, 46948290084864, 46948290084864, +STORE, 140737488347136, 140737488351231, +STORE, 140726367158272, 140737488351231, +ERASE, 140726367158272, 140726367158272, +STORE, 140726367158272, 140726367162367, +STORE, 94436124106752, 94436124819455, +ERASE, 94436124106752, 94436124106752, +STORE, 94436124106752, 94436124155903, +STORE, 94436124155904, 94436124819455, +ERASE, 94436124155904, 94436124155904, +STORE, 94436124155904, 94436124700671, +STORE, 94436124700672, 94436124798975, +STORE, 94436124798976, 94436124819455, +STORE, 140049025044480, 140049025216511, +ERASE, 140049025044480, 140049025044480, +STORE, 140049025044480, 140049025048575, +STORE, 140049025048576, 140049025216511, +ERASE, 140049025048576, 140049025048576, +STORE, 140049025048576, 140049025171455, +STORE, 140049025171456, 140049025204223, +STORE, 140049025204224, 140049025212415, +STORE, 140049025212416, 140049025216511, +STORE, 140726367256576, 140726367260671, +STORE, 140726367244288, 140726367256575, +STORE, 47583769952256, 47583769960447, +STORE, 47583769960448, 47583769968639, +STORE, 47583769968640, 47583770075135, +STORE, 47583769985024, 47583770075135, +STORE, 47583769968640, 47583769985023, +ERASE, 47583769985024, 47583769985024, +STORE, 47583769985024, 47583770058751, +STORE, 47583770058752, 47583770075135, +STORE, 47583770038272, 47583770058751, +STORE, 47583769985024, 47583770038271, +ERASE, 47583769985024, 47583769985024, +STORE, 47583769985024, 47583770038271, +STORE, 47583770054656, 47583770058751, +STORE, 47583770038272, 47583770054655, +ERASE, 47583770038272, 47583770038272, +STORE, 47583770038272, 47583770054655, +STORE, 47583770066944, 47583770075135, +STORE, 47583770058752, 47583770066943, +ERASE, 47583770058752, 47583770058752, +STORE, 47583770058752, 
47583770066943, +ERASE, 47583770066944, 47583770066944, +STORE, 47583770066944, 47583770075135, +STORE, 47583770075136, 47583771914239, +STORE, 47583770214400, 47583771914239, +STORE, 47583770075136, 47583770214399, +ERASE, 47583770214400, 47583770214400, +STORE, 47583770214400, 47583771873279, +STORE, 47583771873280, 47583771914239, +STORE, 47583771557888, 47583771873279, +STORE, 47583770214400, 47583771557887, +ERASE, 47583770214400, 47583770214400, +STORE, 47583770214400, 47583771557887, +STORE, 47583771869184, 47583771873279, +STORE, 47583771557888, 47583771869183, +ERASE, 47583771557888, 47583771557888, +STORE, 47583771557888, 47583771869183, +STORE, 47583771897856, 47583771914239, +STORE, 47583771873280, 47583771897855, +ERASE, 47583771873280, 47583771873280, +STORE, 47583771873280, 47583771897855, +ERASE, 47583771897856, 47583771897856, +STORE, 47583771897856, 47583771914239, +STORE, 47583771897856, 47583771926527, +ERASE, 47583771873280, 47583771873280, +STORE, 47583771873280, 47583771889663, +STORE, 47583771889664, 47583771897855, +ERASE, 47583770058752, 47583770058752, +STORE, 47583770058752, 47583770062847, +STORE, 47583770062848, 47583770066943, +ERASE, 94436124798976, 94436124798976, +STORE, 94436124798976, 94436124815359, +STORE, 94436124815360, 94436124819455, +ERASE, 140049025204224, 140049025204224, +STORE, 140049025204224, 140049025208319, +STORE, 140049025208320, 140049025212415, +ERASE, 47583769952256, 47583769952256, +STORE, 140737488347136, 140737488351231, +STORE, 140727116099584, 140737488351231, +ERASE, 140727116099584, 140727116099584, +STORE, 140727116099584, 140727116103679, +STORE, 94166319734784, 94166320447487, +ERASE, 94166319734784, 94166319734784, +STORE, 94166319734784, 94166319783935, +STORE, 94166319783936, 94166320447487, +ERASE, 94166319783936, 94166319783936, +STORE, 94166319783936, 94166320328703, +STORE, 94166320328704, 94166320427007, +STORE, 94166320427008, 94166320447487, +STORE, 139976559542272, 139976559714303, +ERASE, 139976559542272, 139976559542272, +STORE, 139976559542272, 139976559546367, +STORE, 139976559546368, 139976559714303, +ERASE, 139976559546368, 139976559546368, +STORE, 139976559546368, 139976559669247, +STORE, 139976559669248, 139976559702015, +STORE, 139976559702016, 139976559710207, +STORE, 139976559710208, 139976559714303, +STORE, 140727116222464, 140727116226559, +STORE, 140727116210176, 140727116222463, +STORE, 47656235454464, 47656235462655, +STORE, 47656235462656, 47656235470847, +STORE, 47656235470848, 47656235577343, +STORE, 47656235487232, 47656235577343, +STORE, 47656235470848, 47656235487231, +ERASE, 47656235487232, 47656235487232, +STORE, 47656235487232, 47656235560959, +STORE, 47656235560960, 47656235577343, +STORE, 47656235540480, 47656235560959, +STORE, 47656235487232, 47656235540479, +ERASE, 47656235487232, 47656235487232, +STORE, 47656235487232, 47656235540479, +STORE, 47656235556864, 47656235560959, +STORE, 47656235540480, 47656235556863, +ERASE, 47656235540480, 47656235540480, +STORE, 47656235540480, 47656235556863, +STORE, 47656235569152, 47656235577343, +STORE, 47656235560960, 47656235569151, +ERASE, 47656235560960, 47656235560960, +STORE, 47656235560960, 47656235569151, +ERASE, 47656235569152, 47656235569152, +STORE, 47656235569152, 47656235577343, +STORE, 47656235577344, 47656237416447, +STORE, 47656235716608, 47656237416447, +STORE, 47656235577344, 47656235716607, +ERASE, 47656235716608, 47656235716608, +STORE, 47656235716608, 47656237375487, +STORE, 47656237375488, 47656237416447, +STORE, 
47656237060096, 47656237375487, +STORE, 47656235716608, 47656237060095, +ERASE, 47656235716608, 47656235716608, +STORE, 47656235716608, 47656237060095, +STORE, 47656237371392, 47656237375487, +STORE, 47656237060096, 47656237371391, +ERASE, 47656237060096, 47656237060096, +STORE, 47656237060096, 47656237371391, +STORE, 47656237400064, 47656237416447, +STORE, 47656237375488, 47656237400063, +ERASE, 47656237375488, 47656237375488, +STORE, 47656237375488, 47656237400063, +ERASE, 47656237400064, 47656237400064, +STORE, 47656237400064, 47656237416447, +STORE, 47656237400064, 47656237428735, +ERASE, 47656237375488, 47656237375488, +STORE, 47656237375488, 47656237391871, +STORE, 47656237391872, 47656237400063, +ERASE, 47656235560960, 47656235560960, +STORE, 47656235560960, 47656235565055, +STORE, 47656235565056, 47656235569151, +ERASE, 94166320427008, 94166320427008, +STORE, 94166320427008, 94166320443391, +STORE, 94166320443392, 94166320447487, +ERASE, 139976559702016, 139976559702016, +STORE, 139976559702016, 139976559706111, +STORE, 139976559706112, 139976559710207, +ERASE, 47656235454464, 47656235454464, +STORE, 94166332153856, 94166332289023, +STORE, 140737488347136, 140737488351231, +STORE, 140726412816384, 140737488351231, +ERASE, 140726412816384, 140726412816384, +STORE, 140726412816384, 140726412820479, +STORE, 94094884507648, 94094885220351, +ERASE, 94094884507648, 94094884507648, +STORE, 94094884507648, 94094884556799, +STORE, 94094884556800, 94094885220351, +ERASE, 94094884556800, 94094884556800, +STORE, 94094884556800, 94094885101567, +STORE, 94094885101568, 94094885199871, +STORE, 94094885199872, 94094885220351, +STORE, 139773773938688, 139773774110719, +ERASE, 139773773938688, 139773773938688, +STORE, 139773773938688, 139773773942783, +STORE, 139773773942784, 139773774110719, +ERASE, 139773773942784, 139773773942784, +STORE, 139773773942784, 139773774065663, +STORE, 139773774065664, 139773774098431, +STORE, 139773774098432, 139773774106623, +STORE, 139773774106624, 139773774110719, +STORE, 140726412963840, 140726412967935, +STORE, 140726412951552, 140726412963839, +STORE, 47859021058048, 47859021066239, +STORE, 47859021066240, 47859021074431, +STORE, 47859021074432, 47859021180927, +STORE, 47859021090816, 47859021180927, +STORE, 47859021074432, 47859021090815, +ERASE, 47859021090816, 47859021090816, +STORE, 47859021090816, 47859021164543, +STORE, 47859021164544, 47859021180927, +STORE, 47859021144064, 47859021164543, +STORE, 47859021090816, 47859021144063, +ERASE, 47859021090816, 47859021090816, +STORE, 47859021090816, 47859021144063, +STORE, 47859021160448, 47859021164543, +STORE, 47859021144064, 47859021160447, +ERASE, 47859021144064, 47859021144064, +STORE, 47859021144064, 47859021160447, +STORE, 47859021172736, 47859021180927, +STORE, 47859021164544, 47859021172735, +ERASE, 47859021164544, 47859021164544, +STORE, 47859021164544, 47859021172735, +ERASE, 47859021172736, 47859021172736, +STORE, 47859021172736, 47859021180927, +STORE, 47859021180928, 47859023020031, +STORE, 47859021320192, 47859023020031, +STORE, 47859021180928, 47859021320191, +ERASE, 47859021320192, 47859021320192, +STORE, 47859021320192, 47859022979071, +STORE, 47859022979072, 47859023020031, +STORE, 47859022663680, 47859022979071, +STORE, 47859021320192, 47859022663679, +ERASE, 47859021320192, 47859021320192, +STORE, 47859021320192, 47859022663679, +STORE, 47859022974976, 47859022979071, +STORE, 47859022663680, 47859022974975, +ERASE, 47859022663680, 47859022663680, +STORE, 47859022663680, 47859022974975, 
+STORE, 47859023003648, 47859023020031, +STORE, 47859022979072, 47859023003647, +ERASE, 47859022979072, 47859022979072, +STORE, 47859022979072, 47859023003647, +ERASE, 47859023003648, 47859023003648, +STORE, 47859023003648, 47859023020031, +STORE, 47859023003648, 47859023032319, +ERASE, 47859022979072, 47859022979072, +STORE, 47859022979072, 47859022995455, +STORE, 47859022995456, 47859023003647, +ERASE, 47859021164544, 47859021164544, +STORE, 47859021164544, 47859021168639, +STORE, 47859021168640, 47859021172735, +ERASE, 94094885199872, 94094885199872, +STORE, 94094885199872, 94094885216255, +STORE, 94094885216256, 94094885220351, +ERASE, 139773774098432, 139773774098432, +STORE, 139773774098432, 139773774102527, +STORE, 139773774102528, 139773774106623, +ERASE, 47859021058048, 47859021058048, +STORE, 94094901108736, 94094901243903, +STORE, 140737488347136, 140737488351231, +STORE, 140736567963648, 140737488351231, +ERASE, 140736567963648, 140736567963648, +STORE, 140736567963648, 140736567967743, +STORE, 94924425748480, 94924426461183, +ERASE, 94924425748480, 94924425748480, +STORE, 94924425748480, 94924425797631, +STORE, 94924425797632, 94924426461183, +ERASE, 94924425797632, 94924425797632, +STORE, 94924425797632, 94924426342399, +STORE, 94924426342400, 94924426440703, +STORE, 94924426440704, 94924426461183, +STORE, 140042126319616, 140042126491647, +ERASE, 140042126319616, 140042126319616, +STORE, 140042126319616, 140042126323711, +STORE, 140042126323712, 140042126491647, +ERASE, 140042126323712, 140042126323712, +STORE, 140042126323712, 140042126446591, +STORE, 140042126446592, 140042126479359, +STORE, 140042126479360, 140042126487551, +STORE, 140042126487552, 140042126491647, +STORE, 140736568672256, 140736568676351, +STORE, 140736568659968, 140736568672255, +STORE, 47590668677120, 47590668685311, +STORE, 47590668685312, 47590668693503, +STORE, 47590668693504, 47590668799999, +STORE, 47590668709888, 47590668799999, +STORE, 47590668693504, 47590668709887, +ERASE, 47590668709888, 47590668709888, +STORE, 47590668709888, 47590668783615, +STORE, 47590668783616, 47590668799999, +STORE, 47590668763136, 47590668783615, +STORE, 47590668709888, 47590668763135, +ERASE, 47590668709888, 47590668709888, +STORE, 47590668709888, 47590668763135, +STORE, 47590668779520, 47590668783615, +STORE, 47590668763136, 47590668779519, +ERASE, 47590668763136, 47590668763136, +STORE, 47590668763136, 47590668779519, +STORE, 47590668791808, 47590668799999, +STORE, 47590668783616, 47590668791807, +ERASE, 47590668783616, 47590668783616, +STORE, 47590668783616, 47590668791807, +ERASE, 47590668791808, 47590668791808, +STORE, 47590668791808, 47590668799999, +STORE, 47590668800000, 47590670639103, +STORE, 47590668939264, 47590670639103, +STORE, 47590668800000, 47590668939263, +ERASE, 47590668939264, 47590668939264, +STORE, 47590668939264, 47590670598143, +STORE, 47590670598144, 47590670639103, +STORE, 47590670282752, 47590670598143, +STORE, 47590668939264, 47590670282751, +ERASE, 47590668939264, 47590668939264, +STORE, 47590668939264, 47590670282751, +STORE, 47590670594048, 47590670598143, +STORE, 47590670282752, 47590670594047, +ERASE, 47590670282752, 47590670282752, +STORE, 47590670282752, 47590670594047, +STORE, 47590670622720, 47590670639103, +STORE, 47590670598144, 47590670622719, +ERASE, 47590670598144, 47590670598144, +STORE, 47590670598144, 47590670622719, +ERASE, 47590670622720, 47590670622720, +STORE, 47590670622720, 47590670639103, +STORE, 47590670622720, 47590670651391, +ERASE, 47590670598144, 
47590670598144, +STORE, 47590670598144, 47590670614527, +STORE, 47590670614528, 47590670622719, +ERASE, 47590668783616, 47590668783616, +STORE, 47590668783616, 47590668787711, +STORE, 47590668787712, 47590668791807, +ERASE, 94924426440704, 94924426440704, +STORE, 94924426440704, 94924426457087, +STORE, 94924426457088, 94924426461183, +ERASE, 140042126479360, 140042126479360, +STORE, 140042126479360, 140042126483455, +STORE, 140042126483456, 140042126487551, +ERASE, 47590668677120, 47590668677120, +STORE, 140737488347136, 140737488351231, +STORE, 140733281439744, 140737488351231, +ERASE, 140733281439744, 140733281439744, +STORE, 140733281439744, 140733281443839, +STORE, 94490667069440, 94490667782143, +ERASE, 94490667069440, 94490667069440, +STORE, 94490667069440, 94490667118591, +STORE, 94490667118592, 94490667782143, +ERASE, 94490667118592, 94490667118592, +STORE, 94490667118592, 94490667663359, +STORE, 94490667663360, 94490667761663, +STORE, 94490667761664, 94490667782143, +STORE, 139878215118848, 139878215290879, +ERASE, 139878215118848, 139878215118848, +STORE, 139878215118848, 139878215122943, +STORE, 139878215122944, 139878215290879, +ERASE, 139878215122944, 139878215122944, +STORE, 139878215122944, 139878215245823, +STORE, 139878215245824, 139878215278591, +STORE, 139878215278592, 139878215286783, +STORE, 139878215286784, 139878215290879, +STORE, 140733281464320, 140733281468415, +STORE, 140733281452032, 140733281464319, +STORE, 47754579877888, 47754579886079, +STORE, 47754579886080, 47754579894271, +STORE, 47754579894272, 47754580000767, +STORE, 47754579910656, 47754580000767, +STORE, 47754579894272, 47754579910655, +ERASE, 47754579910656, 47754579910656, +STORE, 47754579910656, 47754579984383, +STORE, 47754579984384, 47754580000767, +STORE, 47754579963904, 47754579984383, +STORE, 47754579910656, 47754579963903, +ERASE, 47754579910656, 47754579910656, +STORE, 47754579910656, 47754579963903, +STORE, 47754579980288, 47754579984383, +STORE, 47754579963904, 47754579980287, +ERASE, 47754579963904, 47754579963904, +STORE, 47754579963904, 47754579980287, +STORE, 47754579992576, 47754580000767, +STORE, 47754579984384, 47754579992575, +ERASE, 47754579984384, 47754579984384, +STORE, 47754579984384, 47754579992575, +ERASE, 47754579992576, 47754579992576, +STORE, 47754579992576, 47754580000767, +STORE, 47754580000768, 47754581839871, +STORE, 47754580140032, 47754581839871, +STORE, 47754580000768, 47754580140031, +ERASE, 47754580140032, 47754580140032, +STORE, 47754580140032, 47754581798911, +STORE, 47754581798912, 47754581839871, +STORE, 47754581483520, 47754581798911, +STORE, 47754580140032, 47754581483519, +ERASE, 47754580140032, 47754580140032, +STORE, 47754580140032, 47754581483519, +STORE, 47754581794816, 47754581798911, +STORE, 47754581483520, 47754581794815, +ERASE, 47754581483520, 47754581483520, +STORE, 47754581483520, 47754581794815, +STORE, 47754581823488, 47754581839871, +STORE, 47754581798912, 47754581823487, +ERASE, 47754581798912, 47754581798912, +STORE, 47754581798912, 47754581823487, +ERASE, 47754581823488, 47754581823488, +STORE, 47754581823488, 47754581839871, +STORE, 47754581823488, 47754581852159, +ERASE, 47754581798912, 47754581798912, +STORE, 47754581798912, 47754581815295, +STORE, 47754581815296, 47754581823487, +ERASE, 47754579984384, 47754579984384, +STORE, 47754579984384, 47754579988479, +STORE, 47754579988480, 47754579992575, +ERASE, 94490667761664, 94490667761664, +STORE, 94490667761664, 94490667778047, +STORE, 94490667778048, 94490667782143, +ERASE, 
139878215278592, 139878215278592, +STORE, 139878215278592, 139878215282687, +STORE, 139878215282688, 139878215286783, +ERASE, 47754579877888, 47754579877888, +STORE, 94490669649920, 94490669785087, +STORE, 140737488347136, 140737488351231, +STORE, 140735382188032, 140737488351231, +ERASE, 140735382188032, 140735382188032, +STORE, 140735382188032, 140735382192127, +STORE, 94150181302272, 94150182014975, +ERASE, 94150181302272, 94150181302272, +STORE, 94150181302272, 94150181351423, +STORE, 94150181351424, 94150182014975, +ERASE, 94150181351424, 94150181351424, +STORE, 94150181351424, 94150181896191, +STORE, 94150181896192, 94150181994495, +STORE, 94150181994496, 94150182014975, +STORE, 139679752458240, 139679752630271, +ERASE, 139679752458240, 139679752458240, +STORE, 139679752458240, 139679752462335, +STORE, 139679752462336, 139679752630271, +ERASE, 139679752462336, 139679752462336, +STORE, 139679752462336, 139679752585215, +STORE, 139679752585216, 139679752617983, +STORE, 139679752617984, 139679752626175, +STORE, 139679752626176, 139679752630271, +STORE, 140735382536192, 140735382540287, +STORE, 140735382523904, 140735382536191, +STORE, 47953042538496, 47953042546687, +STORE, 47953042546688, 47953042554879, +STORE, 47953042554880, 47953042661375, +STORE, 47953042571264, 47953042661375, +STORE, 47953042554880, 47953042571263, +ERASE, 47953042571264, 47953042571264, +STORE, 47953042571264, 47953042644991, +STORE, 47953042644992, 47953042661375, +STORE, 47953042624512, 47953042644991, +STORE, 47953042571264, 47953042624511, +ERASE, 47953042571264, 47953042571264, +STORE, 47953042571264, 47953042624511, +STORE, 47953042640896, 47953042644991, +STORE, 47953042624512, 47953042640895, +ERASE, 47953042624512, 47953042624512, +STORE, 47953042624512, 47953042640895, +STORE, 47953042653184, 47953042661375, +STORE, 47953042644992, 47953042653183, +ERASE, 47953042644992, 47953042644992, +STORE, 47953042644992, 47953042653183, +ERASE, 47953042653184, 47953042653184, +STORE, 47953042653184, 47953042661375, +STORE, 47953042661376, 47953044500479, +STORE, 47953042800640, 47953044500479, +STORE, 47953042661376, 47953042800639, +ERASE, 47953042800640, 47953042800640, +STORE, 47953042800640, 47953044459519, +STORE, 47953044459520, 47953044500479, +STORE, 47953044144128, 47953044459519, +STORE, 47953042800640, 47953044144127, +ERASE, 47953042800640, 47953042800640, +STORE, 47953042800640, 47953044144127, +STORE, 47953044455424, 47953044459519, +STORE, 47953044144128, 47953044455423, +ERASE, 47953044144128, 47953044144128, +STORE, 47953044144128, 47953044455423, +STORE, 47953044484096, 47953044500479, +STORE, 47953044459520, 47953044484095, +ERASE, 47953044459520, 47953044459520, +STORE, 47953044459520, 47953044484095, +ERASE, 47953044484096, 47953044484096, +STORE, 47953044484096, 47953044500479, +STORE, 47953044484096, 47953044512767, +ERASE, 47953044459520, 47953044459520, +STORE, 47953044459520, 47953044475903, +STORE, 47953044475904, 47953044484095, +ERASE, 47953042644992, 47953042644992, +STORE, 47953042644992, 47953042649087, +STORE, 47953042649088, 47953042653183, +ERASE, 94150181994496, 94150181994496, +STORE, 94150181994496, 94150182010879, +STORE, 94150182010880, 94150182014975, +ERASE, 139679752617984, 139679752617984, +STORE, 139679752617984, 139679752622079, +STORE, 139679752622080, 139679752626175, +ERASE, 47953042538496, 47953042538496, +STORE, 140737488347136, 140737488351231, +STORE, 140737044123648, 140737488351231, +ERASE, 140737044123648, 140737044123648, +STORE, 140737044123648, 
140737044127743, +STORE, 94425324294144, 94425325006847, +ERASE, 94425324294144, 94425324294144, +STORE, 94425324294144, 94425324343295, +STORE, 94425324343296, 94425325006847, +ERASE, 94425324343296, 94425324343296, +STORE, 94425324343296, 94425324888063, +STORE, 94425324888064, 94425324986367, +STORE, 94425324986368, 94425325006847, +STORE, 140382015016960, 140382015188991, +ERASE, 140382015016960, 140382015016960, +STORE, 140382015016960, 140382015021055, +STORE, 140382015021056, 140382015188991, +ERASE, 140382015021056, 140382015021056, +STORE, 140382015021056, 140382015143935, +STORE, 140382015143936, 140382015176703, +STORE, 140382015176704, 140382015184895, +STORE, 140382015184896, 140382015188991, +STORE, 140737045585920, 140737045590015, +STORE, 140737045573632, 140737045585919, +STORE, 47250779979776, 47250779987967, +STORE, 47250779987968, 47250779996159, +STORE, 47250779996160, 47250780102655, +STORE, 47250780012544, 47250780102655, +STORE, 47250779996160, 47250780012543, +ERASE, 47250780012544, 47250780012544, +STORE, 47250780012544, 47250780086271, +STORE, 47250780086272, 47250780102655, +STORE, 47250780065792, 47250780086271, +STORE, 47250780012544, 47250780065791, +ERASE, 47250780012544, 47250780012544, +STORE, 47250780012544, 47250780065791, +STORE, 47250780082176, 47250780086271, +STORE, 47250780065792, 47250780082175, +ERASE, 47250780065792, 47250780065792, +STORE, 47250780065792, 47250780082175, +STORE, 47250780094464, 47250780102655, +STORE, 47250780086272, 47250780094463, +ERASE, 47250780086272, 47250780086272, +STORE, 47250780086272, 47250780094463, +ERASE, 47250780094464, 47250780094464, +STORE, 47250780094464, 47250780102655, +STORE, 47250780102656, 47250781941759, +STORE, 47250780241920, 47250781941759, +STORE, 47250780102656, 47250780241919, +ERASE, 47250780241920, 47250780241920, +STORE, 47250780241920, 47250781900799, +STORE, 47250781900800, 47250781941759, +STORE, 47250781585408, 47250781900799, +STORE, 47250780241920, 47250781585407, +ERASE, 47250780241920, 47250780241920, +STORE, 47250780241920, 47250781585407, +STORE, 47250781896704, 47250781900799, +STORE, 47250781585408, 47250781896703, +ERASE, 47250781585408, 47250781585408, +STORE, 47250781585408, 47250781896703, +STORE, 47250781925376, 47250781941759, +STORE, 47250781900800, 47250781925375, +ERASE, 47250781900800, 47250781900800, +STORE, 47250781900800, 47250781925375, +ERASE, 47250781925376, 47250781925376, +STORE, 47250781925376, 47250781941759, +STORE, 47250781925376, 47250781954047, +ERASE, 47250781900800, 47250781900800, +STORE, 47250781900800, 47250781917183, +STORE, 47250781917184, 47250781925375, +ERASE, 47250780086272, 47250780086272, +STORE, 47250780086272, 47250780090367, +STORE, 47250780090368, 47250780094463, +ERASE, 94425324986368, 94425324986368, +STORE, 94425324986368, 94425325002751, +STORE, 94425325002752, 94425325006847, +ERASE, 140382015176704, 140382015176704, +STORE, 140382015176704, 140382015180799, +STORE, 140382015180800, 140382015184895, +ERASE, 47250779979776, 47250779979776, +STORE, 94425351438336, 94425351573503, +STORE, 140737488347136, 140737488351231, +STORE, 140736801144832, 140737488351231, +ERASE, 140736801144832, 140736801144832, +STORE, 140736801144832, 140736801148927, +STORE, 94629429358592, 94629430071295, +ERASE, 94629429358592, 94629429358592, +STORE, 94629429358592, 94629429407743, +STORE, 94629429407744, 94629430071295, +ERASE, 94629429407744, 94629429407744, +STORE, 94629429407744, 94629429952511, +STORE, 94629429952512, 94629430050815, +STORE, 
[Elided: a long run of added lines consisting solely of raw test vectors — (STORE | ERASE, start, end) address-range triples, one per line in the original diff, apparently recorded virtual-address layouts replayed by a range-store test. The only annotation within this span is the in-line comment "/* this next one caused issues when lowering the efficiency */". The triples' original line breaks were lost in extraction and the data is not reproduced here.]
47906197336063, +STORE, 47906196979712, 47906197295103, +STORE, 47906195636224, 47906196979711, +ERASE, 47906195636224, 47906195636224, +STORE, 47906195636224, 47906196979711, +STORE, 47906197291008, 47906197295103, +STORE, 47906196979712, 47906197291007, +ERASE, 47906196979712, 47906196979712, +STORE, 47906196979712, 47906197291007, +STORE, 47906197319680, 47906197336063, +STORE, 47906197295104, 47906197319679, +ERASE, 47906197295104, 47906197295104, +STORE, 47906197295104, 47906197319679, +ERASE, 47906197319680, 47906197319680, +STORE, 47906197319680, 47906197336063, +STORE, 47906197336064, 47906197446655, +STORE, 47906197352448, 47906197446655, +STORE, 47906197336064, 47906197352447, +ERASE, 47906197352448, 47906197352448, +STORE, 47906197352448, 47906197438463, +STORE, 47906197438464, 47906197446655, +STORE, 47906197413888, 47906197438463, +STORE, 47906197352448, 47906197413887, +ERASE, 47906197352448, 47906197352448, +STORE, 47906197352448, 47906197413887, +STORE, 47906197434368, 47906197438463, +STORE, 47906197413888, 47906197434367, +ERASE, 47906197413888, 47906197413888, +STORE, 47906197413888, 47906197434367, +ERASE, 47906197438464, 47906197438464, +STORE, 47906197438464, 47906197446655, +STORE, 47906197446656, 47906197491711, +ERASE, 47906197446656, 47906197446656, +STORE, 47906197446656, 47906197454847, +STORE, 47906197454848, 47906197491711, +STORE, 47906197475328, 47906197491711, +STORE, 47906197454848, 47906197475327, +ERASE, 47906197454848, 47906197454848, +STORE, 47906197454848, 47906197475327, +STORE, 47906197483520, 47906197491711, +STORE, 47906197475328, 47906197483519, +ERASE, 47906197475328, 47906197475328, +STORE, 47906197475328, 47906197491711, +ERASE, 47906197475328, 47906197475328, +STORE, 47906197475328, 47906197483519, +STORE, 47906197483520, 47906197491711, +ERASE, 47906197483520, 47906197483520, +STORE, 47906197483520, 47906197491711, +STORE, 47906197491712, 47906197839871, +STORE, 47906197532672, 47906197839871, +STORE, 47906197491712, 47906197532671, +ERASE, 47906197532672, 47906197532672, +STORE, 47906197532672, 47906197815295, +STORE, 47906197815296, 47906197839871, +STORE, 47906197745664, 47906197815295, +STORE, 47906197532672, 47906197745663, +ERASE, 47906197532672, 47906197532672, +STORE, 47906197532672, 47906197745663, +STORE, 47906197811200, 47906197815295, +STORE, 47906197745664, 47906197811199, +ERASE, 47906197745664, 47906197745664, +STORE, 47906197745664, 47906197811199, +ERASE, 47906197815296, 47906197815296, +STORE, 47906197815296, 47906197839871, +STORE, 47906197839872, 47906200100863, +STORE, 47906197991424, 47906200100863, +STORE, 47906197839872, 47906197991423, +ERASE, 47906197991424, 47906197991424, +STORE, 47906197991424, 47906200084479, +STORE, 47906200084480, 47906200100863, +STORE, 47906200092672, 47906200100863, +STORE, 47906200084480, 47906200092671, +ERASE, 47906200084480, 47906200084480, +STORE, 47906200084480, 47906200092671, +ERASE, 47906200092672, 47906200092672, +STORE, 47906200092672, 47906200100863, +STORE, 47906200100864, 47906200236031, +ERASE, 47906200100864, 47906200100864, +STORE, 47906200100864, 47906200125439, +STORE, 47906200125440, 47906200236031, +STORE, 47906200186880, 47906200236031, +STORE, 47906200125440, 47906200186879, +ERASE, 47906200125440, 47906200125440, +STORE, 47906200125440, 47906200186879, +STORE, 47906200211456, 47906200236031, +STORE, 47906200186880, 47906200211455, +ERASE, 47906200186880, 47906200186880, +STORE, 47906200186880, 47906200236031, +ERASE, 47906200186880, 47906200186880, +STORE, 
47906200186880, 47906200211455, +STORE, 47906200211456, 47906200236031, +STORE, 47906200219648, 47906200236031, +STORE, 47906200211456, 47906200219647, +ERASE, 47906200211456, 47906200211456, +STORE, 47906200211456, 47906200219647, +ERASE, 47906200219648, 47906200219648, +STORE, 47906200219648, 47906200236031, +STORE, 47906200219648, 47906200244223, +STORE, 47906200244224, 47906200408063, +ERASE, 47906200244224, 47906200244224, +STORE, 47906200244224, 47906200256511, +STORE, 47906200256512, 47906200408063, +STORE, 47906200354816, 47906200408063, +STORE, 47906200256512, 47906200354815, +ERASE, 47906200256512, 47906200256512, +STORE, 47906200256512, 47906200354815, +STORE, 47906200399872, 47906200408063, +STORE, 47906200354816, 47906200399871, +ERASE, 47906200354816, 47906200354816, +STORE, 47906200354816, 47906200408063, +ERASE, 47906200354816, 47906200354816, +STORE, 47906200354816, 47906200399871, +STORE, 47906200399872, 47906200408063, +ERASE, 47906200399872, 47906200399872, +STORE, 47906200399872, 47906200408063, +STORE, 47906200408064, 47906201006079, +STORE, 47906200526848, 47906201006079, +STORE, 47906200408064, 47906200526847, +ERASE, 47906200526848, 47906200526848, +STORE, 47906200526848, 47906200952831, +STORE, 47906200952832, 47906201006079, +STORE, 47906200842240, 47906200952831, +STORE, 47906200526848, 47906200842239, +ERASE, 47906200526848, 47906200526848, +STORE, 47906200526848, 47906200842239, +STORE, 47906200948736, 47906200952831, +STORE, 47906200842240, 47906200948735, +ERASE, 47906200842240, 47906200842240, +STORE, 47906200842240, 47906200948735, +ERASE, 47906200952832, 47906200952832, +STORE, 47906200952832, 47906201006079, +STORE, 47906201006080, 47906204057599, +STORE, 47906201550848, 47906204057599, +STORE, 47906201006080, 47906201550847, +ERASE, 47906201550848, 47906201550848, +STORE, 47906201550848, 47906203836415, +STORE, 47906203836416, 47906204057599, +STORE, 47906203246592, 47906203836415, +STORE, 47906201550848, 47906203246591, +ERASE, 47906201550848, 47906201550848, +STORE, 47906201550848, 47906203246591, +STORE, 47906203832320, 47906203836415, +STORE, 47906203246592, 47906203832319, +ERASE, 47906203246592, 47906203246592, +STORE, 47906203246592, 47906203832319, +STORE, 47906204041216, 47906204057599, +STORE, 47906203836416, 47906204041215, +ERASE, 47906203836416, 47906203836416, +STORE, 47906203836416, 47906204041215, +ERASE, 47906204041216, 47906204041216, +STORE, 47906204041216, 47906204057599, +STORE, 47906204057600, 47906204090367, +ERASE, 47906204057600, 47906204057600, +STORE, 47906204057600, 47906204065791, +STORE, 47906204065792, 47906204090367, +STORE, 47906204078080, 47906204090367, +STORE, 47906204065792, 47906204078079, +ERASE, 47906204065792, 47906204065792, +STORE, 47906204065792, 47906204078079, +STORE, 47906204082176, 47906204090367, +STORE, 47906204078080, 47906204082175, +ERASE, 47906204078080, 47906204078080, +STORE, 47906204078080, 47906204090367, +ERASE, 47906204078080, 47906204078080, +STORE, 47906204078080, 47906204082175, +STORE, 47906204082176, 47906204090367, +ERASE, 47906204082176, 47906204082176, +STORE, 47906204082176, 47906204090367, +STORE, 47906204090368, 47906204127231, +ERASE, 47906204090368, 47906204090368, +STORE, 47906204090368, 47906204098559, +STORE, 47906204098560, 47906204127231, +STORE, 47906204114944, 47906204127231, +STORE, 47906204098560, 47906204114943, +ERASE, 47906204098560, 47906204098560, +STORE, 47906204098560, 47906204114943, +STORE, 47906204119040, 47906204127231, +STORE, 47906204114944, 47906204119039, 
+ERASE, 47906204114944, 47906204114944, +STORE, 47906204114944, 47906204127231, +ERASE, 47906204114944, 47906204114944, +STORE, 47906204114944, 47906204119039, +STORE, 47906204119040, 47906204127231, +ERASE, 47906204119040, 47906204119040, +STORE, 47906204119040, 47906204127231, +STORE, 47906204127232, 47906204602367, +ERASE, 47906204127232, 47906204127232, +STORE, 47906204127232, 47906204135423, +STORE, 47906204135424, 47906204602367, +STORE, 47906204471296, 47906204602367, +STORE, 47906204135424, 47906204471295, +ERASE, 47906204135424, 47906204135424, +STORE, 47906204135424, 47906204471295, +STORE, 47906204594176, 47906204602367, +STORE, 47906204471296, 47906204594175, +ERASE, 47906204471296, 47906204471296, +STORE, 47906204471296, 47906204602367, +ERASE, 47906204471296, 47906204471296, +STORE, 47906204471296, 47906204594175, +STORE, 47906204594176, 47906204602367, +ERASE, 47906204594176, 47906204594176, +STORE, 47906204594176, 47906204602367, +STORE, 47906204602368, 47906204610559, +STORE, 47906204610560, 47906204631039, +ERASE, 47906204610560, 47906204610560, +STORE, 47906204610560, 47906204614655, +STORE, 47906204614656, 47906204631039, +STORE, 47906204618752, 47906204631039, +STORE, 47906204614656, 47906204618751, +ERASE, 47906204614656, 47906204614656, +STORE, 47906204614656, 47906204618751, +STORE, 47906204622848, 47906204631039, +STORE, 47906204618752, 47906204622847, +ERASE, 47906204618752, 47906204618752, +STORE, 47906204618752, 47906204631039, +ERASE, 47906204618752, 47906204618752, +STORE, 47906204618752, 47906204622847, +STORE, 47906204622848, 47906204631039, +ERASE, 47906204622848, 47906204622848, +STORE, 47906204622848, 47906204631039, +STORE, 47906204631040, 47906204639231, +ERASE, 47906197295104, 47906197295104, +STORE, 47906197295104, 47906197311487, +STORE, 47906197311488, 47906197319679, +ERASE, 47906204622848, 47906204622848, +STORE, 47906204622848, 47906204626943, +STORE, 47906204626944, 47906204631039, +ERASE, 47906200211456, 47906200211456, +STORE, 47906200211456, 47906200215551, +STORE, 47906200215552, 47906200219647, +ERASE, 47906204594176, 47906204594176, +STORE, 47906204594176, 47906204598271, +STORE, 47906204598272, 47906204602367, +ERASE, 47906204119040, 47906204119040, +STORE, 47906204119040, 47906204123135, +STORE, 47906204123136, 47906204127231, +ERASE, 47906204082176, 47906204082176, +STORE, 47906204082176, 47906204086271, +STORE, 47906204086272, 47906204090367, +ERASE, 47906203836416, 47906203836416, +STORE, 47906203836416, 47906204033023, +STORE, 47906204033024, 47906204041215, +ERASE, 47906200952832, 47906200952832, +STORE, 47906200952832, 47906200989695, +STORE, 47906200989696, 47906201006079, +ERASE, 47906200399872, 47906200399872, +STORE, 47906200399872, 47906200403967, +STORE, 47906200403968, 47906200408063, +ERASE, 47906200084480, 47906200084480, +STORE, 47906200084480, 47906200088575, +STORE, 47906200088576, 47906200092671, +ERASE, 47906197815296, 47906197815296, +STORE, 47906197815296, 47906197835775, +STORE, 47906197835776, 47906197839871, +ERASE, 47906197483520, 47906197483520, +STORE, 47906197483520, 47906197487615, +STORE, 47906197487616, 47906197491711, +ERASE, 47906197438464, 47906197438464, +STORE, 47906197438464, 47906197442559, +STORE, 47906197442560, 47906197446655, +ERASE, 94641232658432, 94641232658432, +STORE, 94641232658432, 94641232781311, +STORE, 94641232781312, 94641232785407, +ERASE, 139726599675904, 139726599675904, +STORE, 139726599675904, 139726599679999, +STORE, 139726599680000, 139726599684095, +ERASE, 47906195480576, 
47906195480576, +STORE, 94641242615808, 94641242750975, + }; + + unsigned long set10[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140736427839488, 140737488351231, +ERASE, 140736427839488, 140736427839488, +STORE, 140736427839488, 140736427843583, +STORE, 94071213395968, 94071213567999, +ERASE, 94071213395968, 94071213395968, +STORE, 94071213395968, 94071213412351, +STORE, 94071213412352, 94071213567999, +ERASE, 94071213412352, 94071213412352, +STORE, 94071213412352, 94071213514751, +STORE, 94071213514752, 94071213555711, +STORE, 94071213555712, 94071213567999, +STORE, 139968410644480, 139968410816511, +ERASE, 139968410644480, 139968410644480, +STORE, 139968410644480, 139968410648575, +STORE, 139968410648576, 139968410816511, +ERASE, 139968410648576, 139968410648576, +STORE, 139968410648576, 139968410771455, +STORE, 139968410771456, 139968410804223, +STORE, 139968410804224, 139968410812415, +STORE, 139968410812416, 139968410816511, +STORE, 140736429277184, 140736429281279, +STORE, 140736429264896, 140736429277183, +STORE, 47664384352256, 47664384360447, +STORE, 47664384360448, 47664384368639, +STORE, 47664384368640, 47664384532479, +ERASE, 47664384368640, 47664384368640, +STORE, 47664384368640, 47664384380927, +STORE, 47664384380928, 47664384532479, +STORE, 47664384479232, 47664384532479, +STORE, 47664384380928, 47664384479231, +ERASE, 47664384380928, 47664384380928, +STORE, 47664384380928, 47664384479231, +STORE, 47664384524288, 47664384532479, +STORE, 47664384479232, 47664384524287, +ERASE, 47664384479232, 47664384479232, +STORE, 47664384479232, 47664384532479, +ERASE, 47664384479232, 47664384479232, +STORE, 47664384479232, 47664384524287, +STORE, 47664384524288, 47664384532479, +ERASE, 47664384524288, 47664384524288, +STORE, 47664384524288, 47664384532479, +STORE, 47664384532480, 47664387583999, +STORE, 47664385077248, 47664387583999, +STORE, 47664384532480, 47664385077247, +ERASE, 47664385077248, 47664385077248, +STORE, 47664385077248, 47664387362815, +STORE, 47664387362816, 47664387583999, +STORE, 47664386772992, 47664387362815, +STORE, 47664385077248, 47664386772991, +ERASE, 47664385077248, 47664385077248, +STORE, 47664385077248, 47664386772991, +STORE, 47664387358720, 47664387362815, +STORE, 47664386772992, 47664387358719, +ERASE, 47664386772992, 47664386772992, +STORE, 47664386772992, 47664387358719, +STORE, 47664387567616, 47664387583999, +STORE, 47664387362816, 47664387567615, +ERASE, 47664387362816, 47664387362816, +STORE, 47664387362816, 47664387567615, +ERASE, 47664387567616, 47664387567616, +STORE, 47664387567616, 47664387583999, +STORE, 47664387584000, 47664389423103, +STORE, 47664387723264, 47664389423103, +STORE, 47664387584000, 47664387723263, +ERASE, 47664387723264, 47664387723264, +STORE, 47664387723264, 47664389382143, +STORE, 47664389382144, 47664389423103, +STORE, 47664389066752, 47664389382143, +STORE, 47664387723264, 47664389066751, +ERASE, 47664387723264, 47664387723264, +STORE, 47664387723264, 47664389066751, +STORE, 47664389378048, 47664389382143, +STORE, 47664389066752, 47664389378047, +ERASE, 47664389066752, 47664389066752, +STORE, 47664389066752, 47664389378047, +STORE, 47664389406720, 47664389423103, +STORE, 47664389382144, 47664389406719, +ERASE, 47664389382144, 47664389382144, +STORE, 47664389382144, 47664389406719, +ERASE, 47664389406720, 47664389406720, +STORE, 47664389406720, 47664389423103, +STORE, 47664389423104, 47664389558271, +ERASE, 47664389423104, 47664389423104, +STORE, 47664389423104, 47664389447679, +STORE, 47664389447680, 
47664389558271, +STORE, 47664389509120, 47664389558271, +STORE, 47664389447680, 47664389509119, +ERASE, 47664389447680, 47664389447680, +STORE, 47664389447680, 47664389509119, +STORE, 47664389533696, 47664389558271, +STORE, 47664389509120, 47664389533695, +ERASE, 47664389509120, 47664389509120, +STORE, 47664389509120, 47664389558271, +ERASE, 47664389509120, 47664389509120, +STORE, 47664389509120, 47664389533695, +STORE, 47664389533696, 47664389558271, +STORE, 47664389541888, 47664389558271, +STORE, 47664389533696, 47664389541887, +ERASE, 47664389533696, 47664389533696, +STORE, 47664389533696, 47664389541887, +ERASE, 47664389541888, 47664389541888, +STORE, 47664389541888, 47664389558271, +STORE, 47664389558272, 47664389578751, +ERASE, 47664389558272, 47664389558272, +STORE, 47664389558272, 47664389562367, +STORE, 47664389562368, 47664389578751, +STORE, 47664389566464, 47664389578751, +STORE, 47664389562368, 47664389566463, +ERASE, 47664389562368, 47664389562368, +STORE, 47664389562368, 47664389566463, +STORE, 47664389570560, 47664389578751, +STORE, 47664389566464, 47664389570559, +ERASE, 47664389566464, 47664389566464, +STORE, 47664389566464, 47664389578751, +ERASE, 47664389566464, 47664389566464, +STORE, 47664389566464, 47664389570559, +STORE, 47664389570560, 47664389578751, +ERASE, 47664389570560, 47664389570560, +STORE, 47664389570560, 47664389578751, +STORE, 47664389578752, 47664389586943, +ERASE, 47664389382144, 47664389382144, +STORE, 47664389382144, 47664389398527, +STORE, 47664389398528, 47664389406719, +ERASE, 47664389570560, 47664389570560, +STORE, 47664389570560, 47664389574655, +STORE, 47664389574656, 47664389578751, +ERASE, 47664389533696, 47664389533696, +STORE, 47664389533696, 47664389537791, +STORE, 47664389537792, 47664389541887, +ERASE, 47664387362816, 47664387362816, +STORE, 47664387362816, 47664387559423, +STORE, 47664387559424, 47664387567615, +ERASE, 47664384524288, 47664384524288, +STORE, 47664384524288, 47664384528383, +STORE, 47664384528384, 47664384532479, +ERASE, 94071213555712, 94071213555712, +STORE, 94071213555712, 94071213563903, +STORE, 94071213563904, 94071213567999, +ERASE, 139968410804224, 139968410804224, +STORE, 139968410804224, 139968410808319, +STORE, 139968410808320, 139968410812415, +ERASE, 47664384352256, 47664384352256, +STORE, 94071244402688, 94071244537855, +STORE, 140737488347136, 140737488351231, +STORE, 140728271503360, 140737488351231, +ERASE, 140728271503360, 140728271503360, +STORE, 140728271503360, 140728271507455, +STORE, 94410361982976, 94410362155007, +ERASE, 94410361982976, 94410361982976, +STORE, 94410361982976, 94410361999359, +STORE, 94410361999360, 94410362155007, +ERASE, 94410361999360, 94410361999360, +STORE, 94410361999360, 94410362101759, +STORE, 94410362101760, 94410362142719, +STORE, 94410362142720, 94410362155007, +STORE, 140351953997824, 140351954169855, +ERASE, 140351953997824, 140351953997824, +STORE, 140351953997824, 140351954001919, +STORE, 140351954001920, 140351954169855, +ERASE, 140351954001920, 140351954001920, +STORE, 140351954001920, 140351954124799, +STORE, 140351954124800, 140351954157567, +STORE, 140351954157568, 140351954165759, +STORE, 140351954165760, 140351954169855, +STORE, 140728272429056, 140728272433151, +STORE, 140728272416768, 140728272429055, +STORE, 47280840998912, 47280841007103, +STORE, 47280841007104, 47280841015295, +STORE, 47280841015296, 47280841179135, +ERASE, 47280841015296, 47280841015296, +STORE, 47280841015296, 47280841027583, +STORE, 47280841027584, 47280841179135, +STORE, 
47280841125888, 47280841179135, +STORE, 47280841027584, 47280841125887, +ERASE, 47280841027584, 47280841027584, +STORE, 47280841027584, 47280841125887, +STORE, 47280841170944, 47280841179135, +STORE, 47280841125888, 47280841170943, +ERASE, 47280841125888, 47280841125888, +STORE, 47280841125888, 47280841179135, +ERASE, 47280841125888, 47280841125888, +STORE, 47280841125888, 47280841170943, +STORE, 47280841170944, 47280841179135, +ERASE, 47280841170944, 47280841170944, +STORE, 47280841170944, 47280841179135, +STORE, 47280841179136, 47280844230655, +STORE, 47280841723904, 47280844230655, +STORE, 47280841179136, 47280841723903, +ERASE, 47280841723904, 47280841723904, +STORE, 47280841723904, 47280844009471, +STORE, 47280844009472, 47280844230655, +STORE, 47280843419648, 47280844009471, +STORE, 47280841723904, 47280843419647, +ERASE, 47280841723904, 47280841723904, +STORE, 47280841723904, 47280843419647, +STORE, 47280844005376, 47280844009471, +STORE, 47280843419648, 47280844005375, +ERASE, 47280843419648, 47280843419648, +STORE, 47280843419648, 47280844005375, +STORE, 47280844214272, 47280844230655, +STORE, 47280844009472, 47280844214271, +ERASE, 47280844009472, 47280844009472, +STORE, 47280844009472, 47280844214271, +ERASE, 47280844214272, 47280844214272, +STORE, 47280844214272, 47280844230655, +STORE, 47280844230656, 47280846069759, +STORE, 47280844369920, 47280846069759, +STORE, 47280844230656, 47280844369919, +ERASE, 47280844369920, 47280844369920, +STORE, 47280844369920, 47280846028799, +STORE, 47280846028800, 47280846069759, +STORE, 47280845713408, 47280846028799, +STORE, 47280844369920, 47280845713407, +ERASE, 47280844369920, 47280844369920, +STORE, 47280844369920, 47280845713407, +STORE, 47280846024704, 47280846028799, +STORE, 47280845713408, 47280846024703, +ERASE, 47280845713408, 47280845713408, +STORE, 47280845713408, 47280846024703, +STORE, 47280846053376, 47280846069759, +STORE, 47280846028800, 47280846053375, +ERASE, 47280846028800, 47280846028800, +STORE, 47280846028800, 47280846053375, +ERASE, 47280846053376, 47280846053376, +STORE, 47280846053376, 47280846069759, +STORE, 47280846069760, 47280846204927, +ERASE, 47280846069760, 47280846069760, +STORE, 47280846069760, 47280846094335, +STORE, 47280846094336, 47280846204927, +STORE, 47280846155776, 47280846204927, +STORE, 47280846094336, 47280846155775, +ERASE, 47280846094336, 47280846094336, +STORE, 47280846094336, 47280846155775, +STORE, 47280846180352, 47280846204927, +STORE, 47280846155776, 47280846180351, +ERASE, 47280846155776, 47280846155776, +STORE, 47280846155776, 47280846204927, +ERASE, 47280846155776, 47280846155776, +STORE, 47280846155776, 47280846180351, +STORE, 47280846180352, 47280846204927, +STORE, 47280846188544, 47280846204927, +STORE, 47280846180352, 47280846188543, +ERASE, 47280846180352, 47280846180352, +STORE, 47280846180352, 47280846188543, +ERASE, 47280846188544, 47280846188544, +STORE, 47280846188544, 47280846204927, +STORE, 47280846204928, 47280846225407, +ERASE, 47280846204928, 47280846204928, +STORE, 47280846204928, 47280846209023, +STORE, 47280846209024, 47280846225407, +STORE, 47280846213120, 47280846225407, +STORE, 47280846209024, 47280846213119, +ERASE, 47280846209024, 47280846209024, +STORE, 47280846209024, 47280846213119, +STORE, 47280846217216, 47280846225407, +STORE, 47280846213120, 47280846217215, +ERASE, 47280846213120, 47280846213120, +STORE, 47280846213120, 47280846225407, +ERASE, 47280846213120, 47280846213120, +STORE, 47280846213120, 47280846217215, +STORE, 47280846217216, 47280846225407, 
+ERASE, 47280846217216, 47280846217216, +STORE, 47280846217216, 47280846225407, +STORE, 47280846225408, 47280846233599, +ERASE, 47280846028800, 47280846028800, +STORE, 47280846028800, 47280846045183, +STORE, 47280846045184, 47280846053375, +ERASE, 47280846217216, 47280846217216, +STORE, 47280846217216, 47280846221311, +STORE, 47280846221312, 47280846225407, +ERASE, 47280846180352, 47280846180352, +STORE, 47280846180352, 47280846184447, +STORE, 47280846184448, 47280846188543, +ERASE, 47280844009472, 47280844009472, +STORE, 47280844009472, 47280844206079, +STORE, 47280844206080, 47280844214271, +ERASE, 47280841170944, 47280841170944, +STORE, 47280841170944, 47280841175039, +STORE, 47280841175040, 47280841179135, +ERASE, 94410362142720, 94410362142720, +STORE, 94410362142720, 94410362150911, +STORE, 94410362150912, 94410362155007, +ERASE, 140351954157568, 140351954157568, +STORE, 140351954157568, 140351954161663, +STORE, 140351954161664, 140351954165759, +ERASE, 47280840998912, 47280840998912, +STORE, 94410379456512, 94410379591679, +STORE, 140737488347136, 140737488351231, +STORE, 140732946362368, 140737488351231, +ERASE, 140732946362368, 140732946362368, +STORE, 140732946362368, 140732946366463, +STORE, 94352937934848, 94352938106879, +ERASE, 94352937934848, 94352937934848, +STORE, 94352937934848, 94352937951231, +STORE, 94352937951232, 94352938106879, +ERASE, 94352937951232, 94352937951232, +STORE, 94352937951232, 94352938053631, +STORE, 94352938053632, 94352938094591, +STORE, 94352938094592, 94352938106879, +STORE, 140595518742528, 140595518914559, +ERASE, 140595518742528, 140595518742528, +STORE, 140595518742528, 140595518746623, +STORE, 140595518746624, 140595518914559, +ERASE, 140595518746624, 140595518746624, +STORE, 140595518746624, 140595518869503, +STORE, 140595518869504, 140595518902271, +STORE, 140595518902272, 140595518910463, +STORE, 140595518910464, 140595518914559, +STORE, 140732947468288, 140732947472383, +STORE, 140732947456000, 140732947468287, +STORE, 47037276254208, 47037276262399, +STORE, 47037276262400, 47037276270591, +STORE, 47037276270592, 47037276434431, +ERASE, 47037276270592, 47037276270592, +STORE, 47037276270592, 47037276282879, +STORE, 47037276282880, 47037276434431, +STORE, 47037276381184, 47037276434431, +STORE, 47037276282880, 47037276381183, +ERASE, 47037276282880, 47037276282880, +STORE, 47037276282880, 47037276381183, +STORE, 47037276426240, 47037276434431, +STORE, 47037276381184, 47037276426239, +ERASE, 47037276381184, 47037276381184, +STORE, 47037276381184, 47037276434431, +ERASE, 47037276381184, 47037276381184, +STORE, 47037276381184, 47037276426239, +STORE, 47037276426240, 47037276434431, +ERASE, 47037276426240, 47037276426240, +STORE, 47037276426240, 47037276434431, +STORE, 47037276434432, 47037279485951, +STORE, 47037276979200, 47037279485951, +STORE, 47037276434432, 47037276979199, +ERASE, 47037276979200, 47037276979200, +STORE, 47037276979200, 47037279264767, +STORE, 47037279264768, 47037279485951, +STORE, 47037278674944, 47037279264767, +STORE, 47037276979200, 47037278674943, +ERASE, 47037276979200, 47037276979200, +STORE, 47037276979200, 47037278674943, +STORE, 47037279260672, 47037279264767, +STORE, 47037278674944, 47037279260671, +ERASE, 47037278674944, 47037278674944, +STORE, 47037278674944, 47037279260671, +STORE, 47037279469568, 47037279485951, +STORE, 47037279264768, 47037279469567, +ERASE, 47037279264768, 47037279264768, +STORE, 47037279264768, 47037279469567, +ERASE, 47037279469568, 47037279469568, +STORE, 47037279469568, 
47037279485951, +STORE, 47037279485952, 47037281325055, +STORE, 47037279625216, 47037281325055, +STORE, 47037279485952, 47037279625215, +ERASE, 47037279625216, 47037279625216, +STORE, 47037279625216, 47037281284095, +STORE, 47037281284096, 47037281325055, +STORE, 47037280968704, 47037281284095, +STORE, 47037279625216, 47037280968703, +ERASE, 47037279625216, 47037279625216, +STORE, 47037279625216, 47037280968703, +STORE, 47037281280000, 47037281284095, +STORE, 47037280968704, 47037281279999, +ERASE, 47037280968704, 47037280968704, +STORE, 47037280968704, 47037281279999, +STORE, 47037281308672, 47037281325055, +STORE, 47037281284096, 47037281308671, +ERASE, 47037281284096, 47037281284096, +STORE, 47037281284096, 47037281308671, +ERASE, 47037281308672, 47037281308672, +STORE, 47037281308672, 47037281325055, +STORE, 47037281325056, 47037281460223, +ERASE, 47037281325056, 47037281325056, +STORE, 47037281325056, 47037281349631, +STORE, 47037281349632, 47037281460223, +STORE, 47037281411072, 47037281460223, +STORE, 47037281349632, 47037281411071, +ERASE, 47037281349632, 47037281349632, +STORE, 47037281349632, 47037281411071, +STORE, 47037281435648, 47037281460223, +STORE, 47037281411072, 47037281435647, +ERASE, 47037281411072, 47037281411072, +STORE, 47037281411072, 47037281460223, +ERASE, 47037281411072, 47037281411072, +STORE, 47037281411072, 47037281435647, +STORE, 47037281435648, 47037281460223, +STORE, 47037281443840, 47037281460223, +STORE, 47037281435648, 47037281443839, +ERASE, 47037281435648, 47037281435648, +STORE, 47037281435648, 47037281443839, +ERASE, 47037281443840, 47037281443840, +STORE, 47037281443840, 47037281460223, +STORE, 47037281460224, 47037281480703, +ERASE, 47037281460224, 47037281460224, +STORE, 47037281460224, 47037281464319, +STORE, 47037281464320, 47037281480703, +STORE, 47037281468416, 47037281480703, +STORE, 47037281464320, 47037281468415, +ERASE, 47037281464320, 47037281464320, +STORE, 47037281464320, 47037281468415, +STORE, 47037281472512, 47037281480703, +STORE, 47037281468416, 47037281472511, +ERASE, 47037281468416, 47037281468416, +STORE, 47037281468416, 47037281480703, +ERASE, 47037281468416, 47037281468416, +STORE, 47037281468416, 47037281472511, +STORE, 47037281472512, 47037281480703, +ERASE, 47037281472512, 47037281472512, +STORE, 47037281472512, 47037281480703, +STORE, 47037281480704, 47037281488895, +ERASE, 47037281284096, 47037281284096, +STORE, 47037281284096, 47037281300479, +STORE, 47037281300480, 47037281308671, +ERASE, 47037281472512, 47037281472512, +STORE, 47037281472512, 47037281476607, +STORE, 47037281476608, 47037281480703, +ERASE, 47037281435648, 47037281435648, +STORE, 47037281435648, 47037281439743, +STORE, 47037281439744, 47037281443839, +ERASE, 47037279264768, 47037279264768, +STORE, 47037279264768, 47037279461375, +STORE, 47037279461376, 47037279469567, +ERASE, 47037276426240, 47037276426240, +STORE, 47037276426240, 47037276430335, +STORE, 47037276430336, 47037276434431, +ERASE, 94352938094592, 94352938094592, +STORE, 94352938094592, 94352938102783, +STORE, 94352938102784, 94352938106879, +ERASE, 140595518902272, 140595518902272, +STORE, 140595518902272, 140595518906367, +STORE, 140595518906368, 140595518910463, +ERASE, 47037276254208, 47037276254208, +STORE, 94352938438656, 94352938573823, +STORE, 140737488347136, 140737488351231, +STORE, 140733506027520, 140737488351231, +ERASE, 140733506027520, 140733506027520, +STORE, 140733506027520, 140733506031615, +STORE, 94150123073536, 94150123245567, +ERASE, 94150123073536, 94150123073536, 
+STORE, 94150123073536, 94150123089919, +STORE, 94150123089920, 94150123245567, +ERASE, 94150123089920, 94150123089920, +STORE, 94150123089920, 94150123192319, +STORE, 94150123192320, 94150123233279, +STORE, 94150123233280, 94150123245567, +STORE, 140081290375168, 140081290547199, +ERASE, 140081290375168, 140081290375168, +STORE, 140081290375168, 140081290379263, +STORE, 140081290379264, 140081290547199, +ERASE, 140081290379264, 140081290379264, +STORE, 140081290379264, 140081290502143, +STORE, 140081290502144, 140081290534911, +STORE, 140081290534912, 140081290543103, +STORE, 140081290543104, 140081290547199, +STORE, 140733506707456, 140733506711551, +STORE, 140733506695168, 140733506707455, +STORE, 47551504621568, 47551504629759, +STORE, 47551504629760, 47551504637951, +STORE, 47551504637952, 47551504801791, +ERASE, 47551504637952, 47551504637952, +STORE, 47551504637952, 47551504650239, +STORE, 47551504650240, 47551504801791, +STORE, 47551504748544, 47551504801791, +STORE, 47551504650240, 47551504748543, +ERASE, 47551504650240, 47551504650240, +STORE, 47551504650240, 47551504748543, +STORE, 47551504793600, 47551504801791, +STORE, 47551504748544, 47551504793599, +ERASE, 47551504748544, 47551504748544, +STORE, 47551504748544, 47551504801791, +ERASE, 47551504748544, 47551504748544, +STORE, 47551504748544, 47551504793599, +STORE, 47551504793600, 47551504801791, +ERASE, 47551504793600, 47551504793600, +STORE, 47551504793600, 47551504801791, +STORE, 47551504801792, 47551507853311, +STORE, 47551505346560, 47551507853311, +STORE, 47551504801792, 47551505346559, +ERASE, 47551505346560, 47551505346560, +STORE, 47551505346560, 47551507632127, +STORE, 47551507632128, 47551507853311, +STORE, 47551507042304, 47551507632127, +STORE, 47551505346560, 47551507042303, +ERASE, 47551505346560, 47551505346560, +STORE, 47551505346560, 47551507042303, +STORE, 47551507628032, 47551507632127, +STORE, 47551507042304, 47551507628031, +ERASE, 47551507042304, 47551507042304, +STORE, 47551507042304, 47551507628031, +STORE, 47551507836928, 47551507853311, +STORE, 47551507632128, 47551507836927, +ERASE, 47551507632128, 47551507632128, +STORE, 47551507632128, 47551507836927, +ERASE, 47551507836928, 47551507836928, +STORE, 47551507836928, 47551507853311, +STORE, 47551507853312, 47551509692415, +STORE, 47551507992576, 47551509692415, +STORE, 47551507853312, 47551507992575, +ERASE, 47551507992576, 47551507992576, +STORE, 47551507992576, 47551509651455, +STORE, 47551509651456, 47551509692415, +STORE, 47551509336064, 47551509651455, +STORE, 47551507992576, 47551509336063, +ERASE, 47551507992576, 47551507992576, +STORE, 47551507992576, 47551509336063, +STORE, 47551509647360, 47551509651455, +STORE, 47551509336064, 47551509647359, +ERASE, 47551509336064, 47551509336064, +STORE, 47551509336064, 47551509647359, +STORE, 47551509676032, 47551509692415, +STORE, 47551509651456, 47551509676031, +ERASE, 47551509651456, 47551509651456, +STORE, 47551509651456, 47551509676031, +ERASE, 47551509676032, 47551509676032, +STORE, 47551509676032, 47551509692415, +STORE, 47551509692416, 47551509827583, +ERASE, 47551509692416, 47551509692416, +STORE, 47551509692416, 47551509716991, +STORE, 47551509716992, 47551509827583, +STORE, 47551509778432, 47551509827583, +STORE, 47551509716992, 47551509778431, +ERASE, 47551509716992, 47551509716992, +STORE, 47551509716992, 47551509778431, +STORE, 47551509803008, 47551509827583, +STORE, 47551509778432, 47551509803007, +ERASE, 47551509778432, 47551509778432, +STORE, 47551509778432, 47551509827583, +ERASE, 
47551509778432, 47551509778432, +STORE, 47551509778432, 47551509803007, +STORE, 47551509803008, 47551509827583, +STORE, 47551509811200, 47551509827583, +STORE, 47551509803008, 47551509811199, +ERASE, 47551509803008, 47551509803008, +STORE, 47551509803008, 47551509811199, +ERASE, 47551509811200, 47551509811200, +STORE, 47551509811200, 47551509827583, +STORE, 47551509827584, 47551509848063, +ERASE, 47551509827584, 47551509827584, +STORE, 47551509827584, 47551509831679, +STORE, 47551509831680, 47551509848063, +STORE, 47551509835776, 47551509848063, +STORE, 47551509831680, 47551509835775, +ERASE, 47551509831680, 47551509831680, +STORE, 47551509831680, 47551509835775, +STORE, 47551509839872, 47551509848063, +STORE, 47551509835776, 47551509839871, +ERASE, 47551509835776, 47551509835776, +STORE, 47551509835776, 47551509848063, +ERASE, 47551509835776, 47551509835776, +STORE, 47551509835776, 47551509839871, +STORE, 47551509839872, 47551509848063, +ERASE, 47551509839872, 47551509839872, +STORE, 47551509839872, 47551509848063, +STORE, 47551509848064, 47551509856255, +ERASE, 47551509651456, 47551509651456, +STORE, 47551509651456, 47551509667839, +STORE, 47551509667840, 47551509676031, +ERASE, 47551509839872, 47551509839872, +STORE, 47551509839872, 47551509843967, +STORE, 47551509843968, 47551509848063, +ERASE, 47551509803008, 47551509803008, +STORE, 47551509803008, 47551509807103, +STORE, 47551509807104, 47551509811199, +ERASE, 47551507632128, 47551507632128, +STORE, 47551507632128, 47551507828735, +STORE, 47551507828736, 47551507836927, +ERASE, 47551504793600, 47551504793600, +STORE, 47551504793600, 47551504797695, +STORE, 47551504797696, 47551504801791, +ERASE, 94150123233280, 94150123233280, +STORE, 94150123233280, 94150123241471, +STORE, 94150123241472, 94150123245567, +ERASE, 140081290534912, 140081290534912, +STORE, 140081290534912, 140081290539007, +STORE, 140081290539008, 140081290543103, +ERASE, 47551504621568, 47551504621568, +STORE, 94150148112384, 94150148247551, +STORE, 140737488347136, 140737488351231, +STORE, 140734389334016, 140737488351231, +ERASE, 140734389334016, 140734389334016, +STORE, 140734389334016, 140734389338111, +STORE, 94844636606464, 94844636778495, +ERASE, 94844636606464, 94844636606464, +STORE, 94844636606464, 94844636622847, +STORE, 94844636622848, 94844636778495, +ERASE, 94844636622848, 94844636622848, +STORE, 94844636622848, 94844636725247, +STORE, 94844636725248, 94844636766207, +STORE, 94844636766208, 94844636778495, +STORE, 139922765217792, 139922765389823, +ERASE, 139922765217792, 139922765217792, +STORE, 139922765217792, 139922765221887, +STORE, 139922765221888, 139922765389823, +ERASE, 139922765221888, 139922765221888, +STORE, 139922765221888, 139922765344767, +STORE, 139922765344768, 139922765377535, +STORE, 139922765377536, 139922765385727, +STORE, 139922765385728, 139922765389823, +STORE, 140734389678080, 140734389682175, +STORE, 140734389665792, 140734389678079, +STORE, 47710029778944, 47710029787135, +STORE, 47710029787136, 47710029795327, +STORE, 47710029795328, 47710029959167, +ERASE, 47710029795328, 47710029795328, +STORE, 47710029795328, 47710029807615, +STORE, 47710029807616, 47710029959167, +STORE, 47710029905920, 47710029959167, +STORE, 47710029807616, 47710029905919, +ERASE, 47710029807616, 47710029807616, +STORE, 47710029807616, 47710029905919, +STORE, 47710029950976, 47710029959167, +STORE, 47710029905920, 47710029950975, +ERASE, 47710029905920, 47710029905920, +STORE, 47710029905920, 47710029959167, +ERASE, 47710029905920, 47710029905920, 
+STORE, 47710029905920, 47710029950975, +STORE, 47710029950976, 47710029959167, +ERASE, 47710029950976, 47710029950976, +STORE, 47710029950976, 47710029959167, +STORE, 47710029959168, 47710033010687, +STORE, 47710030503936, 47710033010687, +STORE, 47710029959168, 47710030503935, +ERASE, 47710030503936, 47710030503936, +STORE, 47710030503936, 47710032789503, +STORE, 47710032789504, 47710033010687, +STORE, 47710032199680, 47710032789503, +STORE, 47710030503936, 47710032199679, +ERASE, 47710030503936, 47710030503936, +STORE, 47710030503936, 47710032199679, +STORE, 47710032785408, 47710032789503, +STORE, 47710032199680, 47710032785407, +ERASE, 47710032199680, 47710032199680, +STORE, 47710032199680, 47710032785407, +STORE, 47710032994304, 47710033010687, +STORE, 47710032789504, 47710032994303, +ERASE, 47710032789504, 47710032789504, +STORE, 47710032789504, 47710032994303, +ERASE, 47710032994304, 47710032994304, +STORE, 47710032994304, 47710033010687, +STORE, 47710033010688, 47710034849791, +STORE, 47710033149952, 47710034849791, +STORE, 47710033010688, 47710033149951, +ERASE, 47710033149952, 47710033149952, +STORE, 47710033149952, 47710034808831, +STORE, 47710034808832, 47710034849791, +STORE, 47710034493440, 47710034808831, +STORE, 47710033149952, 47710034493439, +ERASE, 47710033149952, 47710033149952, +STORE, 47710033149952, 47710034493439, +STORE, 47710034804736, 47710034808831, +STORE, 47710034493440, 47710034804735, +ERASE, 47710034493440, 47710034493440, +STORE, 47710034493440, 47710034804735, +STORE, 47710034833408, 47710034849791, +STORE, 47710034808832, 47710034833407, +ERASE, 47710034808832, 47710034808832, +STORE, 47710034808832, 47710034833407, +ERASE, 47710034833408, 47710034833408, +STORE, 47710034833408, 47710034849791, +STORE, 47710034849792, 47710034984959, +ERASE, 47710034849792, 47710034849792, +STORE, 47710034849792, 47710034874367, +STORE, 47710034874368, 47710034984959, +STORE, 47710034935808, 47710034984959, +STORE, 47710034874368, 47710034935807, +ERASE, 47710034874368, 47710034874368, +STORE, 47710034874368, 47710034935807, +STORE, 47710034960384, 47710034984959, +STORE, 47710034935808, 47710034960383, +ERASE, 47710034935808, 47710034935808, +STORE, 47710034935808, 47710034984959, +ERASE, 47710034935808, 47710034935808, +STORE, 47710034935808, 47710034960383, +STORE, 47710034960384, 47710034984959, +STORE, 47710034968576, 47710034984959, +STORE, 47710034960384, 47710034968575, +ERASE, 47710034960384, 47710034960384, +STORE, 47710034960384, 47710034968575, +ERASE, 47710034968576, 47710034968576, +STORE, 47710034968576, 47710034984959, +STORE, 47710034984960, 47710035005439, +ERASE, 47710034984960, 47710034984960, +STORE, 47710034984960, 47710034989055, +STORE, 47710034989056, 47710035005439, +STORE, 47710034993152, 47710035005439, +STORE, 47710034989056, 47710034993151, +ERASE, 47710034989056, 47710034989056, +STORE, 47710034989056, 47710034993151, +STORE, 47710034997248, 47710035005439, +STORE, 47710034993152, 47710034997247, +ERASE, 47710034993152, 47710034993152, +STORE, 47710034993152, 47710035005439, +ERASE, 47710034993152, 47710034993152, +STORE, 47710034993152, 47710034997247, +STORE, 47710034997248, 47710035005439, +ERASE, 47710034997248, 47710034997248, +STORE, 47710034997248, 47710035005439, +STORE, 47710035005440, 47710035013631, +ERASE, 47710034808832, 47710034808832, +STORE, 47710034808832, 47710034825215, +STORE, 47710034825216, 47710034833407, +ERASE, 47710034997248, 47710034997248, +STORE, 47710034997248, 47710035001343, +STORE, 47710035001344, 
47710035005439, +ERASE, 47710034960384, 47710034960384, +STORE, 47710034960384, 47710034964479, +STORE, 47710034964480, 47710034968575, +ERASE, 47710032789504, 47710032789504, +STORE, 47710032789504, 47710032986111, +STORE, 47710032986112, 47710032994303, +ERASE, 47710029950976, 47710029950976, +STORE, 47710029950976, 47710029955071, +STORE, 47710029955072, 47710029959167, +ERASE, 94844636766208, 94844636766208, +STORE, 94844636766208, 94844636774399, +STORE, 94844636774400, 94844636778495, +ERASE, 139922765377536, 139922765377536, +STORE, 139922765377536, 139922765381631, +STORE, 139922765381632, 139922765385727, +ERASE, 47710029778944, 47710029778944, +STORE, 94844641775616, 94844641910783, +STORE, 140737488347136, 140737488351231, +STORE, 140732213886976, 140737488351231, +ERASE, 140732213886976, 140732213886976, +STORE, 140732213886976, 140732213891071, +STORE, 94240508887040, 94240509059071, +ERASE, 94240508887040, 94240508887040, +STORE, 94240508887040, 94240508903423, +STORE, 94240508903424, 94240509059071, +ERASE, 94240508903424, 94240508903424, +STORE, 94240508903424, 94240509005823, +STORE, 94240509005824, 94240509046783, +STORE, 94240509046784, 94240509059071, +STORE, 140275106516992, 140275106689023, +ERASE, 140275106516992, 140275106516992, +STORE, 140275106516992, 140275106521087, +STORE, 140275106521088, 140275106689023, +ERASE, 140275106521088, 140275106521088, +STORE, 140275106521088, 140275106643967, +STORE, 140275106643968, 140275106676735, +STORE, 140275106676736, 140275106684927, +STORE, 140275106684928, 140275106689023, +STORE, 140732213977088, 140732213981183, +STORE, 140732213964800, 140732213977087, +STORE, 47357688479744, 47357688487935, +STORE, 47357688487936, 47357688496127, +STORE, 47357688496128, 47357688659967, +ERASE, 47357688496128, 47357688496128, +STORE, 47357688496128, 47357688508415, +STORE, 47357688508416, 47357688659967, +STORE, 47357688606720, 47357688659967, +STORE, 47357688508416, 47357688606719, +ERASE, 47357688508416, 47357688508416, +STORE, 47357688508416, 47357688606719, +STORE, 47357688651776, 47357688659967, +STORE, 47357688606720, 47357688651775, +ERASE, 47357688606720, 47357688606720, +STORE, 47357688606720, 47357688659967, +ERASE, 47357688606720, 47357688606720, +STORE, 47357688606720, 47357688651775, +STORE, 47357688651776, 47357688659967, +ERASE, 47357688651776, 47357688651776, +STORE, 47357688651776, 47357688659967, +STORE, 47357688659968, 47357691711487, +STORE, 47357689204736, 47357691711487, +STORE, 47357688659968, 47357689204735, +ERASE, 47357689204736, 47357689204736, +STORE, 47357689204736, 47357691490303, +STORE, 47357691490304, 47357691711487, +STORE, 47357690900480, 47357691490303, +STORE, 47357689204736, 47357690900479, +ERASE, 47357689204736, 47357689204736, +STORE, 47357689204736, 47357690900479, +STORE, 47357691486208, 47357691490303, +STORE, 47357690900480, 47357691486207, +ERASE, 47357690900480, 47357690900480, +STORE, 47357690900480, 47357691486207, +STORE, 47357691695104, 47357691711487, +STORE, 47357691490304, 47357691695103, +ERASE, 47357691490304, 47357691490304, +STORE, 47357691490304, 47357691695103, +ERASE, 47357691695104, 47357691695104, +STORE, 47357691695104, 47357691711487, +STORE, 47357691711488, 47357693550591, +STORE, 47357691850752, 47357693550591, +STORE, 47357691711488, 47357691850751, +ERASE, 47357691850752, 47357691850752, +STORE, 47357691850752, 47357693509631, +STORE, 47357693509632, 47357693550591, +STORE, 47357693194240, 47357693509631, +STORE, 47357691850752, 47357693194239, +ERASE, 
47357691850752, 47357691850752, +STORE, 47357691850752, 47357693194239, +STORE, 47357693505536, 47357693509631, +STORE, 47357693194240, 47357693505535, +ERASE, 47357693194240, 47357693194240, +STORE, 47357693194240, 47357693505535, +STORE, 47357693534208, 47357693550591, +STORE, 47357693509632, 47357693534207, +ERASE, 47357693509632, 47357693509632, +STORE, 47357693509632, 47357693534207, +ERASE, 47357693534208, 47357693534208, +STORE, 47357693534208, 47357693550591, +STORE, 47357693550592, 47357693685759, +ERASE, 47357693550592, 47357693550592, +STORE, 47357693550592, 47357693575167, +STORE, 47357693575168, 47357693685759, +STORE, 47357693636608, 47357693685759, +STORE, 47357693575168, 47357693636607, +ERASE, 47357693575168, 47357693575168, +STORE, 47357693575168, 47357693636607, +STORE, 47357693661184, 47357693685759, +STORE, 47357693636608, 47357693661183, +ERASE, 47357693636608, 47357693636608, +STORE, 47357693636608, 47357693685759, +ERASE, 47357693636608, 47357693636608, +STORE, 47357693636608, 47357693661183, +STORE, 47357693661184, 47357693685759, +STORE, 47357693669376, 47357693685759, +STORE, 47357693661184, 47357693669375, +ERASE, 47357693661184, 47357693661184, +STORE, 47357693661184, 47357693669375, +ERASE, 47357693669376, 47357693669376, +STORE, 47357693669376, 47357693685759, +STORE, 47357693685760, 47357693706239, +ERASE, 47357693685760, 47357693685760, +STORE, 47357693685760, 47357693689855, +STORE, 47357693689856, 47357693706239, +STORE, 47357693693952, 47357693706239, +STORE, 47357693689856, 47357693693951, +ERASE, 47357693689856, 47357693689856, +STORE, 47357693689856, 47357693693951, +STORE, 47357693698048, 47357693706239, +STORE, 47357693693952, 47357693698047, +ERASE, 47357693693952, 47357693693952, +STORE, 47357693693952, 47357693706239, +ERASE, 47357693693952, 47357693693952, +STORE, 47357693693952, 47357693698047, +STORE, 47357693698048, 47357693706239, +ERASE, 47357693698048, 47357693698048, +STORE, 47357693698048, 47357693706239, +STORE, 47357693706240, 47357693714431, +ERASE, 47357693509632, 47357693509632, +STORE, 47357693509632, 47357693526015, +STORE, 47357693526016, 47357693534207, +ERASE, 47357693698048, 47357693698048, +STORE, 47357693698048, 47357693702143, +STORE, 47357693702144, 47357693706239, +ERASE, 47357693661184, 47357693661184, +STORE, 47357693661184, 47357693665279, +STORE, 47357693665280, 47357693669375, +ERASE, 47357691490304, 47357691490304, +STORE, 47357691490304, 47357691686911, +STORE, 47357691686912, 47357691695103, +ERASE, 47357688651776, 47357688651776, +STORE, 47357688651776, 47357688655871, +STORE, 47357688655872, 47357688659967, +ERASE, 94240509046784, 94240509046784, +STORE, 94240509046784, 94240509054975, +STORE, 94240509054976, 94240509059071, +ERASE, 140275106676736, 140275106676736, +STORE, 140275106676736, 140275106680831, +STORE, 140275106680832, 140275106684927, +ERASE, 47357688479744, 47357688479744, +STORE, 94240518361088, 94240518496255, +STORE, 140737488347136, 140737488351231, +STORE, 140732688277504, 140737488351231, +ERASE, 140732688277504, 140732688277504, +STORE, 140732688277504, 140732688281599, +STORE, 94629171351552, 94629172064255, +ERASE, 94629171351552, 94629171351552, +STORE, 94629171351552, 94629171400703, +STORE, 94629171400704, 94629172064255, +ERASE, 94629171400704, 94629171400704, +STORE, 94629171400704, 94629171945471, +STORE, 94629171945472, 94629172043775, +STORE, 94629172043776, 94629172064255, +STORE, 139770707644416, 139770707816447, +ERASE, 139770707644416, 139770707644416, +STORE, 139770707644416, 
139770707648511, +STORE, 139770707648512, 139770707816447, +ERASE, 139770707648512, 139770707648512, +STORE, 139770707648512, 139770707771391, +STORE, 139770707771392, 139770707804159, +STORE, 139770707804160, 139770707812351, +STORE, 139770707812352, 139770707816447, +STORE, 140732689121280, 140732689125375, +STORE, 140732689108992, 140732689121279, +STORE, 47862087352320, 47862087360511, +STORE, 47862087360512, 47862087368703, +STORE, 47862087368704, 47862087475199, +STORE, 47862087385088, 47862087475199, +STORE, 47862087368704, 47862087385087, +ERASE, 47862087385088, 47862087385088, +STORE, 47862087385088, 47862087458815, +STORE, 47862087458816, 47862087475199, +STORE, 47862087438336, 47862087458815, +STORE, 47862087385088, 47862087438335, +ERASE, 47862087385088, 47862087385088, +STORE, 47862087385088, 47862087438335, +STORE, 47862087454720, 47862087458815, +STORE, 47862087438336, 47862087454719, +ERASE, 47862087438336, 47862087438336, +STORE, 47862087438336, 47862087454719, +STORE, 47862087467008, 47862087475199, +STORE, 47862087458816, 47862087467007, +ERASE, 47862087458816, 47862087458816, +STORE, 47862087458816, 47862087467007, +ERASE, 47862087467008, 47862087467008, +STORE, 47862087467008, 47862087475199, +STORE, 47862087475200, 47862089314303, +STORE, 47862087614464, 47862089314303, +STORE, 47862087475200, 47862087614463, +ERASE, 47862087614464, 47862087614464, +STORE, 47862087614464, 47862089273343, +STORE, 47862089273344, 47862089314303, +STORE, 47862088957952, 47862089273343, +STORE, 47862087614464, 47862088957951, +ERASE, 47862087614464, 47862087614464, +STORE, 47862087614464, 47862088957951, +STORE, 47862089269248, 47862089273343, +STORE, 47862088957952, 47862089269247, +ERASE, 47862088957952, 47862088957952, +STORE, 47862088957952, 47862089269247, +STORE, 47862089297920, 47862089314303, +STORE, 47862089273344, 47862089297919, +ERASE, 47862089273344, 47862089273344, +STORE, 47862089273344, 47862089297919, +ERASE, 47862089297920, 47862089297920, +STORE, 47862089297920, 47862089314303, +STORE, 47862089297920, 47862089326591, +ERASE, 47862089273344, 47862089273344, +STORE, 47862089273344, 47862089289727, +STORE, 47862089289728, 47862089297919, +ERASE, 47862087458816, 47862087458816, +STORE, 47862087458816, 47862087462911, +STORE, 47862087462912, 47862087467007, +ERASE, 94629172043776, 94629172043776, +STORE, 94629172043776, 94629172060159, +STORE, 94629172060160, 94629172064255, +ERASE, 139770707804160, 139770707804160, +STORE, 139770707804160, 139770707808255, +STORE, 139770707808256, 139770707812351, +ERASE, 47862087352320, 47862087352320, +STORE, 94629197533184, 94629197668351, +STORE, 140737488347136, 140737488351231, +STORE, 140727540711424, 140737488351231, +ERASE, 140727540711424, 140727540711424, +STORE, 140727540711424, 140727540715519, +STORE, 94299865313280, 94299866025983, +ERASE, 94299865313280, 94299865313280, +STORE, 94299865313280, 94299865362431, +STORE, 94299865362432, 94299866025983, +ERASE, 94299865362432, 94299865362432, +STORE, 94299865362432, 94299865907199, +STORE, 94299865907200, 94299866005503, +STORE, 94299866005504, 94299866025983, +STORE, 140680268763136, 140680268935167, +ERASE, 140680268763136, 140680268763136, +STORE, 140680268763136, 140680268767231, +STORE, 140680268767232, 140680268935167, +ERASE, 140680268767232, 140680268767232, +STORE, 140680268767232, 140680268890111, +STORE, 140680268890112, 140680268922879, +STORE, 140680268922880, 140680268931071, +STORE, 140680268931072, 140680268935167, +STORE, 140727541424128, 140727541428223, +STORE, 
140727541411840, 140727541424127, +STORE, 46952526233600, 46952526241791, +STORE, 46952526241792, 46952526249983, +STORE, 46952526249984, 46952526356479, +STORE, 46952526266368, 46952526356479, +STORE, 46952526249984, 46952526266367, +ERASE, 46952526266368, 46952526266368, +STORE, 46952526266368, 46952526340095, +STORE, 46952526340096, 46952526356479, +STORE, 46952526319616, 46952526340095, +STORE, 46952526266368, 46952526319615, +ERASE, 46952526266368, 46952526266368, +STORE, 46952526266368, 46952526319615, +STORE, 46952526336000, 46952526340095, +STORE, 46952526319616, 46952526335999, +ERASE, 46952526319616, 46952526319616, +STORE, 46952526319616, 46952526335999, +STORE, 46952526348288, 46952526356479, +STORE, 46952526340096, 46952526348287, +ERASE, 46952526340096, 46952526340096, +STORE, 46952526340096, 46952526348287, +ERASE, 46952526348288, 46952526348288, +STORE, 46952526348288, 46952526356479, +STORE, 46952526356480, 46952528195583, +STORE, 46952526495744, 46952528195583, +STORE, 46952526356480, 46952526495743, +ERASE, 46952526495744, 46952526495744, +STORE, 46952526495744, 46952528154623, +STORE, 46952528154624, 46952528195583, +STORE, 46952527839232, 46952528154623, +STORE, 46952526495744, 46952527839231, +ERASE, 46952526495744, 46952526495744, +STORE, 46952526495744, 46952527839231, +STORE, 46952528150528, 46952528154623, +STORE, 46952527839232, 46952528150527, +ERASE, 46952527839232, 46952527839232, +STORE, 46952527839232, 46952528150527, +STORE, 46952528179200, 46952528195583, +STORE, 46952528154624, 46952528179199, +ERASE, 46952528154624, 46952528154624, +STORE, 46952528154624, 46952528179199, +ERASE, 46952528179200, 46952528179200, +STORE, 46952528179200, 46952528195583, +STORE, 46952528179200, 46952528207871, +ERASE, 46952528154624, 46952528154624, +STORE, 46952528154624, 46952528171007, +STORE, 46952528171008, 46952528179199, +ERASE, 46952526340096, 46952526340096, +STORE, 46952526340096, 46952526344191, +STORE, 46952526344192, 46952526348287, +ERASE, 94299866005504, 94299866005504, +STORE, 94299866005504, 94299866021887, +STORE, 94299866021888, 94299866025983, +ERASE, 140680268922880, 140680268922880, +STORE, 140680268922880, 140680268926975, +STORE, 140680268926976, 140680268931071, +ERASE, 46952526233600, 46952526233600, +STORE, 140737488347136, 140737488351231, +STORE, 140722874793984, 140737488351231, +ERASE, 140722874793984, 140722874793984, +STORE, 140722874793984, 140722874798079, +STORE, 94448916213760, 94448916926463, +ERASE, 94448916213760, 94448916213760, +STORE, 94448916213760, 94448916262911, +STORE, 94448916262912, 94448916926463, +ERASE, 94448916262912, 94448916262912, +STORE, 94448916262912, 94448916807679, +STORE, 94448916807680, 94448916905983, +STORE, 94448916905984, 94448916926463, +STORE, 140389117046784, 140389117218815, +ERASE, 140389117046784, 140389117046784, +STORE, 140389117046784, 140389117050879, +STORE, 140389117050880, 140389117218815, +ERASE, 140389117050880, 140389117050880, +STORE, 140389117050880, 140389117173759, +STORE, 140389117173760, 140389117206527, +STORE, 140389117206528, 140389117214719, +STORE, 140389117214720, 140389117218815, +STORE, 140722875297792, 140722875301887, +STORE, 140722875285504, 140722875297791, +STORE, 47243677949952, 47243677958143, +STORE, 47243677958144, 47243677966335, +STORE, 47243677966336, 47243678072831, +STORE, 47243677982720, 47243678072831, +STORE, 47243677966336, 47243677982719, +ERASE, 47243677982720, 47243677982720, +STORE, 47243677982720, 47243678056447, +STORE, 47243678056448, 47243678072831, 
+STORE, 47243678035968, 47243678056447, +STORE, 47243677982720, 47243678035967, +ERASE, 47243677982720, 47243677982720, +STORE, 47243677982720, 47243678035967, +STORE, 47243678052352, 47243678056447, +STORE, 47243678035968, 47243678052351, +ERASE, 47243678035968, 47243678035968, +STORE, 47243678035968, 47243678052351, +STORE, 47243678064640, 47243678072831, +STORE, 47243678056448, 47243678064639, +ERASE, 47243678056448, 47243678056448, +STORE, 47243678056448, 47243678064639, +ERASE, 47243678064640, 47243678064640, +STORE, 47243678064640, 47243678072831, +STORE, 47243678072832, 47243679911935, +STORE, 47243678212096, 47243679911935, +STORE, 47243678072832, 47243678212095, +ERASE, 47243678212096, 47243678212096, +STORE, 47243678212096, 47243679870975, +STORE, 47243679870976, 47243679911935, +STORE, 47243679555584, 47243679870975, +STORE, 47243678212096, 47243679555583, +ERASE, 47243678212096, 47243678212096, +STORE, 47243678212096, 47243679555583, +STORE, 47243679866880, 47243679870975, +STORE, 47243679555584, 47243679866879, +ERASE, 47243679555584, 47243679555584, +STORE, 47243679555584, 47243679866879, +STORE, 47243679895552, 47243679911935, +STORE, 47243679870976, 47243679895551, +ERASE, 47243679870976, 47243679870976, +STORE, 47243679870976, 47243679895551, +ERASE, 47243679895552, 47243679895552, +STORE, 47243679895552, 47243679911935, +STORE, 47243679895552, 47243679924223, +ERASE, 47243679870976, 47243679870976, +STORE, 47243679870976, 47243679887359, +STORE, 47243679887360, 47243679895551, +ERASE, 47243678056448, 47243678056448, +STORE, 47243678056448, 47243678060543, +STORE, 47243678060544, 47243678064639, +ERASE, 94448916905984, 94448916905984, +STORE, 94448916905984, 94448916922367, +STORE, 94448916922368, 94448916926463, +ERASE, 140389117206528, 140389117206528, +STORE, 140389117206528, 140389117210623, +STORE, 140389117210624, 140389117214719, +ERASE, 47243677949952, 47243677949952, +STORE, 140737488347136, 140737488351231, +STORE, 140733068505088, 140737488351231, +ERASE, 140733068505088, 140733068505088, +STORE, 140733068505088, 140733068509183, +STORE, 94207145750528, 94207146463231, +ERASE, 94207145750528, 94207145750528, +STORE, 94207145750528, 94207145799679, +STORE, 94207145799680, 94207146463231, +ERASE, 94207145799680, 94207145799680, +STORE, 94207145799680, 94207146344447, +STORE, 94207146344448, 94207146442751, +STORE, 94207146442752, 94207146463231, +STORE, 140684504911872, 140684505083903, +ERASE, 140684504911872, 140684504911872, +STORE, 140684504911872, 140684504915967, +STORE, 140684504915968, 140684505083903, +ERASE, 140684504915968, 140684504915968, +STORE, 140684504915968, 140684505038847, +STORE, 140684505038848, 140684505071615, +STORE, 140684505071616, 140684505079807, +STORE, 140684505079808, 140684505083903, +STORE, 140733068607488, 140733068611583, +STORE, 140733068595200, 140733068607487, +STORE, 46948290084864, 46948290093055, +STORE, 46948290093056, 46948290101247, +STORE, 46948290101248, 46948290207743, +STORE, 46948290117632, 46948290207743, +STORE, 46948290101248, 46948290117631, +ERASE, 46948290117632, 46948290117632, +STORE, 46948290117632, 46948290191359, +STORE, 46948290191360, 46948290207743, +STORE, 46948290170880, 46948290191359, +STORE, 46948290117632, 46948290170879, +ERASE, 46948290117632, 46948290117632, +STORE, 46948290117632, 46948290170879, +STORE, 46948290187264, 46948290191359, +STORE, 46948290170880, 46948290187263, +ERASE, 46948290170880, 46948290170880, +STORE, 46948290170880, 46948290187263, +STORE, 46948290199552, 
46948290207743, +STORE, 46948290191360, 46948290199551, +ERASE, 46948290191360, 46948290191360, +STORE, 46948290191360, 46948290199551, +ERASE, 46948290199552, 46948290199552, +STORE, 46948290199552, 46948290207743, +STORE, 46948290207744, 46948292046847, +STORE, 46948290347008, 46948292046847, +STORE, 46948290207744, 46948290347007, +ERASE, 46948290347008, 46948290347008, +STORE, 46948290347008, 46948292005887, +STORE, 46948292005888, 46948292046847, +STORE, 46948291690496, 46948292005887, +STORE, 46948290347008, 46948291690495, +ERASE, 46948290347008, 46948290347008, +STORE, 46948290347008, 46948291690495, +STORE, 46948292001792, 46948292005887, +STORE, 46948291690496, 46948292001791, +ERASE, 46948291690496, 46948291690496, +STORE, 46948291690496, 46948292001791, +STORE, 46948292030464, 46948292046847, +STORE, 46948292005888, 46948292030463, +ERASE, 46948292005888, 46948292005888, +STORE, 46948292005888, 46948292030463, +ERASE, 46948292030464, 46948292030464, +STORE, 46948292030464, 46948292046847, +STORE, 46948292030464, 46948292059135, +ERASE, 46948292005888, 46948292005888, +STORE, 46948292005888, 46948292022271, +STORE, 46948292022272, 46948292030463, +ERASE, 46948290191360, 46948290191360, +STORE, 46948290191360, 46948290195455, +STORE, 46948290195456, 46948290199551, +ERASE, 94207146442752, 94207146442752, +STORE, 94207146442752, 94207146459135, +STORE, 94207146459136, 94207146463231, +ERASE, 140684505071616, 140684505071616, +STORE, 140684505071616, 140684505075711, +STORE, 140684505075712, 140684505079807, +ERASE, 46948290084864, 46948290084864, +STORE, 140737488347136, 140737488351231, +STORE, 140726367158272, 140737488351231, +ERASE, 140726367158272, 140726367158272, +STORE, 140726367158272, 140726367162367, +STORE, 94436124106752, 94436124819455, +ERASE, 94436124106752, 94436124106752, +STORE, 94436124106752, 94436124155903, +STORE, 94436124155904, 94436124819455, +ERASE, 94436124155904, 94436124155904, +STORE, 94436124155904, 94436124700671, +STORE, 94436124700672, 94436124798975, +STORE, 94436124798976, 94436124819455, +STORE, 140049025044480, 140049025216511, +ERASE, 140049025044480, 140049025044480, +STORE, 140049025044480, 140049025048575, +STORE, 140049025048576, 140049025216511, +ERASE, 140049025048576, 140049025048576, +STORE, 140049025048576, 140049025171455, +STORE, 140049025171456, 140049025204223, +STORE, 140049025204224, 140049025212415, +STORE, 140049025212416, 140049025216511, +STORE, 140726367256576, 140726367260671, +STORE, 140726367244288, 140726367256575, +STORE, 47583769952256, 47583769960447, +STORE, 47583769960448, 47583769968639, +STORE, 47583769968640, 47583770075135, +STORE, 47583769985024, 47583770075135, +STORE, 47583769968640, 47583769985023, +ERASE, 47583769985024, 47583769985024, +STORE, 47583769985024, 47583770058751, +STORE, 47583770058752, 47583770075135, +STORE, 47583770038272, 47583770058751, +STORE, 47583769985024, 47583770038271, +ERASE, 47583769985024, 47583769985024, +STORE, 47583769985024, 47583770038271, +STORE, 47583770054656, 47583770058751, +STORE, 47583770038272, 47583770054655, +ERASE, 47583770038272, 47583770038272, +STORE, 47583770038272, 47583770054655, +STORE, 47583770066944, 47583770075135, +STORE, 47583770058752, 47583770066943, +ERASE, 47583770058752, 47583770058752, +STORE, 47583770058752, 47583770066943, +ERASE, 47583770066944, 47583770066944, +STORE, 47583770066944, 47583770075135, +STORE, 47583770075136, 47583771914239, +STORE, 47583770214400, 47583771914239, +STORE, 47583770075136, 47583770214399, +ERASE, 
47583770214400, 47583770214400, +STORE, 47583770214400, 47583771873279, +STORE, 47583771873280, 47583771914239, +STORE, 47583771557888, 47583771873279, +STORE, 47583770214400, 47583771557887, +ERASE, 47583770214400, 47583770214400, +STORE, 47583770214400, 47583771557887, +STORE, 47583771869184, 47583771873279, +STORE, 47583771557888, 47583771869183, +ERASE, 47583771557888, 47583771557888, +STORE, 47583771557888, 47583771869183, +STORE, 47583771897856, 47583771914239, +STORE, 47583771873280, 47583771897855, +ERASE, 47583771873280, 47583771873280, +STORE, 47583771873280, 47583771897855, +ERASE, 47583771897856, 47583771897856, +STORE, 47583771897856, 47583771914239, +STORE, 47583771897856, 47583771926527, +ERASE, 47583771873280, 47583771873280, +STORE, 47583771873280, 47583771889663, +STORE, 47583771889664, 47583771897855, +ERASE, 47583770058752, 47583770058752, +STORE, 47583770058752, 47583770062847, +STORE, 47583770062848, 47583770066943, +ERASE, 94436124798976, 94436124798976, +STORE, 94436124798976, 94436124815359, +STORE, 94436124815360, 94436124819455, +ERASE, 140049025204224, 140049025204224, +STORE, 140049025204224, 140049025208319, +STORE, 140049025208320, 140049025212415, +ERASE, 47583769952256, 47583769952256, +STORE, 140737488347136, 140737488351231, +STORE, 140727116099584, 140737488351231, +ERASE, 140727116099584, 140727116099584, +STORE, 140727116099584, 140727116103679, +STORE, 94166319734784, 94166320447487, +ERASE, 94166319734784, 94166319734784, +STORE, 94166319734784, 94166319783935, +STORE, 94166319783936, 94166320447487, +ERASE, 94166319783936, 94166319783936, +STORE, 94166319783936, 94166320328703, +STORE, 94166320328704, 94166320427007, +STORE, 94166320427008, 94166320447487, +STORE, 139976559542272, 139976559714303, +ERASE, 139976559542272, 139976559542272, +STORE, 139976559542272, 139976559546367, +STORE, 139976559546368, 139976559714303, +ERASE, 139976559546368, 139976559546368, +STORE, 139976559546368, 139976559669247, +STORE, 139976559669248, 139976559702015, +STORE, 139976559702016, 139976559710207, +STORE, 139976559710208, 139976559714303, +STORE, 140727116222464, 140727116226559, +STORE, 140727116210176, 140727116222463, +STORE, 47656235454464, 47656235462655, +STORE, 47656235462656, 47656235470847, +STORE, 47656235470848, 47656235577343, +STORE, 47656235487232, 47656235577343, +STORE, 47656235470848, 47656235487231, +ERASE, 47656235487232, 47656235487232, +STORE, 47656235487232, 47656235560959, +STORE, 47656235560960, 47656235577343, +STORE, 47656235540480, 47656235560959, +STORE, 47656235487232, 47656235540479, +ERASE, 47656235487232, 47656235487232, +STORE, 47656235487232, 47656235540479, +STORE, 47656235556864, 47656235560959, +STORE, 47656235540480, 47656235556863, +ERASE, 47656235540480, 47656235540480, +STORE, 47656235540480, 47656235556863, +STORE, 47656235569152, 47656235577343, +STORE, 47656235560960, 47656235569151, +ERASE, 47656235560960, 47656235560960, +STORE, 47656235560960, 47656235569151, +ERASE, 47656235569152, 47656235569152, +STORE, 47656235569152, 47656235577343, +STORE, 47656235577344, 47656237416447, +STORE, 47656235716608, 47656237416447, +STORE, 47656235577344, 47656235716607, +ERASE, 47656235716608, 47656235716608, +STORE, 47656235716608, 47656237375487, +STORE, 47656237375488, 47656237416447, +STORE, 47656237060096, 47656237375487, +STORE, 47656235716608, 47656237060095, +ERASE, 47656235716608, 47656235716608, +STORE, 47656235716608, 47656237060095, +STORE, 47656237371392, 47656237375487, +STORE, 47656237060096, 47656237371391, 
+ERASE, 47656237060096, 47656237060096, +STORE, 47656237060096, 47656237371391, +STORE, 47656237400064, 47656237416447, +STORE, 47656237375488, 47656237400063, +ERASE, 47656237375488, 47656237375488, +STORE, 47656237375488, 47656237400063, +ERASE, 47656237400064, 47656237400064, +STORE, 47656237400064, 47656237416447, +STORE, 47656237400064, 47656237428735, +ERASE, 47656237375488, 47656237375488, +STORE, 47656237375488, 47656237391871, +STORE, 47656237391872, 47656237400063, +ERASE, 47656235560960, 47656235560960, +STORE, 47656235560960, 47656235565055, +STORE, 47656235565056, 47656235569151, +ERASE, 94166320427008, 94166320427008, +STORE, 94166320427008, 94166320443391, +STORE, 94166320443392, 94166320447487, +ERASE, 139976559702016, 139976559702016, +STORE, 139976559702016, 139976559706111, +STORE, 139976559706112, 139976559710207, +ERASE, 47656235454464, 47656235454464, +STORE, 94166332153856, 94166332289023, +STORE, 140737488347136, 140737488351231, +STORE, 140726412816384, 140737488351231, +ERASE, 140726412816384, 140726412816384, +STORE, 140726412816384, 140726412820479, +STORE, 94094884507648, 94094885220351, +ERASE, 94094884507648, 94094884507648, +STORE, 94094884507648, 94094884556799, +STORE, 94094884556800, 94094885220351, +ERASE, 94094884556800, 94094884556800, +STORE, 94094884556800, 94094885101567, +STORE, 94094885101568, 94094885199871, +STORE, 94094885199872, 94094885220351, +STORE, 139773773938688, 139773774110719, +ERASE, 139773773938688, 139773773938688, +STORE, 139773773938688, 139773773942783, +STORE, 139773773942784, 139773774110719, +ERASE, 139773773942784, 139773773942784, +STORE, 139773773942784, 139773774065663, +STORE, 139773774065664, 139773774098431, +STORE, 139773774098432, 139773774106623, +STORE, 139773774106624, 139773774110719, +STORE, 140726412963840, 140726412967935, +STORE, 140726412951552, 140726412963839, +STORE, 47859021058048, 47859021066239, +STORE, 47859021066240, 47859021074431, +STORE, 47859021074432, 47859021180927, +STORE, 47859021090816, 47859021180927, +STORE, 47859021074432, 47859021090815, +ERASE, 47859021090816, 47859021090816, +STORE, 47859021090816, 47859021164543, +STORE, 47859021164544, 47859021180927, +STORE, 47859021144064, 47859021164543, +STORE, 47859021090816, 47859021144063, +ERASE, 47859021090816, 47859021090816, +STORE, 47859021090816, 47859021144063, +STORE, 47859021160448, 47859021164543, +STORE, 47859021144064, 47859021160447, +ERASE, 47859021144064, 47859021144064, +STORE, 47859021144064, 47859021160447, +STORE, 47859021172736, 47859021180927, +STORE, 47859021164544, 47859021172735, +ERASE, 47859021164544, 47859021164544, +STORE, 47859021164544, 47859021172735, +ERASE, 47859021172736, 47859021172736, +STORE, 47859021172736, 47859021180927, +STORE, 47859021180928, 47859023020031, +STORE, 47859021320192, 47859023020031, +STORE, 47859021180928, 47859021320191, +ERASE, 47859021320192, 47859021320192, +STORE, 47859021320192, 47859022979071, +STORE, 47859022979072, 47859023020031, +STORE, 47859022663680, 47859022979071, +STORE, 47859021320192, 47859022663679, +ERASE, 47859021320192, 47859021320192, +STORE, 47859021320192, 47859022663679, +STORE, 47859022974976, 47859022979071, +STORE, 47859022663680, 47859022974975, +ERASE, 47859022663680, 47859022663680, +STORE, 47859022663680, 47859022974975, +STORE, 47859023003648, 47859023020031, +STORE, 47859022979072, 47859023003647, +ERASE, 47859022979072, 47859022979072, +STORE, 47859022979072, 47859023003647, +ERASE, 47859023003648, 47859023003648, +STORE, 47859023003648, 
47859023020031, +STORE, 47859023003648, 47859023032319, +ERASE, 47859022979072, 47859022979072, +STORE, 47859022979072, 47859022995455, +STORE, 47859022995456, 47859023003647, +ERASE, 47859021164544, 47859021164544, +STORE, 47859021164544, 47859021168639, +STORE, 47859021168640, 47859021172735, +ERASE, 94094885199872, 94094885199872, +STORE, 94094885199872, 94094885216255, +STORE, 94094885216256, 94094885220351, +ERASE, 139773774098432, 139773774098432, +STORE, 139773774098432, 139773774102527, +STORE, 139773774102528, 139773774106623, +ERASE, 47859021058048, 47859021058048, +STORE, 94094901108736, 94094901243903, +STORE, 140737488347136, 140737488351231, +STORE, 140736567963648, 140737488351231, +ERASE, 140736567963648, 140736567963648, +STORE, 140736567963648, 140736567967743, +STORE, 94924425748480, 94924426461183, +ERASE, 94924425748480, 94924425748480, +STORE, 94924425748480, 94924425797631, +STORE, 94924425797632, 94924426461183, +ERASE, 94924425797632, 94924425797632, +STORE, 94924425797632, 94924426342399, +STORE, 94924426342400, 94924426440703, +STORE, 94924426440704, 94924426461183, +STORE, 140042126319616, 140042126491647, +ERASE, 140042126319616, 140042126319616, +STORE, 140042126319616, 140042126323711, +STORE, 140042126323712, 140042126491647, +ERASE, 140042126323712, 140042126323712, +STORE, 140042126323712, 140042126446591, +STORE, 140042126446592, 140042126479359, +STORE, 140042126479360, 140042126487551, +STORE, 140042126487552, 140042126491647, +STORE, 140736568672256, 140736568676351, +STORE, 140736568659968, 140736568672255, +STORE, 47590668677120, 47590668685311, +STORE, 47590668685312, 47590668693503, +STORE, 47590668693504, 47590668799999, +STORE, 47590668709888, 47590668799999, +STORE, 47590668693504, 47590668709887, +ERASE, 47590668709888, 47590668709888, +STORE, 47590668709888, 47590668783615, +STORE, 47590668783616, 47590668799999, +STORE, 47590668763136, 47590668783615, +STORE, 47590668709888, 47590668763135, +ERASE, 47590668709888, 47590668709888, +STORE, 47590668709888, 47590668763135, +STORE, 47590668779520, 47590668783615, +STORE, 47590668763136, 47590668779519, +ERASE, 47590668763136, 47590668763136, +STORE, 47590668763136, 47590668779519, +STORE, 47590668791808, 47590668799999, +STORE, 47590668783616, 47590668791807, +ERASE, 47590668783616, 47590668783616, +STORE, 47590668783616, 47590668791807, +ERASE, 47590668791808, 47590668791808, +STORE, 47590668791808, 47590668799999, +STORE, 47590668800000, 47590670639103, +STORE, 47590668939264, 47590670639103, +STORE, 47590668800000, 47590668939263, +ERASE, 47590668939264, 47590668939264, +STORE, 47590668939264, 47590670598143, +STORE, 47590670598144, 47590670639103, +STORE, 47590670282752, 47590670598143, +STORE, 47590668939264, 47590670282751, +ERASE, 47590668939264, 47590668939264, +STORE, 47590668939264, 47590670282751, +STORE, 47590670594048, 47590670598143, +STORE, 47590670282752, 47590670594047, +ERASE, 47590670282752, 47590670282752, +STORE, 47590670282752, 47590670594047, +STORE, 47590670622720, 47590670639103, +STORE, 47590670598144, 47590670622719, +ERASE, 47590670598144, 47590670598144, +STORE, 47590670598144, 47590670622719, +ERASE, 47590670622720, 47590670622720, +STORE, 47590670622720, 47590670639103, +STORE, 47590670622720, 47590670651391, +ERASE, 47590670598144, 47590670598144, +STORE, 47590670598144, 47590670614527, +STORE, 47590670614528, 47590670622719, +ERASE, 47590668783616, 47590668783616, +STORE, 47590668783616, 47590668787711, +STORE, 47590668787712, 47590668791807, +ERASE, 
94924426440704, 94924426440704, +STORE, 94924426440704, 94924426457087, +STORE, 94924426457088, 94924426461183, +ERASE, 140042126479360, 140042126479360, +STORE, 140042126479360, 140042126483455, +STORE, 140042126483456, 140042126487551, +ERASE, 47590668677120, 47590668677120, +STORE, 140737488347136, 140737488351231, +STORE, 140733281439744, 140737488351231, +ERASE, 140733281439744, 140733281439744, +STORE, 140733281439744, 140733281443839, +STORE, 94490667069440, 94490667782143, +ERASE, 94490667069440, 94490667069440, +STORE, 94490667069440, 94490667118591, +STORE, 94490667118592, 94490667782143, +ERASE, 94490667118592, 94490667118592, +STORE, 94490667118592, 94490667663359, +STORE, 94490667663360, 94490667761663, +STORE, 94490667761664, 94490667782143, +STORE, 139878215118848, 139878215290879, +ERASE, 139878215118848, 139878215118848, +STORE, 139878215118848, 139878215122943, +STORE, 139878215122944, 139878215290879, +ERASE, 139878215122944, 139878215122944, +STORE, 139878215122944, 139878215245823, +STORE, 139878215245824, 139878215278591, +STORE, 139878215278592, 139878215286783, +STORE, 139878215286784, 139878215290879, +STORE, 140733281464320, 140733281468415, +STORE, 140733281452032, 140733281464319, +STORE, 47754579877888, 47754579886079, +STORE, 47754579886080, 47754579894271, +STORE, 47754579894272, 47754580000767, +STORE, 47754579910656, 47754580000767, +STORE, 47754579894272, 47754579910655, +ERASE, 47754579910656, 47754579910656, +STORE, 47754579910656, 47754579984383, +STORE, 47754579984384, 47754580000767, +STORE, 47754579963904, 47754579984383, +STORE, 47754579910656, 47754579963903, +ERASE, 47754579910656, 47754579910656, +STORE, 47754579910656, 47754579963903, +STORE, 47754579980288, 47754579984383, +STORE, 47754579963904, 47754579980287, +ERASE, 47754579963904, 47754579963904, +STORE, 47754579963904, 47754579980287, +STORE, 47754579992576, 47754580000767, +STORE, 47754579984384, 47754579992575, +ERASE, 47754579984384, 47754579984384, +STORE, 47754579984384, 47754579992575, +ERASE, 47754579992576, 47754579992576, +STORE, 47754579992576, 47754580000767, +STORE, 47754580000768, 47754581839871, +STORE, 47754580140032, 47754581839871, +STORE, 47754580000768, 47754580140031, +ERASE, 47754580140032, 47754580140032, +STORE, 47754580140032, 47754581798911, +STORE, 47754581798912, 47754581839871, +STORE, 47754581483520, 47754581798911, +STORE, 47754580140032, 47754581483519, +ERASE, 47754580140032, 47754580140032, +STORE, 47754580140032, 47754581483519, +STORE, 47754581794816, 47754581798911, +STORE, 47754581483520, 47754581794815, +ERASE, 47754581483520, 47754581483520, +STORE, 47754581483520, 47754581794815, +STORE, 47754581823488, 47754581839871, +STORE, 47754581798912, 47754581823487, +ERASE, 47754581798912, 47754581798912, +STORE, 47754581798912, 47754581823487, +ERASE, 47754581823488, 47754581823488, +STORE, 47754581823488, 47754581839871, +STORE, 47754581823488, 47754581852159, +ERASE, 47754581798912, 47754581798912, +STORE, 47754581798912, 47754581815295, +STORE, 47754581815296, 47754581823487, +ERASE, 47754579984384, 47754579984384, +STORE, 47754579984384, 47754579988479, +STORE, 47754579988480, 47754579992575, +ERASE, 94490667761664, 94490667761664, +STORE, 94490667761664, 94490667778047, +STORE, 94490667778048, 94490667782143, +ERASE, 139878215278592, 139878215278592, +STORE, 139878215278592, 139878215282687, +STORE, 139878215282688, 139878215286783, +ERASE, 47754579877888, 47754579877888, +STORE, 94490669649920, 94490669785087, +STORE, 140737488347136, 
140737488351231, +STORE, 140735382188032, 140737488351231, +ERASE, 140735382188032, 140735382188032, +STORE, 140735382188032, 140735382192127, +STORE, 94150181302272, 94150182014975, +ERASE, 94150181302272, 94150181302272, +STORE, 94150181302272, 94150181351423, +STORE, 94150181351424, 94150182014975, +ERASE, 94150181351424, 94150181351424, +STORE, 94150181351424, 94150181896191, +STORE, 94150181896192, 94150181994495, +STORE, 94150181994496, 94150182014975, +STORE, 139679752458240, 139679752630271, +ERASE, 139679752458240, 139679752458240, +STORE, 139679752458240, 139679752462335, +STORE, 139679752462336, 139679752630271, +ERASE, 139679752462336, 139679752462336, +STORE, 139679752462336, 139679752585215, +STORE, 139679752585216, 139679752617983, +STORE, 139679752617984, 139679752626175, +STORE, 139679752626176, 139679752630271, +STORE, 140735382536192, 140735382540287, +STORE, 140735382523904, 140735382536191, +STORE, 47953042538496, 47953042546687, +STORE, 47953042546688, 47953042554879, +STORE, 47953042554880, 47953042661375, +STORE, 47953042571264, 47953042661375, +STORE, 47953042554880, 47953042571263, +ERASE, 47953042571264, 47953042571264, +STORE, 47953042571264, 47953042644991, +STORE, 47953042644992, 47953042661375, +STORE, 47953042624512, 47953042644991, +STORE, 47953042571264, 47953042624511, +ERASE, 47953042571264, 47953042571264, +STORE, 47953042571264, 47953042624511, +STORE, 47953042640896, 47953042644991, +STORE, 47953042624512, 47953042640895, +ERASE, 47953042624512, 47953042624512, +STORE, 47953042624512, 47953042640895, +STORE, 47953042653184, 47953042661375, +STORE, 47953042644992, 47953042653183, +ERASE, 47953042644992, 47953042644992, +STORE, 47953042644992, 47953042653183, +ERASE, 47953042653184, 47953042653184, +STORE, 47953042653184, 47953042661375, +STORE, 47953042661376, 47953044500479, +STORE, 47953042800640, 47953044500479, +STORE, 47953042661376, 47953042800639, +ERASE, 47953042800640, 47953042800640, +STORE, 47953042800640, 47953044459519, +STORE, 47953044459520, 47953044500479, +STORE, 47953044144128, 47953044459519, +STORE, 47953042800640, 47953044144127, +ERASE, 47953042800640, 47953042800640, +STORE, 47953042800640, 47953044144127, +STORE, 47953044455424, 47953044459519, +STORE, 47953044144128, 47953044455423, +ERASE, 47953044144128, 47953044144128, +STORE, 47953044144128, 47953044455423, +STORE, 47953044484096, 47953044500479, +STORE, 47953044459520, 47953044484095, +ERASE, 47953044459520, 47953044459520, +STORE, 47953044459520, 47953044484095, +ERASE, 47953044484096, 47953044484096, +STORE, 47953044484096, 47953044500479, +STORE, 47953044484096, 47953044512767, +ERASE, 47953044459520, 47953044459520, +STORE, 47953044459520, 47953044475903, +STORE, 47953044475904, 47953044484095, +ERASE, 47953042644992, 47953042644992, +STORE, 47953042644992, 47953042649087, +STORE, 47953042649088, 47953042653183, +ERASE, 94150181994496, 94150181994496, +STORE, 94150181994496, 94150182010879, +STORE, 94150182010880, 94150182014975, +ERASE, 139679752617984, 139679752617984, +STORE, 139679752617984, 139679752622079, +STORE, 139679752622080, 139679752626175, +ERASE, 47953042538496, 47953042538496, +STORE, 140737488347136, 140737488351231, +STORE, 140737044123648, 140737488351231, +ERASE, 140737044123648, 140737044123648, +STORE, 140737044123648, 140737044127743, +STORE, 94425324294144, 94425325006847, +ERASE, 94425324294144, 94425324294144, +STORE, 94425324294144, 94425324343295, +STORE, 94425324343296, 94425325006847, +ERASE, 94425324343296, 94425324343296, +STORE, 
94425324343296, 94425324888063, +STORE, 94425324888064, 94425324986367, +STORE, 94425324986368, 94425325006847, +STORE, 140382015016960, 140382015188991, +ERASE, 140382015016960, 140382015016960, +STORE, 140382015016960, 140382015021055, +STORE, 140382015021056, 140382015188991, +ERASE, 140382015021056, 140382015021056, +STORE, 140382015021056, 140382015143935, +STORE, 140382015143936, 140382015176703, +STORE, 140382015176704, 140382015184895, +STORE, 140382015184896, 140382015188991, +STORE, 140737045585920, 140737045590015, +STORE, 140737045573632, 140737045585919, +STORE, 47250779979776, 47250779987967, +STORE, 47250779987968, 47250779996159, +STORE, 47250779996160, 47250780102655, +STORE, 47250780012544, 47250780102655, +STORE, 47250779996160, 47250780012543, +ERASE, 47250780012544, 47250780012544, +STORE, 47250780012544, 47250780086271, +STORE, 47250780086272, 47250780102655, +STORE, 47250780065792, 47250780086271, +STORE, 47250780012544, 47250780065791, +ERASE, 47250780012544, 47250780012544, +STORE, 47250780012544, 47250780065791, +STORE, 47250780082176, 47250780086271, +STORE, 47250780065792, 47250780082175, +ERASE, 47250780065792, 47250780065792, +STORE, 47250780065792, 47250780082175, +STORE, 47250780094464, 47250780102655, +STORE, 47250780086272, 47250780094463, +ERASE, 47250780086272, 47250780086272, +STORE, 47250780086272, 47250780094463, +ERASE, 47250780094464, 47250780094464, +STORE, 47250780094464, 47250780102655, +STORE, 47250780102656, 47250781941759, +STORE, 47250780241920, 47250781941759, +STORE, 47250780102656, 47250780241919, +ERASE, 47250780241920, 47250780241920, +STORE, 47250780241920, 47250781900799, +STORE, 47250781900800, 47250781941759, +STORE, 47250781585408, 47250781900799, +STORE, 47250780241920, 47250781585407, +ERASE, 47250780241920, 47250780241920, +STORE, 47250780241920, 47250781585407, +STORE, 47250781896704, 47250781900799, +STORE, 47250781585408, 47250781896703, +ERASE, 47250781585408, 47250781585408, +STORE, 47250781585408, 47250781896703, +STORE, 47250781925376, 47250781941759, +STORE, 47250781900800, 47250781925375, +ERASE, 47250781900800, 47250781900800, +STORE, 47250781900800, 47250781925375, +ERASE, 47250781925376, 47250781925376, +STORE, 47250781925376, 47250781941759, +STORE, 47250781925376, 47250781954047, +ERASE, 47250781900800, 47250781900800, +STORE, 47250781900800, 47250781917183, +STORE, 47250781917184, 47250781925375, +ERASE, 47250780086272, 47250780086272, +STORE, 47250780086272, 47250780090367, +STORE, 47250780090368, 47250780094463, +ERASE, 94425324986368, 94425324986368, +STORE, 94425324986368, 94425325002751, +STORE, 94425325002752, 94425325006847, +ERASE, 140382015176704, 140382015176704, +STORE, 140382015176704, 140382015180799, +STORE, 140382015180800, 140382015184895, +ERASE, 47250779979776, 47250779979776, +STORE, 94425351438336, 94425351573503, +STORE, 140737488347136, 140737488351231, +STORE, 140736801144832, 140737488351231, +ERASE, 140736801144832, 140736801144832, +STORE, 140736801144832, 140736801148927, +STORE, 94629429358592, 94629430071295, +ERASE, 94629429358592, 94629429358592, +STORE, 94629429358592, 94629429407743, +STORE, 94629429407744, 94629430071295, +ERASE, 94629429407744, 94629429407744, +STORE, 94629429407744, 94629429952511, +STORE, 94629429952512, 94629430050815, +STORE, 94629430050816, 94629430071295, +STORE, 139801685483520, 139801685655551, +ERASE, 139801685483520, 139801685483520, +STORE, 139801685483520, 139801685487615, +STORE, 139801685487616, 139801685655551, +ERASE, 139801685487616, 
139801685487616, +STORE, 139801685487616, 139801685610495, +STORE, 139801685610496, 139801685643263, +STORE, 139801685643264, 139801685651455, +STORE, 139801685651456, 139801685655551, +STORE, 140736801198080, 140736801202175, +STORE, 140736801185792, 140736801198079, +STORE, 47831109513216, 47831109521407, +STORE, 47831109521408, 47831109529599, +STORE, 47831109529600, 47831109636095, +STORE, 47831109545984, 47831109636095, +STORE, 47831109529600, 47831109545983, +ERASE, 47831109545984, 47831109545984, +STORE, 47831109545984, 47831109619711, +STORE, 47831109619712, 47831109636095, +STORE, 47831109599232, 47831109619711, +STORE, 47831109545984, 47831109599231, +ERASE, 47831109545984, 47831109545984, +STORE, 47831109545984, 47831109599231, +STORE, 47831109615616, 47831109619711, +STORE, 47831109599232, 47831109615615, +ERASE, 47831109599232, 47831109599232, +STORE, 47831109599232, 47831109615615, +STORE, 47831109627904, 47831109636095, +STORE, 47831109619712, 47831109627903, +ERASE, 47831109619712, 47831109619712, +STORE, 47831109619712, 47831109627903, +ERASE, 47831109627904, 47831109627904, +STORE, 47831109627904, 47831109636095, +STORE, 47831109636096, 47831111475199, +STORE, 47831109775360, 47831111475199, +STORE, 47831109636096, 47831109775359, +ERASE, 47831109775360, 47831109775360, +STORE, 47831109775360, 47831111434239, +STORE, 47831111434240, 47831111475199, +STORE, 47831111118848, 47831111434239, +STORE, 47831109775360, 47831111118847, +ERASE, 47831109775360, 47831109775360, +STORE, 47831109775360, 47831111118847, +STORE, 47831111430144, 47831111434239, +STORE, 47831111118848, 47831111430143, +ERASE, 47831111118848, 47831111118848, +STORE, 47831111118848, 47831111430143, +STORE, 47831111458816, 47831111475199, +STORE, 47831111434240, 47831111458815, +ERASE, 47831111434240, 47831111434240, +STORE, 47831111434240, 47831111458815, +ERASE, 47831111458816, 47831111458816, +STORE, 47831111458816, 47831111475199, +STORE, 47831111458816, 47831111487487, +ERASE, 47831111434240, 47831111434240, +STORE, 47831111434240, 47831111450623, +STORE, 47831111450624, 47831111458815, +ERASE, 47831109619712, 47831109619712, +STORE, 47831109619712, 47831109623807, +STORE, 47831109623808, 47831109627903, +ERASE, 94629430050816, 94629430050816, +STORE, 94629430050816, 94629430067199, +STORE, 94629430067200, 94629430071295, +ERASE, 139801685643264, 139801685643264, +STORE, 139801685643264, 139801685647359, +STORE, 139801685647360, 139801685651455, +ERASE, 47831109513216, 47831109513216, +STORE, 140737488347136, 140737488351231, +STORE, 140729419612160, 140737488351231, +ERASE, 140729419612160, 140729419612160, +STORE, 140729419612160, 140729419616255, +STORE, 94443354148864, 94443354861567, +ERASE, 94443354148864, 94443354148864, +STORE, 94443354148864, 94443354198015, +STORE, 94443354198016, 94443354861567, +ERASE, 94443354198016, 94443354198016, +STORE, 94443354198016, 94443354742783, +STORE, 94443354742784, 94443354841087, +STORE, 94443354841088, 94443354861567, +STORE, 139741700038656, 139741700210687, +ERASE, 139741700038656, 139741700038656, +STORE, 139741700038656, 139741700042751, +STORE, 139741700042752, 139741700210687, +ERASE, 139741700042752, 139741700042752, +STORE, 139741700042752, 139741700165631, +STORE, 139741700165632, 139741700198399, +STORE, 139741700198400, 139741700206591, +STORE, 139741700206592, 139741700210687, +STORE, 140729420574720, 140729420578815, +STORE, 140729420562432, 140729420574719, +STORE, 47891094958080, 47891094966271, +STORE, 47891094966272, 47891094974463, +STORE, 
47891094974464, 47891095080959, +STORE, 47891094990848, 47891095080959, +STORE, 47891094974464, 47891094990847, +ERASE, 47891094990848, 47891094990848, +STORE, 47891094990848, 47891095064575, +STORE, 47891095064576, 47891095080959, +STORE, 47891095044096, 47891095064575, +STORE, 47891094990848, 47891095044095, +ERASE, 47891094990848, 47891094990848, +STORE, 47891094990848, 47891095044095, +STORE, 47891095060480, 47891095064575, +STORE, 47891095044096, 47891095060479, +ERASE, 47891095044096, 47891095044096, +STORE, 47891095044096, 47891095060479, +STORE, 47891095072768, 47891095080959, +STORE, 47891095064576, 47891095072767, +ERASE, 47891095064576, 47891095064576, +STORE, 47891095064576, 47891095072767, +ERASE, 47891095072768, 47891095072768, +STORE, 47891095072768, 47891095080959, +STORE, 47891095080960, 47891096920063, +STORE, 47891095220224, 47891096920063, +STORE, 47891095080960, 47891095220223, +ERASE, 47891095220224, 47891095220224, +STORE, 47891095220224, 47891096879103, +STORE, 47891096879104, 47891096920063, +STORE, 47891096563712, 47891096879103, +STORE, 47891095220224, 47891096563711, +ERASE, 47891095220224, 47891095220224, +STORE, 47891095220224, 47891096563711, +STORE, 47891096875008, 47891096879103, +STORE, 47891096563712, 47891096875007, +ERASE, 47891096563712, 47891096563712, +STORE, 47891096563712, 47891096875007, +STORE, 47891096903680, 47891096920063, +STORE, 47891096879104, 47891096903679, +ERASE, 47891096879104, 47891096879104, +STORE, 47891096879104, 47891096903679, +ERASE, 47891096903680, 47891096903680, +STORE, 47891096903680, 47891096920063, +STORE, 47891096903680, 47891096932351, +ERASE, 47891096879104, 47891096879104, +STORE, 47891096879104, 47891096895487, +STORE, 47891096895488, 47891096903679, +ERASE, 47891095064576, 47891095064576, +STORE, 47891095064576, 47891095068671, +STORE, 47891095068672, 47891095072767, +ERASE, 94443354841088, 94443354841088, +STORE, 94443354841088, 94443354857471, +STORE, 94443354857472, 94443354861567, +ERASE, 139741700198400, 139741700198400, +STORE, 139741700198400, 139741700202495, +STORE, 139741700202496, 139741700206591, +ERASE, 47891094958080, 47891094958080, +STORE, 94443360825344, 94443360960511, +STORE, 140737488347136, 140737488351231, +STORE, 140722961661952, 140737488351231, +ERASE, 140722961661952, 140722961661952, +STORE, 140722961661952, 140722961666047, +STORE, 94878388944896, 94878389657599, +ERASE, 94878388944896, 94878388944896, +STORE, 94878388944896, 94878388994047, +STORE, 94878388994048, 94878389657599, +ERASE, 94878388994048, 94878388994048, +STORE, 94878388994048, 94878389538815, +STORE, 94878389538816, 94878389637119, +STORE, 94878389637120, 94878389657599, +STORE, 140210690056192, 140210690228223, +ERASE, 140210690056192, 140210690056192, +STORE, 140210690056192, 140210690060287, +STORE, 140210690060288, 140210690228223, +ERASE, 140210690060288, 140210690060288, +STORE, 140210690060288, 140210690183167, +STORE, 140210690183168, 140210690215935, +STORE, 140210690215936, 140210690224127, +STORE, 140210690224128, 140210690228223, +STORE, 140722963148800, 140722963152895, +STORE, 140722963136512, 140722963148799, +STORE, 47422104940544, 47422104948735, +STORE, 47422104948736, 47422104956927, +STORE, 47422104956928, 47422105063423, +STORE, 47422104973312, 47422105063423, +STORE, 47422104956928, 47422104973311, +ERASE, 47422104973312, 47422104973312, +STORE, 47422104973312, 47422105047039, +STORE, 47422105047040, 47422105063423, +STORE, 47422105026560, 47422105047039, +STORE, 47422104973312, 47422105026559, 
+ERASE, 47422104973312, 47422104973312, +STORE, 47422104973312, 47422105026559, +STORE, 47422105042944, 47422105047039, +STORE, 47422105026560, 47422105042943, +ERASE, 47422105026560, 47422105026560, +STORE, 47422105026560, 47422105042943, +STORE, 47422105055232, 47422105063423, +STORE, 47422105047040, 47422105055231, +ERASE, 47422105047040, 47422105047040, +STORE, 47422105047040, 47422105055231, +ERASE, 47422105055232, 47422105055232, +STORE, 47422105055232, 47422105063423, +STORE, 47422105063424, 47422106902527, +STORE, 47422105202688, 47422106902527, +STORE, 47422105063424, 47422105202687, +ERASE, 47422105202688, 47422105202688, +STORE, 47422105202688, 47422106861567, +STORE, 47422106861568, 47422106902527, +STORE, 47422106546176, 47422106861567, +STORE, 47422105202688, 47422106546175, +ERASE, 47422105202688, 47422105202688, +STORE, 47422105202688, 47422106546175, +STORE, 47422106857472, 47422106861567, +STORE, 47422106546176, 47422106857471, +ERASE, 47422106546176, 47422106546176, +STORE, 47422106546176, 47422106857471, +STORE, 47422106886144, 47422106902527, +STORE, 47422106861568, 47422106886143, +ERASE, 47422106861568, 47422106861568, +STORE, 47422106861568, 47422106886143, +ERASE, 47422106886144, 47422106886144, +STORE, 47422106886144, 47422106902527, +STORE, 47422106886144, 47422106914815, +ERASE, 47422106861568, 47422106861568, +STORE, 47422106861568, 47422106877951, +STORE, 47422106877952, 47422106886143, +ERASE, 47422105047040, 47422105047040, +STORE, 47422105047040, 47422105051135, +STORE, 47422105051136, 47422105055231, +ERASE, 94878389637120, 94878389637120, +STORE, 94878389637120, 94878389653503, +STORE, 94878389653504, 94878389657599, +ERASE, 140210690215936, 140210690215936, +STORE, 140210690215936, 140210690220031, +STORE, 140210690220032, 140210690224127, +ERASE, 47422104940544, 47422104940544, +STORE, 140737488347136, 140737488351231, +STORE, 140727690309632, 140737488351231, +ERASE, 140727690309632, 140727690309632, +STORE, 140727690309632, 140727690313727, +STORE, 94121892208640, 94121892921343, +ERASE, 94121892208640, 94121892208640, +STORE, 94121892208640, 94121892257791, +STORE, 94121892257792, 94121892921343, +ERASE, 94121892257792, 94121892257792, +STORE, 94121892257792, 94121892802559, +STORE, 94121892802560, 94121892900863, +STORE, 94121892900864, 94121892921343, +STORE, 140662438326272, 140662438498303, +ERASE, 140662438326272, 140662438326272, +STORE, 140662438326272, 140662438330367, +STORE, 140662438330368, 140662438498303, +ERASE, 140662438330368, 140662438330368, +STORE, 140662438330368, 140662438453247, +STORE, 140662438453248, 140662438486015, +STORE, 140662438486016, 140662438494207, +STORE, 140662438494208, 140662438498303, +STORE, 140727690379264, 140727690383359, +STORE, 140727690366976, 140727690379263, +STORE, 46970356670464, 46970356678655, +STORE, 46970356678656, 46970356686847, +STORE, 46970356686848, 46970356793343, +STORE, 46970356703232, 46970356793343, +STORE, 46970356686848, 46970356703231, +ERASE, 46970356703232, 46970356703232, +STORE, 46970356703232, 46970356776959, +STORE, 46970356776960, 46970356793343, +STORE, 46970356756480, 46970356776959, +STORE, 46970356703232, 46970356756479, +ERASE, 46970356703232, 46970356703232, +STORE, 46970356703232, 46970356756479, +STORE, 46970356772864, 46970356776959, +STORE, 46970356756480, 46970356772863, +ERASE, 46970356756480, 46970356756480, +STORE, 46970356756480, 46970356772863, +STORE, 46970356785152, 46970356793343, +STORE, 46970356776960, 46970356785151, +ERASE, 46970356776960, 
46970356776960, +STORE, 46970356776960, 46970356785151, +ERASE, 46970356785152, 46970356785152, +STORE, 46970356785152, 46970356793343, +STORE, 46970356793344, 46970358632447, +STORE, 46970356932608, 46970358632447, +STORE, 46970356793344, 46970356932607, +ERASE, 46970356932608, 46970356932608, +STORE, 46970356932608, 46970358591487, +STORE, 46970358591488, 46970358632447, +STORE, 46970358276096, 46970358591487, +STORE, 46970356932608, 46970358276095, +ERASE, 46970356932608, 46970356932608, +STORE, 46970356932608, 46970358276095, +STORE, 46970358587392, 46970358591487, +STORE, 46970358276096, 46970358587391, +ERASE, 46970358276096, 46970358276096, +STORE, 46970358276096, 46970358587391, +STORE, 46970358616064, 46970358632447, +STORE, 46970358591488, 46970358616063, +ERASE, 46970358591488, 46970358591488, +STORE, 46970358591488, 46970358616063, +ERASE, 46970358616064, 46970358616064, +STORE, 46970358616064, 46970358632447, +STORE, 46970358616064, 46970358644735, +ERASE, 46970358591488, 46970358591488, +STORE, 46970358591488, 46970358607871, +STORE, 46970358607872, 46970358616063, +ERASE, 46970356776960, 46970356776960, +STORE, 46970356776960, 46970356781055, +STORE, 46970356781056, 46970356785151, +ERASE, 94121892900864, 94121892900864, +STORE, 94121892900864, 94121892917247, +STORE, 94121892917248, 94121892921343, +ERASE, 140662438486016, 140662438486016, +STORE, 140662438486016, 140662438490111, +STORE, 140662438490112, 140662438494207, +ERASE, 46970356670464, 46970356670464, +STORE, 94121898610688, 94121898745855, +STORE, 140737488347136, 140737488351231, +STORE, 140737189351424, 140737488351231, +ERASE, 140737189351424, 140737189351424, +STORE, 140737189351424, 140737189355519, +STORE, 93847948832768, 93847949545471, +ERASE, 93847948832768, 93847948832768, +STORE, 93847948832768, 93847948881919, +STORE, 93847948881920, 93847949545471, +ERASE, 93847948881920, 93847948881920, +STORE, 93847948881920, 93847949426687, +STORE, 93847949426688, 93847949524991, +STORE, 93847949524992, 93847949545471, +STORE, 139698989985792, 139698990157823, +ERASE, 139698989985792, 139698989985792, +STORE, 139698989985792, 139698989989887, +STORE, 139698989989888, 139698990157823, +ERASE, 139698989989888, 139698989989888, +STORE, 139698989989888, 139698990112767, +STORE, 139698990112768, 139698990145535, +STORE, 139698990145536, 139698990153727, +STORE, 139698990153728, 139698990157823, +STORE, 140737189744640, 140737189748735, +STORE, 140737189732352, 140737189744639, +STORE, 47933805010944, 47933805019135, +STORE, 47933805019136, 47933805027327, +STORE, 47933805027328, 47933805133823, +STORE, 47933805043712, 47933805133823, +STORE, 47933805027328, 47933805043711, +ERASE, 47933805043712, 47933805043712, +STORE, 47933805043712, 47933805117439, +STORE, 47933805117440, 47933805133823, +STORE, 47933805096960, 47933805117439, +STORE, 47933805043712, 47933805096959, +ERASE, 47933805043712, 47933805043712, +STORE, 47933805043712, 47933805096959, +STORE, 47933805113344, 47933805117439, +STORE, 47933805096960, 47933805113343, +ERASE, 47933805096960, 47933805096960, +STORE, 47933805096960, 47933805113343, +STORE, 47933805125632, 47933805133823, +STORE, 47933805117440, 47933805125631, +ERASE, 47933805117440, 47933805117440, +STORE, 47933805117440, 47933805125631, +ERASE, 47933805125632, 47933805125632, +STORE, 47933805125632, 47933805133823, +STORE, 47933805133824, 47933806972927, +STORE, 47933805273088, 47933806972927, +STORE, 47933805133824, 47933805273087, +ERASE, 47933805273088, 47933805273088, +STORE, 
47933805273088, 47933806931967, +STORE, 47933806931968, 47933806972927, +STORE, 47933806616576, 47933806931967, +STORE, 47933805273088, 47933806616575, +ERASE, 47933805273088, 47933805273088, +STORE, 47933805273088, 47933806616575, +STORE, 47933806927872, 47933806931967, +STORE, 47933806616576, 47933806927871, +ERASE, 47933806616576, 47933806616576, +STORE, 47933806616576, 47933806927871, +STORE, 47933806956544, 47933806972927, +STORE, 47933806931968, 47933806956543, +ERASE, 47933806931968, 47933806931968, +STORE, 47933806931968, 47933806956543, +ERASE, 47933806956544, 47933806956544, +STORE, 47933806956544, 47933806972927, +STORE, 47933806956544, 47933806985215, +ERASE, 47933806931968, 47933806931968, +STORE, 47933806931968, 47933806948351, +STORE, 47933806948352, 47933806956543, +ERASE, 47933805117440, 47933805117440, +STORE, 47933805117440, 47933805121535, +STORE, 47933805121536, 47933805125631, +ERASE, 93847949524992, 93847949524992, +STORE, 93847949524992, 93847949541375, +STORE, 93847949541376, 93847949545471, +ERASE, 139698990145536, 139698990145536, +STORE, 139698990145536, 139698990149631, +STORE, 139698990149632, 139698990153727, +ERASE, 47933805010944, 47933805010944, +STORE, 140737488347136, 140737488351231, +STORE, 140725553991680, 140737488351231, +ERASE, 140725553991680, 140725553991680, +STORE, 140725553991680, 140725553995775, +STORE, 93980056248320, 93980056961023, +ERASE, 93980056248320, 93980056248320, +STORE, 93980056248320, 93980056297471, +STORE, 93980056297472, 93980056961023, +ERASE, 93980056297472, 93980056297472, +STORE, 93980056297472, 93980056842239, +STORE, 93980056842240, 93980056940543, +STORE, 93980056940544, 93980056961023, +STORE, 140146588971008, 140146589143039, +ERASE, 140146588971008, 140146588971008, +STORE, 140146588971008, 140146588975103, +STORE, 140146588975104, 140146589143039, +ERASE, 140146588975104, 140146588975104, +STORE, 140146588975104, 140146589097983, +STORE, 140146589097984, 140146589130751, +STORE, 140146589130752, 140146589138943, +STORE, 140146589138944, 140146589143039, +STORE, 140725554860032, 140725554864127, +STORE, 140725554847744, 140725554860031, +STORE, 47486206025728, 47486206033919, +STORE, 47486206033920, 47486206042111, +STORE, 47486206042112, 47486206148607, +STORE, 47486206058496, 47486206148607, +STORE, 47486206042112, 47486206058495, +ERASE, 47486206058496, 47486206058496, +STORE, 47486206058496, 47486206132223, +STORE, 47486206132224, 47486206148607, +STORE, 47486206111744, 47486206132223, +STORE, 47486206058496, 47486206111743, +ERASE, 47486206058496, 47486206058496, +STORE, 47486206058496, 47486206111743, +STORE, 47486206128128, 47486206132223, +STORE, 47486206111744, 47486206128127, +ERASE, 47486206111744, 47486206111744, +STORE, 47486206111744, 47486206128127, +STORE, 47486206140416, 47486206148607, +STORE, 47486206132224, 47486206140415, +ERASE, 47486206132224, 47486206132224, +STORE, 47486206132224, 47486206140415, +ERASE, 47486206140416, 47486206140416, +STORE, 47486206140416, 47486206148607, +STORE, 47486206148608, 47486207987711, +STORE, 47486206287872, 47486207987711, +STORE, 47486206148608, 47486206287871, +ERASE, 47486206287872, 47486206287872, +STORE, 47486206287872, 47486207946751, +STORE, 47486207946752, 47486207987711, +STORE, 47486207631360, 47486207946751, +STORE, 47486206287872, 47486207631359, +ERASE, 47486206287872, 47486206287872, +STORE, 47486206287872, 47486207631359, +STORE, 47486207942656, 47486207946751, +STORE, 47486207631360, 47486207942655, +ERASE, 47486207631360, 47486207631360, 
+STORE, 47486207631360, 47486207942655, +STORE, 47486207971328, 47486207987711, +STORE, 47486207946752, 47486207971327, +ERASE, 47486207946752, 47486207946752, +STORE, 47486207946752, 47486207971327, +ERASE, 47486207971328, 47486207971328, +STORE, 47486207971328, 47486207987711, +STORE, 47486207971328, 47486207999999, +ERASE, 47486207946752, 47486207946752, +STORE, 47486207946752, 47486207963135, +STORE, 47486207963136, 47486207971327, +ERASE, 47486206132224, 47486206132224, +STORE, 47486206132224, 47486206136319, +STORE, 47486206136320, 47486206140415, +ERASE, 93980056940544, 93980056940544, +STORE, 93980056940544, 93980056956927, +STORE, 93980056956928, 93980056961023, +ERASE, 140146589130752, 140146589130752, +STORE, 140146589130752, 140146589134847, +STORE, 140146589134848, 140146589138943, +ERASE, 47486206025728, 47486206025728, +STORE, 93980070006784, 93980070141951, +STORE, 140737488347136, 140737488351231, +STORE, 140727334776832, 140737488351231, +ERASE, 140727334776832, 140727334776832, +STORE, 140727334776832, 140727334780927, +STORE, 94049747247104, 94049747959807, +ERASE, 94049747247104, 94049747247104, +STORE, 94049747247104, 94049747296255, +STORE, 94049747296256, 94049747959807, +ERASE, 94049747296256, 94049747296256, +STORE, 94049747296256, 94049747841023, +STORE, 94049747841024, 94049747939327, +STORE, 94049747939328, 94049747959807, +STORE, 140227307216896, 140227307388927, +ERASE, 140227307216896, 140227307216896, +STORE, 140227307216896, 140227307220991, +STORE, 140227307220992, 140227307388927, +ERASE, 140227307220992, 140227307220992, +STORE, 140227307220992, 140227307343871, +STORE, 140227307343872, 140227307376639, +STORE, 140227307376640, 140227307384831, +STORE, 140227307384832, 140227307388927, +STORE, 140727335337984, 140727335342079, +STORE, 140727335325696, 140727335337983, +STORE, 47405487779840, 47405487788031, +STORE, 47405487788032, 47405487796223, +STORE, 47405487796224, 47405487902719, +STORE, 47405487812608, 47405487902719, +STORE, 47405487796224, 47405487812607, +ERASE, 47405487812608, 47405487812608, +STORE, 47405487812608, 47405487886335, +STORE, 47405487886336, 47405487902719, +STORE, 47405487865856, 47405487886335, +STORE, 47405487812608, 47405487865855, +ERASE, 47405487812608, 47405487812608, +STORE, 47405487812608, 47405487865855, +STORE, 47405487882240, 47405487886335, +STORE, 47405487865856, 47405487882239, +ERASE, 47405487865856, 47405487865856, +STORE, 47405487865856, 47405487882239, +STORE, 47405487894528, 47405487902719, +STORE, 47405487886336, 47405487894527, +ERASE, 47405487886336, 47405487886336, +STORE, 47405487886336, 47405487894527, +ERASE, 47405487894528, 47405487894528, +STORE, 47405487894528, 47405487902719, +STORE, 47405487902720, 47405489741823, +STORE, 47405488041984, 47405489741823, +STORE, 47405487902720, 47405488041983, +ERASE, 47405488041984, 47405488041984, +STORE, 47405488041984, 47405489700863, +STORE, 47405489700864, 47405489741823, +STORE, 47405489385472, 47405489700863, +STORE, 47405488041984, 47405489385471, +ERASE, 47405488041984, 47405488041984, +STORE, 47405488041984, 47405489385471, +STORE, 47405489696768, 47405489700863, +STORE, 47405489385472, 47405489696767, +ERASE, 47405489385472, 47405489385472, +STORE, 47405489385472, 47405489696767, +STORE, 47405489725440, 47405489741823, +STORE, 47405489700864, 47405489725439, +ERASE, 47405489700864, 47405489700864, +STORE, 47405489700864, 47405489725439, +ERASE, 47405489725440, 47405489725440, +STORE, 47405489725440, 47405489741823, +STORE, 47405489725440, 
47405489754111, +ERASE, 47405489700864, 47405489700864, +STORE, 47405489700864, 47405489717247, +STORE, 47405489717248, 47405489725439, +ERASE, 47405487886336, 47405487886336, +STORE, 47405487886336, 47405487890431, +STORE, 47405487890432, 47405487894527, +ERASE, 94049747939328, 94049747939328, +STORE, 94049747939328, 94049747955711, +STORE, 94049747955712, 94049747959807, +ERASE, 140227307376640, 140227307376640, +STORE, 140227307376640, 140227307380735, +STORE, 140227307380736, 140227307384831, +ERASE, 47405487779840, 47405487779840, +STORE, 94049758810112, 94049758945279, +STORE, 140737488347136, 140737488351231, +STORE, 140727079718912, 140737488351231, +ERASE, 140727079718912, 140727079718912, +STORE, 140727079718912, 140727079723007, +STORE, 94250996527104, 94250997239807, +ERASE, 94250996527104, 94250996527104, +STORE, 94250996527104, 94250996576255, +STORE, 94250996576256, 94250997239807, +ERASE, 94250996576256, 94250996576256, +STORE, 94250996576256, 94250997121023, +STORE, 94250997121024, 94250997219327, +STORE, 94250997219328, 94250997239807, +STORE, 140060022587392, 140060022759423, +ERASE, 140060022587392, 140060022587392, +STORE, 140060022587392, 140060022591487, +STORE, 140060022591488, 140060022759423, +ERASE, 140060022591488, 140060022591488, +STORE, 140060022591488, 140060022714367, +STORE, 140060022714368, 140060022747135, +STORE, 140060022747136, 140060022755327, +STORE, 140060022755328, 140060022759423, +STORE, 140727079788544, 140727079792639, +STORE, 140727079776256, 140727079788543, +STORE, 47572772409344, 47572772417535, +STORE, 47572772417536, 47572772425727, +STORE, 47572772425728, 47572772532223, +STORE, 47572772442112, 47572772532223, +STORE, 47572772425728, 47572772442111, +ERASE, 47572772442112, 47572772442112, +STORE, 47572772442112, 47572772515839, +STORE, 47572772515840, 47572772532223, +STORE, 47572772495360, 47572772515839, +STORE, 47572772442112, 47572772495359, +ERASE, 47572772442112, 47572772442112, +STORE, 47572772442112, 47572772495359, +STORE, 47572772511744, 47572772515839, +STORE, 47572772495360, 47572772511743, +ERASE, 47572772495360, 47572772495360, +STORE, 47572772495360, 47572772511743, +STORE, 47572772524032, 47572772532223, +STORE, 47572772515840, 47572772524031, +ERASE, 47572772515840, 47572772515840, +STORE, 47572772515840, 47572772524031, +ERASE, 47572772524032, 47572772524032, +STORE, 47572772524032, 47572772532223, +STORE, 47572772532224, 47572774371327, +STORE, 47572772671488, 47572774371327, +STORE, 47572772532224, 47572772671487, +ERASE, 47572772671488, 47572772671488, +STORE, 47572772671488, 47572774330367, +STORE, 47572774330368, 47572774371327, +STORE, 47572774014976, 47572774330367, +STORE, 47572772671488, 47572774014975, +ERASE, 47572772671488, 47572772671488, +STORE, 47572772671488, 47572774014975, +STORE, 47572774326272, 47572774330367, +STORE, 47572774014976, 47572774326271, +ERASE, 47572774014976, 47572774014976, +STORE, 47572774014976, 47572774326271, +STORE, 47572774354944, 47572774371327, +STORE, 47572774330368, 47572774354943, +ERASE, 47572774330368, 47572774330368, +STORE, 47572774330368, 47572774354943, +ERASE, 47572774354944, 47572774354944, +STORE, 47572774354944, 47572774371327, +STORE, 47572774354944, 47572774383615, +ERASE, 47572774330368, 47572774330368, +STORE, 47572774330368, 47572774346751, +STORE, 47572774346752, 47572774354943, +ERASE, 47572772515840, 47572772515840, +STORE, 47572772515840, 47572772519935, +STORE, 47572772519936, 47572772524031, +ERASE, 94250997219328, 94250997219328, +STORE, 
94250997219328, 94250997235711, +STORE, 94250997235712, 94250997239807, +ERASE, 140060022747136, 140060022747136, +STORE, 140060022747136, 140060022751231, +STORE, 140060022751232, 140060022755327, +ERASE, 47572772409344, 47572772409344, +STORE, 94251018305536, 94251018440703, +STORE, 140737488347136, 140737488351231, +STORE, 140730012389376, 140737488351231, +ERASE, 140730012389376, 140730012389376, +STORE, 140730012389376, 140730012393471, +STORE, 94382607675392, 94382607695871, +ERASE, 94382607675392, 94382607675392, +STORE, 94382607675392, 94382607679487, +STORE, 94382607679488, 94382607695871, +ERASE, 94382607679488, 94382607679488, +STORE, 94382607679488, 94382607683583, +STORE, 94382607683584, 94382607687679, +STORE, 94382607687680, 94382607695871, +STORE, 140252451454976, 140252451627007, +ERASE, 140252451454976, 140252451454976, +STORE, 140252451454976, 140252451459071, +STORE, 140252451459072, 140252451627007, +ERASE, 140252451459072, 140252451459072, +STORE, 140252451459072, 140252451581951, +STORE, 140252451581952, 140252451614719, +STORE, 140252451614720, 140252451622911, +STORE, 140252451622912, 140252451627007, +STORE, 140730013548544, 140730013552639, +STORE, 140730013536256, 140730013548543, +STORE, 47380343541760, 47380343549951, +STORE, 47380343549952, 47380343558143, +STORE, 47380343558144, 47380345397247, +STORE, 47380343697408, 47380345397247, +STORE, 47380343558144, 47380343697407, +ERASE, 47380343697408, 47380343697408, +STORE, 47380343697408, 47380345356287, +STORE, 47380345356288, 47380345397247, +STORE, 47380345040896, 47380345356287, +STORE, 47380343697408, 47380345040895, +ERASE, 47380343697408, 47380343697408, +STORE, 47380343697408, 47380345040895, +STORE, 47380345352192, 47380345356287, +STORE, 47380345040896, 47380345352191, +ERASE, 47380345040896, 47380345040896, +STORE, 47380345040896, 47380345352191, +STORE, 47380345380864, 47380345397247, +STORE, 47380345356288, 47380345380863, +ERASE, 47380345356288, 47380345356288, +STORE, 47380345356288, 47380345380863, +ERASE, 47380345380864, 47380345380864, +STORE, 47380345380864, 47380345397247, +ERASE, 47380345356288, 47380345356288, +STORE, 47380345356288, 47380345372671, +STORE, 47380345372672, 47380345380863, +ERASE, 94382607687680, 94382607687680, +STORE, 94382607687680, 94382607691775, +STORE, 94382607691776, 94382607695871, +ERASE, 140252451614720, 140252451614720, +STORE, 140252451614720, 140252451618815, +STORE, 140252451618816, 140252451622911, +ERASE, 47380343541760, 47380343541760, +STORE, 94382626803712, 94382626938879, +STORE, 140737488347136, 140737488351231, +STORE, 140730900271104, 140737488351231, +ERASE, 140730900271104, 140730900271104, +STORE, 140730900271104, 140730900275199, +STORE, 93855478120448, 93855478337535, +ERASE, 93855478120448, 93855478120448, +STORE, 93855478120448, 93855478198271, +STORE, 93855478198272, 93855478337535, +ERASE, 93855478198272, 93855478198272, +STORE, 93855478198272, 93855478243327, +STORE, 93855478243328, 93855478288383, +STORE, 93855478288384, 93855478337535, +STORE, 140092686573568, 140092686745599, +ERASE, 140092686573568, 140092686573568, +STORE, 140092686573568, 140092686577663, +STORE, 140092686577664, 140092686745599, +ERASE, 140092686577664, 140092686577664, +STORE, 140092686577664, 140092686700543, +STORE, 140092686700544, 140092686733311, +STORE, 140092686733312, 140092686741503, +STORE, 140092686741504, 140092686745599, +STORE, 140730900537344, 140730900541439, +STORE, 140730900525056, 140730900537343, +STORE, 47540108423168, 47540108431359, +STORE, 
47540108431360, 47540108439551, +STORE, 47540108439552, 47540110278655, +STORE, 47540108578816, 47540110278655, +STORE, 47540108439552, 47540108578815, +ERASE, 47540108578816, 47540108578816, +STORE, 47540108578816, 47540110237695, +STORE, 47540110237696, 47540110278655, +STORE, 47540109922304, 47540110237695, +STORE, 47540108578816, 47540109922303, +ERASE, 47540108578816, 47540108578816, +STORE, 47540108578816, 47540109922303, +STORE, 47540110233600, 47540110237695, +STORE, 47540109922304, 47540110233599, +ERASE, 47540109922304, 47540109922304, +STORE, 47540109922304, 47540110233599, +STORE, 47540110262272, 47540110278655, +STORE, 47540110237696, 47540110262271, +ERASE, 47540110237696, 47540110237696, +STORE, 47540110237696, 47540110262271, +ERASE, 47540110262272, 47540110262272, +STORE, 47540110262272, 47540110278655, +ERASE, 47540110237696, 47540110237696, +STORE, 47540110237696, 47540110254079, +STORE, 47540110254080, 47540110262271, +ERASE, 93855478288384, 93855478288384, +STORE, 93855478288384, 93855478333439, +STORE, 93855478333440, 93855478337535, +ERASE, 140092686733312, 140092686733312, +STORE, 140092686733312, 140092686737407, +STORE, 140092686737408, 140092686741503, +ERASE, 47540108423168, 47540108423168, +STORE, 93855492222976, 93855492358143, +STORE, 93855492222976, 93855492493311, +STORE, 140737488347136, 140737488351231, +STORE, 140733498146816, 140737488351231, +ERASE, 140733498146816, 140733498146816, +STORE, 140733498146816, 140733498150911, +STORE, 94170739654656, 94170740367359, +ERASE, 94170739654656, 94170739654656, +STORE, 94170739654656, 94170739703807, +STORE, 94170739703808, 94170740367359, +ERASE, 94170739703808, 94170739703808, +STORE, 94170739703808, 94170740248575, +STORE, 94170740248576, 94170740346879, +STORE, 94170740346880, 94170740367359, +STORE, 140024788877312, 140024789049343, +ERASE, 140024788877312, 140024788877312, +STORE, 140024788877312, 140024788881407, +STORE, 140024788881408, 140024789049343, +ERASE, 140024788881408, 140024788881408, +STORE, 140024788881408, 140024789004287, +STORE, 140024789004288, 140024789037055, +STORE, 140024789037056, 140024789045247, +STORE, 140024789045248, 140024789049343, +STORE, 140733499023360, 140733499027455, +STORE, 140733499011072, 140733499023359, +STORE, 47608006119424, 47608006127615, +STORE, 47608006127616, 47608006135807, +STORE, 47608006135808, 47608006242303, +STORE, 47608006152192, 47608006242303, +STORE, 47608006135808, 47608006152191, +ERASE, 47608006152192, 47608006152192, +STORE, 47608006152192, 47608006225919, +STORE, 47608006225920, 47608006242303, +STORE, 47608006205440, 47608006225919, +STORE, 47608006152192, 47608006205439, +ERASE, 47608006152192, 47608006152192, +STORE, 47608006152192, 47608006205439, +STORE, 47608006221824, 47608006225919, +STORE, 47608006205440, 47608006221823, +ERASE, 47608006205440, 47608006205440, +STORE, 47608006205440, 47608006221823, +STORE, 47608006234112, 47608006242303, +STORE, 47608006225920, 47608006234111, +ERASE, 47608006225920, 47608006225920, +STORE, 47608006225920, 47608006234111, +ERASE, 47608006234112, 47608006234112, +STORE, 47608006234112, 47608006242303, +STORE, 47608006242304, 47608008081407, +STORE, 47608006381568, 47608008081407, +STORE, 47608006242304, 47608006381567, +ERASE, 47608006381568, 47608006381568, +STORE, 47608006381568, 47608008040447, +STORE, 47608008040448, 47608008081407, +STORE, 47608007725056, 47608008040447, +STORE, 47608006381568, 47608007725055, +ERASE, 47608006381568, 47608006381568, +STORE, 47608006381568, 47608007725055, 
+STORE, 47608008036352, 47608008040447, +STORE, 47608007725056, 47608008036351, +ERASE, 47608007725056, 47608007725056, +STORE, 47608007725056, 47608008036351, +STORE, 47608008065024, 47608008081407, +STORE, 47608008040448, 47608008065023, +ERASE, 47608008040448, 47608008040448, +STORE, 47608008040448, 47608008065023, +ERASE, 47608008065024, 47608008065024, +STORE, 47608008065024, 47608008081407, +STORE, 47608008065024, 47608008093695, +ERASE, 47608008040448, 47608008040448, +STORE, 47608008040448, 47608008056831, +STORE, 47608008056832, 47608008065023, +ERASE, 47608006225920, 47608006225920, +STORE, 47608006225920, 47608006230015, +STORE, 47608006230016, 47608006234111, +ERASE, 94170740346880, 94170740346880, +STORE, 94170740346880, 94170740363263, +STORE, 94170740363264, 94170740367359, +ERASE, 140024789037056, 140024789037056, +STORE, 140024789037056, 140024789041151, +STORE, 140024789041152, 140024789045247, +ERASE, 47608006119424, 47608006119424, +STORE, 140737488347136, 140737488351231, +STORE, 140730264326144, 140737488351231, +ERASE, 140730264326144, 140730264326144, +STORE, 140730264326144, 140730264330239, +STORE, 94653216407552, 94653217120255, +ERASE, 94653216407552, 94653216407552, +STORE, 94653216407552, 94653216456703, +STORE, 94653216456704, 94653217120255, +ERASE, 94653216456704, 94653216456704, +STORE, 94653216456704, 94653217001471, +STORE, 94653217001472, 94653217099775, +STORE, 94653217099776, 94653217120255, +STORE, 140103617011712, 140103617183743, +ERASE, 140103617011712, 140103617011712, +STORE, 140103617011712, 140103617015807, +STORE, 140103617015808, 140103617183743, +ERASE, 140103617015808, 140103617015808, +STORE, 140103617015808, 140103617138687, +STORE, 140103617138688, 140103617171455, +STORE, 140103617171456, 140103617179647, +STORE, 140103617179648, 140103617183743, +STORE, 140730265427968, 140730265432063, +STORE, 140730265415680, 140730265427967, +STORE, 47529177985024, 47529177993215, +STORE, 47529177993216, 47529178001407, +STORE, 47529178001408, 47529178107903, +STORE, 47529178017792, 47529178107903, +STORE, 47529178001408, 47529178017791, +ERASE, 47529178017792, 47529178017792, +STORE, 47529178017792, 47529178091519, +STORE, 47529178091520, 47529178107903, +STORE, 47529178071040, 47529178091519, +STORE, 47529178017792, 47529178071039, +ERASE, 47529178017792, 47529178017792, +STORE, 47529178017792, 47529178071039, +STORE, 47529178087424, 47529178091519, +STORE, 47529178071040, 47529178087423, +ERASE, 47529178071040, 47529178071040, +STORE, 47529178071040, 47529178087423, +STORE, 47529178099712, 47529178107903, +STORE, 47529178091520, 47529178099711, +ERASE, 47529178091520, 47529178091520, +STORE, 47529178091520, 47529178099711, +ERASE, 47529178099712, 47529178099712, +STORE, 47529178099712, 47529178107903, +STORE, 47529178107904, 47529179947007, +STORE, 47529178247168, 47529179947007, +STORE, 47529178107904, 47529178247167, +ERASE, 47529178247168, 47529178247168, +STORE, 47529178247168, 47529179906047, +STORE, 47529179906048, 47529179947007, +STORE, 47529179590656, 47529179906047, +STORE, 47529178247168, 47529179590655, +ERASE, 47529178247168, 47529178247168, +STORE, 47529178247168, 47529179590655, +STORE, 47529179901952, 47529179906047, +STORE, 47529179590656, 47529179901951, +ERASE, 47529179590656, 47529179590656, +STORE, 47529179590656, 47529179901951, +STORE, 47529179930624, 47529179947007, +STORE, 47529179906048, 47529179930623, +ERASE, 47529179906048, 47529179906048, +STORE, 47529179906048, 47529179930623, +ERASE, 47529179930624, 
47529179930624, +STORE, 47529179930624, 47529179947007, +STORE, 47529179930624, 47529179959295, +ERASE, 47529179906048, 47529179906048, +STORE, 47529179906048, 47529179922431, +STORE, 47529179922432, 47529179930623, +ERASE, 47529178091520, 47529178091520, +STORE, 47529178091520, 47529178095615, +STORE, 47529178095616, 47529178099711, +ERASE, 94653217099776, 94653217099776, +STORE, 94653217099776, 94653217116159, +STORE, 94653217116160, 94653217120255, +ERASE, 140103617171456, 140103617171456, +STORE, 140103617171456, 140103617175551, +STORE, 140103617175552, 140103617179647, +ERASE, 47529177985024, 47529177985024, +STORE, 94653241135104, 94653241270271, +STORE, 140737488347136, 140737488351231, +STORE, 140736284549120, 140737488351231, +ERASE, 140736284549120, 140736284549120, +STORE, 140736284549120, 140736284553215, +STORE, 93963663822848, 93963664506879, +ERASE, 93963663822848, 93963663822848, +STORE, 93963663822848, 93963663884287, +STORE, 93963663884288, 93963664506879, +ERASE, 93963663884288, 93963663884288, +STORE, 93963663884288, 93963664240639, +STORE, 93963664240640, 93963664379903, +STORE, 93963664379904, 93963664506879, +STORE, 140450188439552, 140450188611583, +ERASE, 140450188439552, 140450188439552, +STORE, 140450188439552, 140450188443647, +STORE, 140450188443648, 140450188611583, +ERASE, 140450188443648, 140450188443648, +STORE, 140450188443648, 140450188566527, +STORE, 140450188566528, 140450188599295, +STORE, 140450188599296, 140450188607487, +STORE, 140450188607488, 140450188611583, +STORE, 140736284577792, 140736284581887, +STORE, 140736284565504, 140736284577791, +STORE, 47182606557184, 47182606565375, +STORE, 47182606565376, 47182606573567, +STORE, 47182606573568, 47182608412671, +STORE, 47182606712832, 47182608412671, +STORE, 47182606573568, 47182606712831, +ERASE, 47182606712832, 47182606712832, +STORE, 47182606712832, 47182608371711, +STORE, 47182608371712, 47182608412671, +STORE, 47182608056320, 47182608371711, +STORE, 47182606712832, 47182608056319, +ERASE, 47182606712832, 47182606712832, +STORE, 47182606712832, 47182608056319, +STORE, 47182608367616, 47182608371711, +STORE, 47182608056320, 47182608367615, +ERASE, 47182608056320, 47182608056320, +STORE, 47182608056320, 47182608367615, +STORE, 47182608396288, 47182608412671, +STORE, 47182608371712, 47182608396287, +ERASE, 47182608371712, 47182608371712, +STORE, 47182608371712, 47182608396287, +ERASE, 47182608396288, 47182608396288, +STORE, 47182608396288, 47182608412671, +STORE, 47182608412672, 47182608523263, +STORE, 47182608429056, 47182608523263, +STORE, 47182608412672, 47182608429055, +ERASE, 47182608429056, 47182608429056, +STORE, 47182608429056, 47182608515071, +STORE, 47182608515072, 47182608523263, +STORE, 47182608490496, 47182608515071, +STORE, 47182608429056, 47182608490495, +ERASE, 47182608429056, 47182608429056, +STORE, 47182608429056, 47182608490495, +STORE, 47182608510976, 47182608515071, +STORE, 47182608490496, 47182608510975, +ERASE, 47182608490496, 47182608490496, +STORE, 47182608490496, 47182608510975, +ERASE, 47182608515072, 47182608515072, +STORE, 47182608515072, 47182608523263, +STORE, 47182608523264, 47182608568319, +ERASE, 47182608523264, 47182608523264, +STORE, 47182608523264, 47182608531455, +STORE, 47182608531456, 47182608568319, +STORE, 47182608551936, 47182608568319, +STORE, 47182608531456, 47182608551935, +ERASE, 47182608531456, 47182608531456, +STORE, 47182608531456, 47182608551935, +STORE, 47182608560128, 47182608568319, +STORE, 47182608551936, 47182608560127, +ERASE, 
47182608551936, 47182608551936, +STORE, 47182608551936, 47182608568319, +ERASE, 47182608551936, 47182608551936, +STORE, 47182608551936, 47182608560127, +STORE, 47182608560128, 47182608568319, +ERASE, 47182608560128, 47182608560128, +STORE, 47182608560128, 47182608568319, +STORE, 47182608568320, 47182608916479, +STORE, 47182608609280, 47182608916479, +STORE, 47182608568320, 47182608609279, +ERASE, 47182608609280, 47182608609280, +STORE, 47182608609280, 47182608891903, +STORE, 47182608891904, 47182608916479, +STORE, 47182608822272, 47182608891903, +STORE, 47182608609280, 47182608822271, +ERASE, 47182608609280, 47182608609280, +STORE, 47182608609280, 47182608822271, +STORE, 47182608887808, 47182608891903, +STORE, 47182608822272, 47182608887807, +ERASE, 47182608822272, 47182608822272, +STORE, 47182608822272, 47182608887807, +ERASE, 47182608891904, 47182608891904, +STORE, 47182608891904, 47182608916479, +STORE, 47182608916480, 47182611177471, +STORE, 47182609068032, 47182611177471, +STORE, 47182608916480, 47182609068031, +ERASE, 47182609068032, 47182609068032, +STORE, 47182609068032, 47182611161087, +STORE, 47182611161088, 47182611177471, +STORE, 47182611169280, 47182611177471, +STORE, 47182611161088, 47182611169279, +ERASE, 47182611161088, 47182611161088, +STORE, 47182611161088, 47182611169279, +ERASE, 47182611169280, 47182611169280, +STORE, 47182611169280, 47182611177471, +STORE, 47182611177472, 47182611312639, +ERASE, 47182611177472, 47182611177472, +STORE, 47182611177472, 47182611202047, +STORE, 47182611202048, 47182611312639, +STORE, 47182611263488, 47182611312639, +STORE, 47182611202048, 47182611263487, +ERASE, 47182611202048, 47182611202048, +STORE, 47182611202048, 47182611263487, +STORE, 47182611288064, 47182611312639, +STORE, 47182611263488, 47182611288063, +ERASE, 47182611263488, 47182611263488, +STORE, 47182611263488, 47182611312639, +ERASE, 47182611263488, 47182611263488, +STORE, 47182611263488, 47182611288063, +STORE, 47182611288064, 47182611312639, +STORE, 47182611296256, 47182611312639, +STORE, 47182611288064, 47182611296255, +ERASE, 47182611288064, 47182611288064, +STORE, 47182611288064, 47182611296255, +ERASE, 47182611296256, 47182611296256, +STORE, 47182611296256, 47182611312639, +STORE, 47182611296256, 47182611320831, +STORE, 47182611320832, 47182611484671, +ERASE, 47182611320832, 47182611320832, +STORE, 47182611320832, 47182611333119, +STORE, 47182611333120, 47182611484671, +STORE, 47182611431424, 47182611484671, +STORE, 47182611333120, 47182611431423, +ERASE, 47182611333120, 47182611333120, +STORE, 47182611333120, 47182611431423, +STORE, 47182611476480, 47182611484671, +STORE, 47182611431424, 47182611476479, +ERASE, 47182611431424, 47182611431424, +STORE, 47182611431424, 47182611484671, +ERASE, 47182611431424, 47182611431424, +STORE, 47182611431424, 47182611476479, +STORE, 47182611476480, 47182611484671, +ERASE, 47182611476480, 47182611476480, +STORE, 47182611476480, 47182611484671, +STORE, 47182611484672, 47182612082687, +STORE, 47182611603456, 47182612082687, +STORE, 47182611484672, 47182611603455, +ERASE, 47182611603456, 47182611603456, +STORE, 47182611603456, 47182612029439, +STORE, 47182612029440, 47182612082687, +STORE, 47182611918848, 47182612029439, +STORE, 47182611603456, 47182611918847, +ERASE, 47182611603456, 47182611603456, +STORE, 47182611603456, 47182611918847, +STORE, 47182612025344, 47182612029439, +STORE, 47182611918848, 47182612025343, +ERASE, 47182611918848, 47182611918848, +STORE, 47182611918848, 47182612025343, +ERASE, 47182612029440, 47182612029440, 
+STORE, 47182612029440, 47182612082687, +STORE, 47182612082688, 47182615134207, +STORE, 47182612627456, 47182615134207, +STORE, 47182612082688, 47182612627455, +ERASE, 47182612627456, 47182612627456, +STORE, 47182612627456, 47182614913023, +STORE, 47182614913024, 47182615134207, +STORE, 47182614323200, 47182614913023, +STORE, 47182612627456, 47182614323199, +ERASE, 47182612627456, 47182612627456, +STORE, 47182612627456, 47182614323199, +STORE, 47182614908928, 47182614913023, +STORE, 47182614323200, 47182614908927, +ERASE, 47182614323200, 47182614323200, +STORE, 47182614323200, 47182614908927, +STORE, 47182615117824, 47182615134207, +STORE, 47182614913024, 47182615117823, +ERASE, 47182614913024, 47182614913024, +STORE, 47182614913024, 47182615117823, +ERASE, 47182615117824, 47182615117824, +STORE, 47182615117824, 47182615134207, +STORE, 47182615134208, 47182615166975, +ERASE, 47182615134208, 47182615134208, +STORE, 47182615134208, 47182615142399, +STORE, 47182615142400, 47182615166975, +STORE, 47182615154688, 47182615166975, +STORE, 47182615142400, 47182615154687, +ERASE, 47182615142400, 47182615142400, +STORE, 47182615142400, 47182615154687, +STORE, 47182615158784, 47182615166975, +STORE, 47182615154688, 47182615158783, +ERASE, 47182615154688, 47182615154688, +STORE, 47182615154688, 47182615166975, +ERASE, 47182615154688, 47182615154688, +STORE, 47182615154688, 47182615158783, +STORE, 47182615158784, 47182615166975, +ERASE, 47182615158784, 47182615158784, +STORE, 47182615158784, 47182615166975, +STORE, 47182615166976, 47182615203839, +ERASE, 47182615166976, 47182615166976, +STORE, 47182615166976, 47182615175167, +STORE, 47182615175168, 47182615203839, +STORE, 47182615191552, 47182615203839, +STORE, 47182615175168, 47182615191551, +ERASE, 47182615175168, 47182615175168, +STORE, 47182615175168, 47182615191551, +STORE, 47182615195648, 47182615203839, +STORE, 47182615191552, 47182615195647, +ERASE, 47182615191552, 47182615191552, +STORE, 47182615191552, 47182615203839, +ERASE, 47182615191552, 47182615191552, +STORE, 47182615191552, 47182615195647, +STORE, 47182615195648, 47182615203839, +ERASE, 47182615195648, 47182615195648, +STORE, 47182615195648, 47182615203839, +STORE, 47182615203840, 47182615678975, +ERASE, 47182615203840, 47182615203840, +STORE, 47182615203840, 47182615212031, +STORE, 47182615212032, 47182615678975, +STORE, 47182615547904, 47182615678975, +STORE, 47182615212032, 47182615547903, +ERASE, 47182615212032, 47182615212032, +STORE, 47182615212032, 47182615547903, +STORE, 47182615670784, 47182615678975, +STORE, 47182615547904, 47182615670783, +ERASE, 47182615547904, 47182615547904, +STORE, 47182615547904, 47182615678975, +ERASE, 47182615547904, 47182615547904, +STORE, 47182615547904, 47182615670783, +STORE, 47182615670784, 47182615678975, +ERASE, 47182615670784, 47182615670784, +STORE, 47182615670784, 47182615678975, +STORE, 47182615678976, 47182615687167, +STORE, 47182615687168, 47182615707647, +ERASE, 47182615687168, 47182615687168, +STORE, 47182615687168, 47182615691263, +STORE, 47182615691264, 47182615707647, +STORE, 47182615695360, 47182615707647, +STORE, 47182615691264, 47182615695359, +ERASE, 47182615691264, 47182615691264, +STORE, 47182615691264, 47182615695359, +STORE, 47182615699456, 47182615707647, +STORE, 47182615695360, 47182615699455, +ERASE, 47182615695360, 47182615695360, +STORE, 47182615695360, 47182615707647, +ERASE, 47182615695360, 47182615695360, +STORE, 47182615695360, 47182615699455, +STORE, 47182615699456, 47182615707647, +ERASE, 47182615699456, 
47182615699456, +STORE, 47182615699456, 47182615707647, +STORE, 47182615707648, 47182615715839, +ERASE, 47182608371712, 47182608371712, +STORE, 47182608371712, 47182608388095, +STORE, 47182608388096, 47182608396287, +ERASE, 47182615699456, 47182615699456, +STORE, 47182615699456, 47182615703551, +STORE, 47182615703552, 47182615707647, +ERASE, 47182611288064, 47182611288064, +STORE, 47182611288064, 47182611292159, +STORE, 47182611292160, 47182611296255, +ERASE, 47182615670784, 47182615670784, +STORE, 47182615670784, 47182615674879, +STORE, 47182615674880, 47182615678975, +ERASE, 47182615195648, 47182615195648, +STORE, 47182615195648, 47182615199743, +STORE, 47182615199744, 47182615203839, +ERASE, 47182615158784, 47182615158784, +STORE, 47182615158784, 47182615162879, +STORE, 47182615162880, 47182615166975, +ERASE, 47182614913024, 47182614913024, +STORE, 47182614913024, 47182615109631, +STORE, 47182615109632, 47182615117823, +ERASE, 47182612029440, 47182612029440, +STORE, 47182612029440, 47182612066303, +STORE, 47182612066304, 47182612082687, +ERASE, 47182611476480, 47182611476480, +STORE, 47182611476480, 47182611480575, +STORE, 47182611480576, 47182611484671, +ERASE, 47182611161088, 47182611161088, +STORE, 47182611161088, 47182611165183, +STORE, 47182611165184, 47182611169279, +ERASE, 47182608891904, 47182608891904, +STORE, 47182608891904, 47182608912383, +STORE, 47182608912384, 47182608916479, +ERASE, 47182608560128, 47182608560128, +STORE, 47182608560128, 47182608564223, +STORE, 47182608564224, 47182608568319, +ERASE, 47182608515072, 47182608515072, +STORE, 47182608515072, 47182608519167, +STORE, 47182608519168, 47182608523263, +ERASE, 93963664379904, 93963664379904, +STORE, 93963664379904, 93963664502783, +STORE, 93963664502784, 93963664506879, +ERASE, 140450188599296, 140450188599296, +STORE, 140450188599296, 140450188603391, +STORE, 140450188603392, 140450188607487, +ERASE, 47182606557184, 47182606557184, +STORE, 93963694723072, 93963694858239, +STORE, 140737488347136, 140737488351231, +STORE, 140730313261056, 140737488351231, +ERASE, 140730313261056, 140730313261056, +STORE, 140730313261056, 140730313265151, +STORE, 94386579017728, 94386579697663, +ERASE, 94386579017728, 94386579017728, +STORE, 94386579017728, 94386579083263, +STORE, 94386579083264, 94386579697663, +ERASE, 94386579083264, 94386579083264, +STORE, 94386579083264, 94386579431423, +STORE, 94386579431424, 94386579570687, +STORE, 94386579570688, 94386579697663, +STORE, 140124810838016, 140124811010047, +ERASE, 140124810838016, 140124810838016, +STORE, 140124810838016, 140124810842111, +STORE, 140124810842112, 140124811010047, +ERASE, 140124810842112, 140124810842112, +STORE, 140124810842112, 140124810964991, +STORE, 140124810964992, 140124810997759, +STORE, 140124810997760, 140124811005951, +STORE, 140124811005952, 140124811010047, +STORE, 140730313601024, 140730313605119, +STORE, 140730313588736, 140730313601023, +STORE, 47507984158720, 47507984166911, +STORE, 47507984166912, 47507984175103, +STORE, 47507984175104, 47507986014207, +STORE, 47507984314368, 47507986014207, +STORE, 47507984175104, 47507984314367, +ERASE, 47507984314368, 47507984314368, +STORE, 47507984314368, 47507985973247, +STORE, 47507985973248, 47507986014207, +STORE, 47507985657856, 47507985973247, +STORE, 47507984314368, 47507985657855, +ERASE, 47507984314368, 47507984314368, +STORE, 47507984314368, 47507985657855, +STORE, 47507985969152, 47507985973247, +STORE, 47507985657856, 47507985969151, +ERASE, 47507985657856, 47507985657856, +STORE, 
47507985657856, 47507985969151, +STORE, 47507985997824, 47507986014207, +STORE, 47507985973248, 47507985997823, +ERASE, 47507985973248, 47507985973248, +STORE, 47507985973248, 47507985997823, +ERASE, 47507985997824, 47507985997824, +STORE, 47507985997824, 47507986014207, +STORE, 47507986014208, 47507986124799, +STORE, 47507986030592, 47507986124799, +STORE, 47507986014208, 47507986030591, +ERASE, 47507986030592, 47507986030592, +STORE, 47507986030592, 47507986116607, +STORE, 47507986116608, 47507986124799, +STORE, 47507986092032, 47507986116607, +STORE, 47507986030592, 47507986092031, +ERASE, 47507986030592, 47507986030592, +STORE, 47507986030592, 47507986092031, +STORE, 47507986112512, 47507986116607, +STORE, 47507986092032, 47507986112511, +ERASE, 47507986092032, 47507986092032, +STORE, 47507986092032, 47507986112511, +ERASE, 47507986116608, 47507986116608, +STORE, 47507986116608, 47507986124799, +STORE, 47507986124800, 47507986169855, +ERASE, 47507986124800, 47507986124800, +STORE, 47507986124800, 47507986132991, +STORE, 47507986132992, 47507986169855, +STORE, 47507986153472, 47507986169855, +STORE, 47507986132992, 47507986153471, +ERASE, 47507986132992, 47507986132992, +STORE, 47507986132992, 47507986153471, +STORE, 47507986161664, 47507986169855, +STORE, 47507986153472, 47507986161663, +ERASE, 47507986153472, 47507986153472, +STORE, 47507986153472, 47507986169855, +ERASE, 47507986153472, 47507986153472, +STORE, 47507986153472, 47507986161663, +STORE, 47507986161664, 47507986169855, +ERASE, 47507986161664, 47507986161664, +STORE, 47507986161664, 47507986169855, +STORE, 47507986169856, 47507986518015, +STORE, 47507986210816, 47507986518015, +STORE, 47507986169856, 47507986210815, +ERASE, 47507986210816, 47507986210816, +STORE, 47507986210816, 47507986493439, +STORE, 47507986493440, 47507986518015, +STORE, 47507986423808, 47507986493439, +STORE, 47507986210816, 47507986423807, +ERASE, 47507986210816, 47507986210816, +STORE, 47507986210816, 47507986423807, +STORE, 47507986489344, 47507986493439, +STORE, 47507986423808, 47507986489343, +ERASE, 47507986423808, 47507986423808, +STORE, 47507986423808, 47507986489343, +ERASE, 47507986493440, 47507986493440, +STORE, 47507986493440, 47507986518015, +STORE, 47507986518016, 47507988779007, +STORE, 47507986669568, 47507988779007, +STORE, 47507986518016, 47507986669567, +ERASE, 47507986669568, 47507986669568, +STORE, 47507986669568, 47507988762623, +STORE, 47507988762624, 47507988779007, +STORE, 47507988770816, 47507988779007, +STORE, 47507988762624, 47507988770815, +ERASE, 47507988762624, 47507988762624, +STORE, 47507988762624, 47507988770815, +ERASE, 47507988770816, 47507988770816, +STORE, 47507988770816, 47507988779007, +STORE, 47507988779008, 47507988914175, +ERASE, 47507988779008, 47507988779008, +STORE, 47507988779008, 47507988803583, +STORE, 47507988803584, 47507988914175, +STORE, 47507988865024, 47507988914175, +STORE, 47507988803584, 47507988865023, +ERASE, 47507988803584, 47507988803584, +STORE, 47507988803584, 47507988865023, +STORE, 47507988889600, 47507988914175, +STORE, 47507988865024, 47507988889599, +ERASE, 47507988865024, 47507988865024, +STORE, 47507988865024, 47507988914175, +ERASE, 47507988865024, 47507988865024, +STORE, 47507988865024, 47507988889599, +STORE, 47507988889600, 47507988914175, +STORE, 47507988897792, 47507988914175, +STORE, 47507988889600, 47507988897791, +ERASE, 47507988889600, 47507988889600, +STORE, 47507988889600, 47507988897791, +ERASE, 47507988897792, 47507988897792, +STORE, 47507988897792, 47507988914175, 
+STORE, 47507988897792, 47507988922367, +STORE, 47507988922368, 47507989086207, +ERASE, 47507988922368, 47507988922368, +STORE, 47507988922368, 47507988934655, +STORE, 47507988934656, 47507989086207, +STORE, 47507989032960, 47507989086207, +STORE, 47507988934656, 47507989032959, +ERASE, 47507988934656, 47507988934656, +STORE, 47507988934656, 47507989032959, +STORE, 47507989078016, 47507989086207, +STORE, 47507989032960, 47507989078015, +ERASE, 47507989032960, 47507989032960, +STORE, 47507989032960, 47507989086207, +ERASE, 47507989032960, 47507989032960, +STORE, 47507989032960, 47507989078015, +STORE, 47507989078016, 47507989086207, +ERASE, 47507989078016, 47507989078016, +STORE, 47507989078016, 47507989086207, +STORE, 47507989086208, 47507989684223, +STORE, 47507989204992, 47507989684223, +STORE, 47507989086208, 47507989204991, +ERASE, 47507989204992, 47507989204992, +STORE, 47507989204992, 47507989630975, +STORE, 47507989630976, 47507989684223, +STORE, 47507989520384, 47507989630975, +STORE, 47507989204992, 47507989520383, +ERASE, 47507989204992, 47507989204992, +STORE, 47507989204992, 47507989520383, +STORE, 47507989626880, 47507989630975, +STORE, 47507989520384, 47507989626879, +ERASE, 47507989520384, 47507989520384, +STORE, 47507989520384, 47507989626879, +ERASE, 47507989630976, 47507989630976, +STORE, 47507989630976, 47507989684223, +STORE, 47507989684224, 47507992735743, +STORE, 47507990228992, 47507992735743, +STORE, 47507989684224, 47507990228991, +ERASE, 47507990228992, 47507990228992, +STORE, 47507990228992, 47507992514559, +STORE, 47507992514560, 47507992735743, +STORE, 47507991924736, 47507992514559, +STORE, 47507990228992, 47507991924735, +ERASE, 47507990228992, 47507990228992, +STORE, 47507990228992, 47507991924735, +STORE, 47507992510464, 47507992514559, +STORE, 47507991924736, 47507992510463, +ERASE, 47507991924736, 47507991924736, +STORE, 47507991924736, 47507992510463, +STORE, 47507992719360, 47507992735743, +STORE, 47507992514560, 47507992719359, +ERASE, 47507992514560, 47507992514560, +STORE, 47507992514560, 47507992719359, +ERASE, 47507992719360, 47507992719360, +STORE, 47507992719360, 47507992735743, +STORE, 47507992735744, 47507992768511, +ERASE, 47507992735744, 47507992735744, +STORE, 47507992735744, 47507992743935, +STORE, 47507992743936, 47507992768511, +STORE, 47507992756224, 47507992768511, +STORE, 47507992743936, 47507992756223, +ERASE, 47507992743936, 47507992743936, +STORE, 47507992743936, 47507992756223, +STORE, 47507992760320, 47507992768511, +STORE, 47507992756224, 47507992760319, +ERASE, 47507992756224, 47507992756224, +STORE, 47507992756224, 47507992768511, +ERASE, 47507992756224, 47507992756224, +STORE, 47507992756224, 47507992760319, +STORE, 47507992760320, 47507992768511, +ERASE, 47507992760320, 47507992760320, +STORE, 47507992760320, 47507992768511, +STORE, 47507992768512, 47507992805375, +ERASE, 47507992768512, 47507992768512, +STORE, 47507992768512, 47507992776703, +STORE, 47507992776704, 47507992805375, +STORE, 47507992793088, 47507992805375, +STORE, 47507992776704, 47507992793087, +ERASE, 47507992776704, 47507992776704, +STORE, 47507992776704, 47507992793087, +STORE, 47507992797184, 47507992805375, +STORE, 47507992793088, 47507992797183, +ERASE, 47507992793088, 47507992793088, +STORE, 47507992793088, 47507992805375, +ERASE, 47507992793088, 47507992793088, +STORE, 47507992793088, 47507992797183, +STORE, 47507992797184, 47507992805375, +ERASE, 47507992797184, 47507992797184, +STORE, 47507992797184, 47507992805375, +STORE, 47507992805376, 
47507993280511, +ERASE, 47507992805376, 47507992805376, +STORE, 47507992805376, 47507992813567, +STORE, 47507992813568, 47507993280511, +STORE, 47507993149440, 47507993280511, +STORE, 47507992813568, 47507993149439, +ERASE, 47507992813568, 47507992813568, +STORE, 47507992813568, 47507993149439, +STORE, 47507993272320, 47507993280511, +STORE, 47507993149440, 47507993272319, +ERASE, 47507993149440, 47507993149440, +STORE, 47507993149440, 47507993280511, +ERASE, 47507993149440, 47507993149440, +STORE, 47507993149440, 47507993272319, +STORE, 47507993272320, 47507993280511, +ERASE, 47507993272320, 47507993272320, +STORE, 47507993272320, 47507993280511, +STORE, 47507993280512, 47507993288703, +STORE, 47507993288704, 47507993309183, +ERASE, 47507993288704, 47507993288704, +STORE, 47507993288704, 47507993292799, +STORE, 47507993292800, 47507993309183, +STORE, 47507993296896, 47507993309183, +STORE, 47507993292800, 47507993296895, +ERASE, 47507993292800, 47507993292800, +STORE, 47507993292800, 47507993296895, +STORE, 47507993300992, 47507993309183, +STORE, 47507993296896, 47507993300991, +ERASE, 47507993296896, 47507993296896, +STORE, 47507993296896, 47507993309183, +ERASE, 47507993296896, 47507993296896, +STORE, 47507993296896, 47507993300991, +STORE, 47507993300992, 47507993309183, +ERASE, 47507993300992, 47507993300992, +STORE, 47507993300992, 47507993309183, +STORE, 47507993309184, 47507993317375, +ERASE, 47507985973248, 47507985973248, +STORE, 47507985973248, 47507985989631, +STORE, 47507985989632, 47507985997823, +ERASE, 47507993300992, 47507993300992, +STORE, 47507993300992, 47507993305087, +STORE, 47507993305088, 47507993309183, +ERASE, 47507988889600, 47507988889600, +STORE, 47507988889600, 47507988893695, +STORE, 47507988893696, 47507988897791, +ERASE, 47507993272320, 47507993272320, +STORE, 47507993272320, 47507993276415, +STORE, 47507993276416, 47507993280511, +ERASE, 47507992797184, 47507992797184, +STORE, 47507992797184, 47507992801279, +STORE, 47507992801280, 47507992805375, +ERASE, 47507992760320, 47507992760320, +STORE, 47507992760320, 47507992764415, +STORE, 47507992764416, 47507992768511, +ERASE, 47507992514560, 47507992514560, +STORE, 47507992514560, 47507992711167, +STORE, 47507992711168, 47507992719359, +ERASE, 47507989630976, 47507989630976, +STORE, 47507989630976, 47507989667839, +STORE, 47507989667840, 47507989684223, +ERASE, 47507989078016, 47507989078016, +STORE, 47507989078016, 47507989082111, +STORE, 47507989082112, 47507989086207, +ERASE, 47507988762624, 47507988762624, +STORE, 47507988762624, 47507988766719, +STORE, 47507988766720, 47507988770815, +ERASE, 47507986493440, 47507986493440, +STORE, 47507986493440, 47507986513919, +STORE, 47507986513920, 47507986518015, +ERASE, 47507986161664, 47507986161664, +STORE, 47507986161664, 47507986165759, +STORE, 47507986165760, 47507986169855, +ERASE, 47507986116608, 47507986116608, +STORE, 47507986116608, 47507986120703, +STORE, 47507986120704, 47507986124799, +ERASE, 94386579570688, 94386579570688, +STORE, 94386579570688, 94386579693567, +STORE, 94386579693568, 94386579697663, +ERASE, 140124810997760, 140124810997760, +STORE, 140124810997760, 140124811001855, +STORE, 140124811001856, 140124811005951, +ERASE, 47507984158720, 47507984158720, +STORE, 94386583982080, 94386584117247, +STORE, 94386583982080, 94386584256511, +ERASE, 94386583982080, 94386583982080, +STORE, 94386583982080, 94386584223743, +STORE, 94386584223744, 94386584256511, +ERASE, 94386584223744, 94386584223744, +STORE, 140737488347136, 140737488351231, +STORE, 
140733763395584, 140737488351231, +ERASE, 140733763395584, 140733763395584, +STORE, 140733763395584, 140733763399679, +STORE, 94011546472448, 94011547152383, +ERASE, 94011546472448, 94011546472448, +STORE, 94011546472448, 94011546537983, +STORE, 94011546537984, 94011547152383, +ERASE, 94011546537984, 94011546537984, +STORE, 94011546537984, 94011546886143, +STORE, 94011546886144, 94011547025407, +STORE, 94011547025408, 94011547152383, +STORE, 139757597949952, 139757598121983, +ERASE, 139757597949952, 139757597949952, +STORE, 139757597949952, 139757597954047, +STORE, 139757597954048, 139757598121983, +ERASE, 139757597954048, 139757597954048, +STORE, 139757597954048, 139757598076927, +STORE, 139757598076928, 139757598109695, +STORE, 139757598109696, 139757598117887, +STORE, 139757598117888, 139757598121983, +STORE, 140733763596288, 140733763600383, +STORE, 140733763584000, 140733763596287, +STORE, 47875197046784, 47875197054975, +STORE, 47875197054976, 47875197063167, +STORE, 47875197063168, 47875198902271, +STORE, 47875197202432, 47875198902271, +STORE, 47875197063168, 47875197202431, +ERASE, 47875197202432, 47875197202432, +STORE, 47875197202432, 47875198861311, +STORE, 47875198861312, 47875198902271, +STORE, 47875198545920, 47875198861311, +STORE, 47875197202432, 47875198545919, +ERASE, 47875197202432, 47875197202432, +STORE, 47875197202432, 47875198545919, +STORE, 47875198857216, 47875198861311, +STORE, 47875198545920, 47875198857215, +ERASE, 47875198545920, 47875198545920, +STORE, 47875198545920, 47875198857215, +STORE, 47875198885888, 47875198902271, +STORE, 47875198861312, 47875198885887, +ERASE, 47875198861312, 47875198861312, +STORE, 47875198861312, 47875198885887, +ERASE, 47875198885888, 47875198885888, +STORE, 47875198885888, 47875198902271, +STORE, 47875198902272, 47875199012863, +STORE, 47875198918656, 47875199012863, +STORE, 47875198902272, 47875198918655, +ERASE, 47875198918656, 47875198918656, +STORE, 47875198918656, 47875199004671, +STORE, 47875199004672, 47875199012863, +STORE, 47875198980096, 47875199004671, +STORE, 47875198918656, 47875198980095, +ERASE, 47875198918656, 47875198918656, +STORE, 47875198918656, 47875198980095, +STORE, 47875199000576, 47875199004671, +STORE, 47875198980096, 47875199000575, +ERASE, 47875198980096, 47875198980096, +STORE, 47875198980096, 47875199000575, +ERASE, 47875199004672, 47875199004672, +STORE, 47875199004672, 47875199012863, +STORE, 47875199012864, 47875199057919, +ERASE, 47875199012864, 47875199012864, +STORE, 47875199012864, 47875199021055, +STORE, 47875199021056, 47875199057919, +STORE, 47875199041536, 47875199057919, +STORE, 47875199021056, 47875199041535, +ERASE, 47875199021056, 47875199021056, +STORE, 47875199021056, 47875199041535, +STORE, 47875199049728, 47875199057919, +STORE, 47875199041536, 47875199049727, +ERASE, 47875199041536, 47875199041536, +STORE, 47875199041536, 47875199057919, +ERASE, 47875199041536, 47875199041536, +STORE, 47875199041536, 47875199049727, +STORE, 47875199049728, 47875199057919, +ERASE, 47875199049728, 47875199049728, +STORE, 47875199049728, 47875199057919, +STORE, 47875199057920, 47875199406079, +STORE, 47875199098880, 47875199406079, +STORE, 47875199057920, 47875199098879, +ERASE, 47875199098880, 47875199098880, +STORE, 47875199098880, 47875199381503, +STORE, 47875199381504, 47875199406079, +STORE, 47875199311872, 47875199381503, +STORE, 47875199098880, 47875199311871, +ERASE, 47875199098880, 47875199098880, +STORE, 47875199098880, 47875199311871, +STORE, 47875199377408, 47875199381503, +STORE, 
47875199311872, 47875199377407, +ERASE, 47875199311872, 47875199311872, +STORE, 47875199311872, 47875199377407, +ERASE, 47875199381504, 47875199381504, +STORE, 47875199381504, 47875199406079, +STORE, 47875199406080, 47875201667071, +STORE, 47875199557632, 47875201667071, +STORE, 47875199406080, 47875199557631, +ERASE, 47875199557632, 47875199557632, +STORE, 47875199557632, 47875201650687, +STORE, 47875201650688, 47875201667071, +STORE, 47875201658880, 47875201667071, +STORE, 47875201650688, 47875201658879, +ERASE, 47875201650688, 47875201650688, +STORE, 47875201650688, 47875201658879, +ERASE, 47875201658880, 47875201658880, +STORE, 47875201658880, 47875201667071, +STORE, 47875201667072, 47875201802239, +ERASE, 47875201667072, 47875201667072, +STORE, 47875201667072, 47875201691647, +STORE, 47875201691648, 47875201802239, +STORE, 47875201753088, 47875201802239, +STORE, 47875201691648, 47875201753087, +ERASE, 47875201691648, 47875201691648, +STORE, 47875201691648, 47875201753087, +STORE, 47875201777664, 47875201802239, +STORE, 47875201753088, 47875201777663, +ERASE, 47875201753088, 47875201753088, +STORE, 47875201753088, 47875201802239, +ERASE, 47875201753088, 47875201753088, +STORE, 47875201753088, 47875201777663, +STORE, 47875201777664, 47875201802239, +STORE, 47875201785856, 47875201802239, +STORE, 47875201777664, 47875201785855, +ERASE, 47875201777664, 47875201777664, +STORE, 47875201777664, 47875201785855, +ERASE, 47875201785856, 47875201785856, +STORE, 47875201785856, 47875201802239, +STORE, 47875201785856, 47875201810431, +STORE, 47875201810432, 47875201974271, +ERASE, 47875201810432, 47875201810432, +STORE, 47875201810432, 47875201822719, +STORE, 47875201822720, 47875201974271, +STORE, 47875201921024, 47875201974271, +STORE, 47875201822720, 47875201921023, +ERASE, 47875201822720, 47875201822720, +STORE, 47875201822720, 47875201921023, +STORE, 47875201966080, 47875201974271, +STORE, 47875201921024, 47875201966079, +ERASE, 47875201921024, 47875201921024, +STORE, 47875201921024, 47875201974271, +ERASE, 47875201921024, 47875201921024, +STORE, 47875201921024, 47875201966079, +STORE, 47875201966080, 47875201974271, +ERASE, 47875201966080, 47875201966080, +STORE, 47875201966080, 47875201974271, +STORE, 47875201974272, 47875202572287, +STORE, 47875202093056, 47875202572287, +STORE, 47875201974272, 47875202093055, +ERASE, 47875202093056, 47875202093056, +STORE, 47875202093056, 47875202519039, +STORE, 47875202519040, 47875202572287, +STORE, 47875202408448, 47875202519039, +STORE, 47875202093056, 47875202408447, +ERASE, 47875202093056, 47875202093056, +STORE, 47875202093056, 47875202408447, +STORE, 47875202514944, 47875202519039, +STORE, 47875202408448, 47875202514943, +ERASE, 47875202408448, 47875202408448, +STORE, 47875202408448, 47875202514943, +ERASE, 47875202519040, 47875202519040, +STORE, 47875202519040, 47875202572287, +STORE, 47875202572288, 47875205623807, +STORE, 47875203117056, 47875205623807, +STORE, 47875202572288, 47875203117055, +ERASE, 47875203117056, 47875203117056, +STORE, 47875203117056, 47875205402623, +STORE, 47875205402624, 47875205623807, +STORE, 47875204812800, 47875205402623, +STORE, 47875203117056, 47875204812799, +ERASE, 47875203117056, 47875203117056, +STORE, 47875203117056, 47875204812799, +STORE, 47875205398528, 47875205402623, +STORE, 47875204812800, 47875205398527, +ERASE, 47875204812800, 47875204812800, +STORE, 47875204812800, 47875205398527, +STORE, 47875205607424, 47875205623807, +STORE, 47875205402624, 47875205607423, +ERASE, 47875205402624, 47875205402624, 
+STORE, 47875205402624, 47875205607423, +ERASE, 47875205607424, 47875205607424, +STORE, 47875205607424, 47875205623807, +STORE, 47875205623808, 47875205656575, +ERASE, 47875205623808, 47875205623808, +STORE, 47875205623808, 47875205631999, +STORE, 47875205632000, 47875205656575, +STORE, 47875205644288, 47875205656575, +STORE, 47875205632000, 47875205644287, +ERASE, 47875205632000, 47875205632000, +STORE, 47875205632000, 47875205644287, +STORE, 47875205648384, 47875205656575, +STORE, 47875205644288, 47875205648383, +ERASE, 47875205644288, 47875205644288, +STORE, 47875205644288, 47875205656575, +ERASE, 47875205644288, 47875205644288, +STORE, 47875205644288, 47875205648383, +STORE, 47875205648384, 47875205656575, +ERASE, 47875205648384, 47875205648384, +STORE, 47875205648384, 47875205656575, +STORE, 47875205656576, 47875205693439, +ERASE, 47875205656576, 47875205656576, +STORE, 47875205656576, 47875205664767, +STORE, 47875205664768, 47875205693439, +STORE, 47875205681152, 47875205693439, +STORE, 47875205664768, 47875205681151, +ERASE, 47875205664768, 47875205664768, +STORE, 47875205664768, 47875205681151, +STORE, 47875205685248, 47875205693439, +STORE, 47875205681152, 47875205685247, +ERASE, 47875205681152, 47875205681152, +STORE, 47875205681152, 47875205693439, +ERASE, 47875205681152, 47875205681152, +STORE, 47875205681152, 47875205685247, +STORE, 47875205685248, 47875205693439, +ERASE, 47875205685248, 47875205685248, +STORE, 47875205685248, 47875205693439, +STORE, 47875205693440, 47875206168575, +ERASE, 47875205693440, 47875205693440, +STORE, 47875205693440, 47875205701631, +STORE, 47875205701632, 47875206168575, +STORE, 47875206037504, 47875206168575, +STORE, 47875205701632, 47875206037503, +ERASE, 47875205701632, 47875205701632, +STORE, 47875205701632, 47875206037503, +STORE, 47875206160384, 47875206168575, +STORE, 47875206037504, 47875206160383, +ERASE, 47875206037504, 47875206037504, +STORE, 47875206037504, 47875206168575, +ERASE, 47875206037504, 47875206037504, +STORE, 47875206037504, 47875206160383, +STORE, 47875206160384, 47875206168575, +ERASE, 47875206160384, 47875206160384, +STORE, 47875206160384, 47875206168575, +STORE, 47875206168576, 47875206176767, +STORE, 47875206176768, 47875206197247, +ERASE, 47875206176768, 47875206176768, +STORE, 47875206176768, 47875206180863, +STORE, 47875206180864, 47875206197247, +STORE, 47875206184960, 47875206197247, +STORE, 47875206180864, 47875206184959, +ERASE, 47875206180864, 47875206180864, +STORE, 47875206180864, 47875206184959, +STORE, 47875206189056, 47875206197247, +STORE, 47875206184960, 47875206189055, +ERASE, 47875206184960, 47875206184960, +STORE, 47875206184960, 47875206197247, +ERASE, 47875206184960, 47875206184960, +STORE, 47875206184960, 47875206189055, +STORE, 47875206189056, 47875206197247, +ERASE, 47875206189056, 47875206189056, +STORE, 47875206189056, 47875206197247, +STORE, 47875206197248, 47875206205439, +ERASE, 47875198861312, 47875198861312, +STORE, 47875198861312, 47875198877695, +STORE, 47875198877696, 47875198885887, +ERASE, 47875206189056, 47875206189056, +STORE, 47875206189056, 47875206193151, +STORE, 47875206193152, 47875206197247, +ERASE, 47875201777664, 47875201777664, +STORE, 47875201777664, 47875201781759, +STORE, 47875201781760, 47875201785855, +ERASE, 47875206160384, 47875206160384, +STORE, 47875206160384, 47875206164479, +STORE, 47875206164480, 47875206168575, +ERASE, 47875205685248, 47875205685248, +STORE, 47875205685248, 47875205689343, +STORE, 47875205689344, 47875205693439, +ERASE, 47875205648384, 
47875205648384, +STORE, 47875205648384, 47875205652479, +STORE, 47875205652480, 47875205656575, +ERASE, 47875205402624, 47875205402624, +STORE, 47875205402624, 47875205599231, +STORE, 47875205599232, 47875205607423, +ERASE, 47875202519040, 47875202519040, +STORE, 47875202519040, 47875202555903, +STORE, 47875202555904, 47875202572287, +ERASE, 47875201966080, 47875201966080, +STORE, 47875201966080, 47875201970175, +STORE, 47875201970176, 47875201974271, +ERASE, 47875201650688, 47875201650688, +STORE, 47875201650688, 47875201654783, +STORE, 47875201654784, 47875201658879, +ERASE, 47875199381504, 47875199381504, +STORE, 47875199381504, 47875199401983, +STORE, 47875199401984, 47875199406079, +ERASE, 47875199049728, 47875199049728, +STORE, 47875199049728, 47875199053823, +STORE, 47875199053824, 47875199057919, +ERASE, 47875199004672, 47875199004672, +STORE, 47875199004672, 47875199008767, +STORE, 47875199008768, 47875199012863, +ERASE, 94011547025408, 94011547025408, +STORE, 94011547025408, 94011547148287, +STORE, 94011547148288, 94011547152383, +ERASE, 139757598109696, 139757598109696, +STORE, 139757598109696, 139757598113791, +STORE, 139757598113792, 139757598117887, +ERASE, 47875197046784, 47875197046784, +STORE, 94011557584896, 94011557720063, +STORE, 94011557584896, 94011557855231, +ERASE, 94011557584896, 94011557584896, +STORE, 94011557584896, 94011557851135, +STORE, 94011557851136, 94011557855231, +ERASE, 94011557851136, 94011557851136, +ERASE, 94011557584896, 94011557584896, +STORE, 94011557584896, 94011557847039, +STORE, 94011557847040, 94011557851135, +ERASE, 94011557847040, 94011557847040, +STORE, 94011557584896, 94011557982207, +ERASE, 94011557584896, 94011557584896, +STORE, 94011557584896, 94011557978111, +STORE, 94011557978112, 94011557982207, +ERASE, 94011557978112, 94011557978112, +ERASE, 94011557584896, 94011557584896, +STORE, 94011557584896, 94011557974015, +STORE, 94011557974016, 94011557978111, +ERASE, 94011557974016, 94011557974016, +STORE, 140737488347136, 140737488351231, +STORE, 140734130360320, 140737488351231, +ERASE, 140734130360320, 140734130360320, +STORE, 140734130360320, 140734130364415, +STORE, 94641232105472, 94641232785407, +ERASE, 94641232105472, 94641232105472, +STORE, 94641232105472, 94641232171007, +STORE, 94641232171008, 94641232785407, +ERASE, 94641232171008, 94641232171008, +STORE, 94641232171008, 94641232519167, +STORE, 94641232519168, 94641232658431, +STORE, 94641232658432, 94641232785407, +STORE, 139726599516160, 139726599688191, +ERASE, 139726599516160, 139726599516160, +STORE, 139726599516160, 139726599520255, +STORE, 139726599520256, 139726599688191, +ERASE, 139726599520256, 139726599520256, +STORE, 139726599520256, 139726599643135, +STORE, 139726599643136, 139726599675903, +STORE, 139726599675904, 139726599684095, +STORE, 139726599684096, 139726599688191, +STORE, 140734130446336, 140734130450431, +STORE, 140734130434048, 140734130446335, +STORE, 47906195480576, 47906195488767, +STORE, 47906195488768, 47906195496959, +STORE, 47906195496960, 47906197336063, +STORE, 47906195636224, 47906197336063, +STORE, 47906195496960, 47906195636223, +ERASE, 47906195636224, 47906195636224, +STORE, 47906195636224, 47906197295103, +STORE, 47906197295104, 47906197336063, +STORE, 47906196979712, 47906197295103, +STORE, 47906195636224, 47906196979711, +ERASE, 47906195636224, 47906195636224, +STORE, 47906195636224, 47906196979711, +STORE, 47906197291008, 47906197295103, +STORE, 47906196979712, 47906197291007, +ERASE, 47906196979712, 47906196979712, +STORE, 
47906196979712, 47906197291007, +STORE, 47906197319680, 47906197336063, +STORE, 47906197295104, 47906197319679, +ERASE, 47906197295104, 47906197295104, +STORE, 47906197295104, 47906197319679, +ERASE, 47906197319680, 47906197319680, +STORE, 47906197319680, 47906197336063, +STORE, 47906197336064, 47906197446655, +STORE, 47906197352448, 47906197446655, +STORE, 47906197336064, 47906197352447, +ERASE, 47906197352448, 47906197352448, +STORE, 47906197352448, 47906197438463, +STORE, 47906197438464, 47906197446655, +STORE, 47906197413888, 47906197438463, +STORE, 47906197352448, 47906197413887, +ERASE, 47906197352448, 47906197352448, +STORE, 47906197352448, 47906197413887, +STORE, 47906197434368, 47906197438463, +STORE, 47906197413888, 47906197434367, +ERASE, 47906197413888, 47906197413888, +STORE, 47906197413888, 47906197434367, +ERASE, 47906197438464, 47906197438464, +STORE, 47906197438464, 47906197446655, +STORE, 47906197446656, 47906197491711, +ERASE, 47906197446656, 47906197446656, +STORE, 47906197446656, 47906197454847, +STORE, 47906197454848, 47906197491711, +STORE, 47906197475328, 47906197491711, +STORE, 47906197454848, 47906197475327, +ERASE, 47906197454848, 47906197454848, +STORE, 47906197454848, 47906197475327, +STORE, 47906197483520, 47906197491711, +STORE, 47906197475328, 47906197483519, +ERASE, 47906197475328, 47906197475328, +STORE, 47906197475328, 47906197491711, +ERASE, 47906197475328, 47906197475328, +STORE, 47906197475328, 47906197483519, +STORE, 47906197483520, 47906197491711, +ERASE, 47906197483520, 47906197483520, +STORE, 47906197483520, 47906197491711, +STORE, 47906197491712, 47906197839871, +STORE, 47906197532672, 47906197839871, +STORE, 47906197491712, 47906197532671, +ERASE, 47906197532672, 47906197532672, +STORE, 47906197532672, 47906197815295, +STORE, 47906197815296, 47906197839871, +STORE, 47906197745664, 47906197815295, +STORE, 47906197532672, 47906197745663, +ERASE, 47906197532672, 47906197532672, +STORE, 47906197532672, 47906197745663, +STORE, 47906197811200, 47906197815295, +STORE, 47906197745664, 47906197811199, +ERASE, 47906197745664, 47906197745664, +STORE, 47906197745664, 47906197811199, +ERASE, 47906197815296, 47906197815296, +STORE, 47906197815296, 47906197839871, +STORE, 47906197839872, 47906200100863, +STORE, 47906197991424, 47906200100863, +STORE, 47906197839872, 47906197991423, +ERASE, 47906197991424, 47906197991424, +STORE, 47906197991424, 47906200084479, +STORE, 47906200084480, 47906200100863, +STORE, 47906200092672, 47906200100863, +STORE, 47906200084480, 47906200092671, +ERASE, 47906200084480, 47906200084480, +STORE, 47906200084480, 47906200092671, +ERASE, 47906200092672, 47906200092672, +STORE, 47906200092672, 47906200100863, +STORE, 47906200100864, 47906200236031, +ERASE, 47906200100864, 47906200100864, +STORE, 47906200100864, 47906200125439, +STORE, 47906200125440, 47906200236031, +STORE, 47906200186880, 47906200236031, +STORE, 47906200125440, 47906200186879, +ERASE, 47906200125440, 47906200125440, +STORE, 47906200125440, 47906200186879, +STORE, 47906200211456, 47906200236031, +STORE, 47906200186880, 47906200211455, +ERASE, 47906200186880, 47906200186880, +STORE, 47906200186880, 47906200236031, +ERASE, 47906200186880, 47906200186880, +STORE, 47906200186880, 47906200211455, +STORE, 47906200211456, 47906200236031, +STORE, 47906200219648, 47906200236031, +STORE, 47906200211456, 47906200219647, +ERASE, 47906200211456, 47906200211456, +STORE, 47906200211456, 47906200219647, +ERASE, 47906200219648, 47906200219648, +STORE, 47906200219648, 47906200236031, 
+STORE, 47906200219648, 47906200244223, +STORE, 47906200244224, 47906200408063, +ERASE, 47906200244224, 47906200244224, +STORE, 47906200244224, 47906200256511, +STORE, 47906200256512, 47906200408063, +STORE, 47906200354816, 47906200408063, +STORE, 47906200256512, 47906200354815, +ERASE, 47906200256512, 47906200256512, +STORE, 47906200256512, 47906200354815, +STORE, 47906200399872, 47906200408063, +STORE, 47906200354816, 47906200399871, +ERASE, 47906200354816, 47906200354816, +STORE, 47906200354816, 47906200408063, +ERASE, 47906200354816, 47906200354816, +STORE, 47906200354816, 47906200399871, +STORE, 47906200399872, 47906200408063, +ERASE, 47906200399872, 47906200399872, +STORE, 47906200399872, 47906200408063, +STORE, 47906200408064, 47906201006079, +STORE, 47906200526848, 47906201006079, +STORE, 47906200408064, 47906200526847, +ERASE, 47906200526848, 47906200526848, +STORE, 47906200526848, 47906200952831, +STORE, 47906200952832, 47906201006079, +STORE, 47906200842240, 47906200952831, +STORE, 47906200526848, 47906200842239, +ERASE, 47906200526848, 47906200526848, +STORE, 47906200526848, 47906200842239, +STORE, 47906200948736, 47906200952831, +STORE, 47906200842240, 47906200948735, +ERASE, 47906200842240, 47906200842240, +STORE, 47906200842240, 47906200948735, +ERASE, 47906200952832, 47906200952832, +STORE, 47906200952832, 47906201006079, +STORE, 47906201006080, 47906204057599, +STORE, 47906201550848, 47906204057599, +STORE, 47906201006080, 47906201550847, +ERASE, 47906201550848, 47906201550848, +STORE, 47906201550848, 47906203836415, +STORE, 47906203836416, 47906204057599, +STORE, 47906203246592, 47906203836415, +STORE, 47906201550848, 47906203246591, +ERASE, 47906201550848, 47906201550848, +STORE, 47906201550848, 47906203246591, +STORE, 47906203832320, 47906203836415, +STORE, 47906203246592, 47906203832319, +ERASE, 47906203246592, 47906203246592, +STORE, 47906203246592, 47906203832319, +STORE, 47906204041216, 47906204057599, +STORE, 47906203836416, 47906204041215, +ERASE, 47906203836416, 47906203836416, +STORE, 47906203836416, 47906204041215, +ERASE, 47906204041216, 47906204041216, +STORE, 47906204041216, 47906204057599, +STORE, 47906204057600, 47906204090367, +ERASE, 47906204057600, 47906204057600, +STORE, 47906204057600, 47906204065791, +STORE, 47906204065792, 47906204090367, +STORE, 47906204078080, 47906204090367, +STORE, 47906204065792, 47906204078079, +ERASE, 47906204065792, 47906204065792, +STORE, 47906204065792, 47906204078079, +STORE, 47906204082176, 47906204090367, +STORE, 47906204078080, 47906204082175, +ERASE, 47906204078080, 47906204078080, +STORE, 47906204078080, 47906204090367, +ERASE, 47906204078080, 47906204078080, +STORE, 47906204078080, 47906204082175, +STORE, 47906204082176, 47906204090367, +ERASE, 47906204082176, 47906204082176, +STORE, 47906204082176, 47906204090367, +STORE, 47906204090368, 47906204127231, +ERASE, 47906204090368, 47906204090368, +STORE, 47906204090368, 47906204098559, +STORE, 47906204098560, 47906204127231, +STORE, 47906204114944, 47906204127231, +STORE, 47906204098560, 47906204114943, +ERASE, 47906204098560, 47906204098560, +STORE, 47906204098560, 47906204114943, +STORE, 47906204119040, 47906204127231, +STORE, 47906204114944, 47906204119039, +ERASE, 47906204114944, 47906204114944, +STORE, 47906204114944, 47906204127231, +ERASE, 47906204114944, 47906204114944, +STORE, 47906204114944, 47906204119039, +STORE, 47906204119040, 47906204127231, +ERASE, 47906204119040, 47906204119040, +STORE, 47906204119040, 47906204127231, +STORE, 47906204127232, 
47906204602367, +ERASE, 47906204127232, 47906204127232, +STORE, 47906204127232, 47906204135423, +STORE, 47906204135424, 47906204602367, +STORE, 47906204471296, 47906204602367, +STORE, 47906204135424, 47906204471295, +ERASE, 47906204135424, 47906204135424, +STORE, 47906204135424, 47906204471295, +STORE, 47906204594176, 47906204602367, +STORE, 47906204471296, 47906204594175, +ERASE, 47906204471296, 47906204471296, +STORE, 47906204471296, 47906204602367, +ERASE, 47906204471296, 47906204471296, +STORE, 47906204471296, 47906204594175, +STORE, 47906204594176, 47906204602367, +ERASE, 47906204594176, 47906204594176, +STORE, 47906204594176, 47906204602367, +STORE, 47906204602368, 47906204610559, +STORE, 47906204610560, 47906204631039, +ERASE, 47906204610560, 47906204610560, +STORE, 47906204610560, 47906204614655, +STORE, 47906204614656, 47906204631039, +STORE, 47906204618752, 47906204631039, +STORE, 47906204614656, 47906204618751, +ERASE, 47906204614656, 47906204614656, +STORE, 47906204614656, 47906204618751, +STORE, 47906204622848, 47906204631039, +STORE, 47906204618752, 47906204622847, +ERASE, 47906204618752, 47906204618752, +STORE, 47906204618752, 47906204631039, +ERASE, 47906204618752, 47906204618752, +STORE, 47906204618752, 47906204622847, +STORE, 47906204622848, 47906204631039, +ERASE, 47906204622848, 47906204622848, +STORE, 47906204622848, 47906204631039, +STORE, 47906204631040, 47906204639231, +ERASE, 47906197295104, 47906197295104, +STORE, 47906197295104, 47906197311487, +STORE, 47906197311488, 47906197319679, +ERASE, 47906204622848, 47906204622848, +STORE, 47906204622848, 47906204626943, +STORE, 47906204626944, 47906204631039, +ERASE, 47906200211456, 47906200211456, +STORE, 47906200211456, 47906200215551, +STORE, 47906200215552, 47906200219647, +ERASE, 47906204594176, 47906204594176, +STORE, 47906204594176, 47906204598271, +STORE, 47906204598272, 47906204602367, +ERASE, 47906204119040, 47906204119040, +STORE, 47906204119040, 47906204123135, +STORE, 47906204123136, 47906204127231, +ERASE, 47906204082176, 47906204082176, +STORE, 47906204082176, 47906204086271, +STORE, 47906204086272, 47906204090367, +ERASE, 47906203836416, 47906203836416, +STORE, 47906203836416, 47906204033023, +STORE, 47906204033024, 47906204041215, +ERASE, 47906200952832, 47906200952832, +STORE, 47906200952832, 47906200989695, +STORE, 47906200989696, 47906201006079, +ERASE, 47906200399872, 47906200399872, +STORE, 47906200399872, 47906200403967, +STORE, 47906200403968, 47906200408063, +ERASE, 47906200084480, 47906200084480, +STORE, 47906200084480, 47906200088575, +STORE, 47906200088576, 47906200092671, +ERASE, 47906197815296, 47906197815296, +STORE, 47906197815296, 47906197835775, +STORE, 47906197835776, 47906197839871, +ERASE, 47906197483520, 47906197483520, +STORE, 47906197483520, 47906197487615, +STORE, 47906197487616, 47906197491711, +ERASE, 47906197438464, 47906197438464, +STORE, 47906197438464, 47906197442559, +STORE, 47906197442560, 47906197446655, +ERASE, 94641232658432, 94641232658432, +STORE, 94641232658432, 94641232781311, +STORE, 94641232781312, 94641232785407, +ERASE, 139726599675904, 139726599675904, +STORE, 139726599675904, 139726599679999, +STORE, 139726599680000, 139726599684095, +ERASE, 47906195480576, 47906195480576, +STORE, 94641242615808, 94641242750975, + }; + unsigned long set11[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140732658499584, 140737488351231, +ERASE, 140732658499584, 140732658499584, +STORE, 140732658499584, 140732658503679, +STORE, 94029856579584, 94029856751615, +ERASE, 
94029856579584, 94029856579584, +STORE, 94029856579584, 94029856595967, +STORE, 94029856595968, 94029856751615, +ERASE, 94029856595968, 94029856595968, +STORE, 94029856595968, 94029856698367, +STORE, 94029856698368, 94029856739327, +STORE, 94029856739328, 94029856751615, +STORE, 140014592573440, 140014592745471, +ERASE, 140014592573440, 140014592573440, +STORE, 140014592573440, 140014592577535, +STORE, 140014592577536, 140014592745471, +ERASE, 140014592577536, 140014592577536, +STORE, 140014592577536, 140014592700415, +STORE, 140014592700416, 140014592733183, +STORE, 140014592733184, 140014592741375, +STORE, 140014592741376, 140014592745471, +STORE, 140732658565120, 140732658569215, +STORE, 140732658552832, 140732658565119, + }; + + unsigned long set12[] = { /* contains 12 values. */ +STORE, 140737488347136, 140737488351231, +STORE, 140732658499584, 140737488351231, +ERASE, 140732658499584, 140732658499584, +STORE, 140732658499584, 140732658503679, +STORE, 94029856579584, 94029856751615, +ERASE, 94029856579584, 94029856579584, +STORE, 94029856579584, 94029856595967, +STORE, 94029856595968, 94029856751615, +ERASE, 94029856595968, 94029856595968, +STORE, 94029856595968, 94029856698367, +STORE, 94029856698368, 94029856739327, +STORE, 94029856739328, 94029856751615, +STORE, 140014592573440, 140014592745471, +ERASE, 140014592573440, 140014592573440, +STORE, 140014592573440, 140014592577535, +STORE, 140014592577536, 140014592745471, +ERASE, 140014592577536, 140014592577536, +STORE, 140014592577536, 140014592700415, +STORE, 140014592700416, 140014592733183, +STORE, 140014592733184, 140014592741375, +STORE, 140014592741376, 140014592745471, +STORE, 140732658565120, 140732658569215, +STORE, 140732658552832, 140732658565119, +STORE, 140014592741375, 140014592741375, /* contrived */ +STORE, 140014592733184, 140014592741376, /* creates first entry retry. 
*/ + }; + unsigned long set13[] = { +STORE, 140373516247040, 140373516251135,/*: ffffa2e7b0e10d80 */ +STORE, 140373516251136, 140373516255231,/*: ffffa2e7b1195d80 */ +STORE, 140373516255232, 140373516443647,/*: ffffa2e7b0e109c0 */ +STORE, 140373516443648, 140373516587007,/*: ffffa2e7b05fecc0 */ +/*STORE, 140373516587008, 140373516963839,//: 0000000000000000 */ +STORE, 140373516963840, 140373518647295,/*: ffffa2e7bfbdcc00 */ +STORE, 140373518647296, 140373518663679,/*: ffffa2e7bf5d59c0 */ +STORE, 140373518663680, 140373518684159,/*: deleted (257) */ +/*STORE, 140373518684160, 140373518680063,//: 0000000000000000 */ +STORE, 140373518680064, 140373518684159,/*: ffffa2e7b0e1cb40 */ +STORE, 140373518684160, 140373518688254,/*: ffffa2e7b05fec00 */ +STORE, 140373518688256, 140373518692351,/*: ffffa2e7bfbdcd80 */ +STORE, 140373518692352, 140373518696447,/*: ffffa2e7b0749e40 */ + }; + unsigned long set14[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140731667996672, 140737488351231, +SNULL, 140731668000767, 140737488351231, +STORE, 140731667996672, 140731668000767, +STORE, 140731667865600, 140731668000767, +STORE, 94077521272832, 94077521313791, +SNULL, 94077521301503, 94077521313791, +STORE, 94077521272832, 94077521301503, +STORE, 94077521301504, 94077521313791, +ERASE, 94077521301504, 94077521313791, +STORE, 94077521305600, 94077521313791, +STORE, 139826134630400, 139826136883199, +SNULL, 139826134773759, 139826136883199, +STORE, 139826134630400, 139826134773759, +STORE, 139826134773760, 139826136883199, +ERASE, 139826134773760, 139826136883199, +STORE, 139826136870912, 139826136879103, +STORE, 139826136879104, 139826136883199, +STORE, 140731668013056, 140731668017151, +STORE, 140731668000768, 140731668013055, +STORE, 139826136862720, 139826136870911, +STORE, 139826132406272, 139826134630399, +SNULL, 139826134056959, 139826134630399, +STORE, 139826132406272, 139826134056959, +STORE, 139826134056960, 139826134630399, +SNULL, 139826134056960, 139826134626303, +STORE, 139826134626304, 139826134630399, +STORE, 139826134056960, 139826134626303, +ERASE, 139826134056960, 139826134626303, +STORE, 139826134056960, 139826134626303, +ERASE, 139826134626304, 139826134630399, +STORE, 139826134626304, 139826134630399, +STORE, 139826136842240, 139826136862719, +STORE, 139826130022400, 139826132406271, +SNULL, 139826130022400, 139826130288639, +STORE, 139826130288640, 139826132406271, +STORE, 139826130022400, 139826130288639, +SNULL, 139826132381695, 139826132406271, +STORE, 139826130288640, 139826132381695, +STORE, 139826132381696, 139826132406271, +SNULL, 139826132381696, 139826132402175, +STORE, 139826132402176, 139826132406271, +STORE, 139826132381696, 139826132402175, +ERASE, 139826132381696, 139826132402175, +STORE, 139826132381696, 139826132402175, +ERASE, 139826132402176, 139826132406271, +STORE, 139826132402176, 139826132406271, +STORE, 139826127806464, 139826130022399, +SNULL, 139826127806464, 139826127904767, +STORE, 139826127904768, 139826130022399, +STORE, 139826127806464, 139826127904767, +SNULL, 139826129997823, 139826130022399, +STORE, 139826127904768, 139826129997823, +STORE, 139826129997824, 139826130022399, +SNULL, 139826129997824, 139826130006015, +STORE, 139826130006016, 139826130022399, +STORE, 139826129997824, 139826130006015, +ERASE, 139826129997824, 139826130006015, +STORE, 139826129997824, 139826130006015, +ERASE, 139826130006016, 139826130022399, +STORE, 139826130006016, 139826130022399, +STORE, 139826124009472, 139826127806463, +SNULL, 139826124009472, 139826125668351, 
+STORE, 139826125668352, 139826127806463, +STORE, 139826124009472, 139826125668351, +SNULL, 139826127765503, 139826127806463, +STORE, 139826125668352, 139826127765503, +STORE, 139826127765504, 139826127806463, +SNULL, 139826127765504, 139826127790079, +STORE, 139826127790080, 139826127806463, +STORE, 139826127765504, 139826127790079, +ERASE, 139826127765504, 139826127790079, +STORE, 139826127765504, 139826127790079, +ERASE, 139826127790080, 139826127806463, +STORE, 139826127790080, 139826127806463, +STORE, 139826121748480, 139826124009471, +SNULL, 139826121748480, 139826121900031, +STORE, 139826121900032, 139826124009471, +STORE, 139826121748480, 139826121900031, +SNULL, 139826123993087, 139826124009471, +STORE, 139826121900032, 139826123993087, +STORE, 139826123993088, 139826124009471, +SNULL, 139826123993088, 139826124001279, +STORE, 139826124001280, 139826124009471, +STORE, 139826123993088, 139826124001279, +ERASE, 139826123993088, 139826124001279, +STORE, 139826123993088, 139826124001279, +ERASE, 139826124001280, 139826124009471, +STORE, 139826124001280, 139826124009471, +STORE, 139826119626752, 139826121748479, +SNULL, 139826119626752, 139826119643135, +STORE, 139826119643136, 139826121748479, +STORE, 139826119626752, 139826119643135, +SNULL, 139826121740287, 139826121748479, +STORE, 139826119643136, 139826121740287, +STORE, 139826121740288, 139826121748479, +ERASE, 139826121740288, 139826121748479, +STORE, 139826121740288, 139826121748479, +STORE, 139826136834048, 139826136842239, +STORE, 139826117496832, 139826119626751, +SNULL, 139826117496832, 139826117525503, +STORE, 139826117525504, 139826119626751, +STORE, 139826117496832, 139826117525503, +SNULL, 139826119618559, 139826119626751, +STORE, 139826117525504, 139826119618559, +STORE, 139826119618560, 139826119626751, +ERASE, 139826119618560, 139826119626751, +STORE, 139826119618560, 139826119626751, +STORE, 139826115244032, 139826117496831, +SNULL, 139826115244032, 139826115395583, +STORE, 139826115395584, 139826117496831, +STORE, 139826115244032, 139826115395583, +SNULL, 139826117488639, 139826117496831, +STORE, 139826115395584, 139826117488639, +STORE, 139826117488640, 139826117496831, +ERASE, 139826117488640, 139826117496831, +STORE, 139826117488640, 139826117496831, +STORE, 139826113073152, 139826115244031, +SNULL, 139826113073152, 139826113142783, +STORE, 139826113142784, 139826115244031, +STORE, 139826113073152, 139826113142783, +SNULL, 139826115235839, 139826115244031, +STORE, 139826113142784, 139826115235839, +STORE, 139826115235840, 139826115244031, +ERASE, 139826115235840, 139826115244031, +STORE, 139826115235840, 139826115244031, +STORE, 139826109861888, 139826113073151, +SNULL, 139826109861888, 139826110939135, +STORE, 139826110939136, 139826113073151, +STORE, 139826109861888, 139826110939135, +SNULL, 139826113036287, 139826113073151, +STORE, 139826110939136, 139826113036287, +STORE, 139826113036288, 139826113073151, +ERASE, 139826113036288, 139826113073151, +STORE, 139826113036288, 139826113073151, +STORE, 139826107727872, 139826109861887, +SNULL, 139826107727872, 139826107756543, +STORE, 139826107756544, 139826109861887, +STORE, 139826107727872, 139826107756543, +SNULL, 139826109853695, 139826109861887, +STORE, 139826107756544, 139826109853695, +STORE, 139826109853696, 139826109861887, +ERASE, 139826109853696, 139826109861887, +STORE, 139826109853696, 139826109861887, +STORE, 139826105417728, 139826107727871, +SNULL, 139826105417728, 139826105622527, +STORE, 139826105622528, 139826107727871, +STORE, 139826105417728, 
139826105622527, +SNULL, 139826107719679, 139826107727871, +STORE, 139826105622528, 139826107719679, +STORE, 139826107719680, 139826107727871, +ERASE, 139826107719680, 139826107727871, +STORE, 139826107719680, 139826107727871, +STORE, 139826136825856, 139826136842239, +STORE, 139826103033856, 139826105417727, +SNULL, 139826103033856, 139826103226367, +STORE, 139826103226368, 139826105417727, +STORE, 139826103033856, 139826103226367, +SNULL, 139826105319423, 139826105417727, +STORE, 139826103226368, 139826105319423, +STORE, 139826105319424, 139826105417727, +ERASE, 139826105319424, 139826105417727, +STORE, 139826105319424, 139826105417727, +STORE, 139826100916224, 139826103033855, +SNULL, 139826100916224, 139826100932607, +STORE, 139826100932608, 139826103033855, +STORE, 139826100916224, 139826100932607, +SNULL, 139826103025663, 139826103033855, +STORE, 139826100932608, 139826103025663, +STORE, 139826103025664, 139826103033855, +ERASE, 139826103025664, 139826103033855, +STORE, 139826103025664, 139826103033855, +STORE, 139826098348032, 139826100916223, +SNULL, 139826098348032, 139826098814975, +STORE, 139826098814976, 139826100916223, +STORE, 139826098348032, 139826098814975, +SNULL, 139826100908031, 139826100916223, +STORE, 139826098814976, 139826100908031, +STORE, 139826100908032, 139826100916223, +ERASE, 139826100908032, 139826100916223, +STORE, 139826100908032, 139826100916223, +STORE, 139826096234496, 139826098348031, +SNULL, 139826096234496, 139826096246783, +STORE, 139826096246784, 139826098348031, +STORE, 139826096234496, 139826096246783, +SNULL, 139826098339839, 139826098348031, +STORE, 139826096246784, 139826098339839, +STORE, 139826098339840, 139826098348031, +ERASE, 139826098339840, 139826098348031, +STORE, 139826098339840, 139826098348031, +STORE, 139826094055424, 139826096234495, +SNULL, 139826094055424, 139826094133247, +STORE, 139826094133248, 139826096234495, +STORE, 139826094055424, 139826094133247, +SNULL, 139826096226303, 139826096234495, +STORE, 139826094133248, 139826096226303, +STORE, 139826096226304, 139826096234495, +ERASE, 139826096226304, 139826096234495, +STORE, 139826096226304, 139826096234495, +STORE, 139826136817664, 139826136842239, +STORE, 139826091937792, 139826094055423, +SNULL, 139826091937792, 139826091954175, +STORE, 139826091954176, 139826094055423, +STORE, 139826091937792, 139826091954175, +SNULL, 139826094047231, 139826094055423, +STORE, 139826091954176, 139826094047231, +STORE, 139826094047232, 139826094055423, +ERASE, 139826094047232, 139826094055423, +STORE, 139826094047232, 139826094055423, +STORE, 139826136809472, 139826136842239, +SNULL, 139826127781887, 139826127790079, +STORE, 139826127765504, 139826127781887, +STORE, 139826127781888, 139826127790079, +SNULL, 139826094051327, 139826094055423, +STORE, 139826094047232, 139826094051327, +STORE, 139826094051328, 139826094055423, +SNULL, 139826096230399, 139826096234495, +STORE, 139826096226304, 139826096230399, +STORE, 139826096230400, 139826096234495, +SNULL, 139826098343935, 139826098348031, +STORE, 139826098339840, 139826098343935, +STORE, 139826098343936, 139826098348031, +SNULL, 139826130001919, 139826130006015, +STORE, 139826129997824, 139826130001919, +STORE, 139826130001920, 139826130006015, +SNULL, 139826100912127, 139826100916223, +STORE, 139826100908032, 139826100912127, +STORE, 139826100912128, 139826100916223, +SNULL, 139826103029759, 139826103033855, +STORE, 139826103025664, 139826103029759, +STORE, 139826103029760, 139826103033855, +SNULL, 139826105413631, 139826105417727, +STORE, 
139826105319424, 139826105413631, +STORE, 139826105413632, 139826105417727, +SNULL, 139826107723775, 139826107727871, +STORE, 139826107719680, 139826107723775, +STORE, 139826107723776, 139826107727871, +SNULL, 139826109857791, 139826109861887, +STORE, 139826109853696, 139826109857791, +STORE, 139826109857792, 139826109861887, +SNULL, 139826113044479, 139826113073151, +STORE, 139826113036288, 139826113044479, +STORE, 139826113044480, 139826113073151, +SNULL, 139826115239935, 139826115244031, +STORE, 139826115235840, 139826115239935, +STORE, 139826115239936, 139826115244031, +SNULL, 139826117492735, 139826117496831, +STORE, 139826117488640, 139826117492735, +STORE, 139826117492736, 139826117496831, +SNULL, 139826119622655, 139826119626751, +STORE, 139826119618560, 139826119622655, +STORE, 139826119622656, 139826119626751, +SNULL, 139826121744383, 139826121748479, +STORE, 139826121740288, 139826121744383, +STORE, 139826121744384, 139826121748479, +SNULL, 139826123997183, 139826124001279, +STORE, 139826123993088, 139826123997183, +STORE, 139826123997184, 139826124001279, +SNULL, 139826132398079, 139826132402175, +STORE, 139826132381696, 139826132398079, +STORE, 139826132398080, 139826132402175, +SNULL, 139826134622207, 139826134626303, +STORE, 139826134056960, 139826134622207, +STORE, 139826134622208, 139826134626303, +SNULL, 94077521309695, 94077521313791, +STORE, 94077521305600, 94077521309695, +STORE, 94077521309696, 94077521313791, +SNULL, 139826136875007, 139826136879103, +STORE, 139826136870912, 139826136875007, +STORE, 139826136875008, 139826136879103, +ERASE, 139826136842240, 139826136862719, +STORE, 94077554049024, 94077554184191, +STORE, 139826136543232, 139826136842239, +STORE, 139826136276992, 139826136842239, +STORE, 139826136010752, 139826136842239, +STORE, 139826135744512, 139826136842239, +SNULL, 139826136543231, 139826136842239, +STORE, 139826135744512, 139826136543231, +STORE, 139826136543232, 139826136842239, +SNULL, 139826136543232, 139826136809471, +STORE, 139826136809472, 139826136842239, +STORE, 139826136543232, 139826136809471, + }; + unsigned long set15[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140722061451264, 140737488351231, +SNULL, 140722061455359, 140737488351231, +STORE, 140722061451264, 140722061455359, +STORE, 140722061320192, 140722061455359, +STORE, 94728600248320, 94728600289279, +SNULL, 94728600276991, 94728600289279, +STORE, 94728600248320, 94728600276991, +STORE, 94728600276992, 94728600289279, +ERASE, 94728600276992, 94728600289279, +STORE, 94728600281088, 94728600289279, +STORE, 139906806779904, 139906809032703, +SNULL, 139906806923263, 139906809032703, +STORE, 139906806779904, 139906806923263, +STORE, 139906806923264, 139906809032703, +ERASE, 139906806923264, 139906809032703, +STORE, 139906809020416, 139906809028607, +STORE, 139906809028608, 139906809032703, +STORE, 140722061692928, 140722061697023, +STORE, 140722061680640, 140722061692927, +STORE, 139906809012224, 139906809020415, +STORE, 139906804555776, 139906806779903, +SNULL, 139906806206463, 139906806779903, +STORE, 139906804555776, 139906806206463, +STORE, 139906806206464, 139906806779903, +SNULL, 139906806206464, 139906806775807, +STORE, 139906806775808, 139906806779903, +STORE, 139906806206464, 139906806775807, +ERASE, 139906806206464, 139906806775807, +STORE, 139906806206464, 139906806775807, +ERASE, 139906806775808, 139906806779903, +STORE, 139906806775808, 139906806779903, +STORE, 139906808991744, 139906809012223, +STORE, 139906802171904, 139906804555775, +SNULL, 
139906802171904, 139906802438143, +STORE, 139906802438144, 139906804555775, +STORE, 139906802171904, 139906802438143, +SNULL, 139906804531199, 139906804555775, +STORE, 139906802438144, 139906804531199, +STORE, 139906804531200, 139906804555775, +SNULL, 139906804531200, 139906804551679, +STORE, 139906804551680, 139906804555775, +STORE, 139906804531200, 139906804551679, +ERASE, 139906804531200, 139906804551679, +STORE, 139906804531200, 139906804551679, +ERASE, 139906804551680, 139906804555775, +STORE, 139906804551680, 139906804555775, +STORE, 139906799955968, 139906802171903, +SNULL, 139906799955968, 139906800054271, +STORE, 139906800054272, 139906802171903, +STORE, 139906799955968, 139906800054271, +SNULL, 139906802147327, 139906802171903, +STORE, 139906800054272, 139906802147327, +STORE, 139906802147328, 139906802171903, +SNULL, 139906802147328, 139906802155519, +STORE, 139906802155520, 139906802171903, +STORE, 139906802147328, 139906802155519, +ERASE, 139906802147328, 139906802155519, +STORE, 139906802147328, 139906802155519, +ERASE, 139906802155520, 139906802171903, +STORE, 139906802155520, 139906802171903, +STORE, 139906796158976, 139906799955967, +SNULL, 139906796158976, 139906797817855, +STORE, 139906797817856, 139906799955967, +STORE, 139906796158976, 139906797817855, +SNULL, 139906799915007, 139906799955967, +STORE, 139906797817856, 139906799915007, +STORE, 139906799915008, 139906799955967, +SNULL, 139906799915008, 139906799939583, +STORE, 139906799939584, 139906799955967, +STORE, 139906799915008, 139906799939583, +ERASE, 139906799915008, 139906799939583, +STORE, 139906799915008, 139906799939583, +ERASE, 139906799939584, 139906799955967, +STORE, 139906799939584, 139906799955967, +STORE, 139906793897984, 139906796158975, +SNULL, 139906793897984, 139906794049535, +STORE, 139906794049536, 139906796158975, +STORE, 139906793897984, 139906794049535, +SNULL, 139906796142591, 139906796158975, +STORE, 139906794049536, 139906796142591, +STORE, 139906796142592, 139906796158975, +SNULL, 139906796142592, 139906796150783, +STORE, 139906796150784, 139906796158975, +STORE, 139906796142592, 139906796150783, +ERASE, 139906796142592, 139906796150783, +STORE, 139906796142592, 139906796150783, +ERASE, 139906796150784, 139906796158975, +STORE, 139906796150784, 139906796158975, +STORE, 139906791776256, 139906793897983, +SNULL, 139906791776256, 139906791792639, +STORE, 139906791792640, 139906793897983, +STORE, 139906791776256, 139906791792639, +SNULL, 139906793889791, 139906793897983, +STORE, 139906791792640, 139906793889791, +STORE, 139906793889792, 139906793897983, +ERASE, 139906793889792, 139906793897983, +STORE, 139906793889792, 139906793897983, +STORE, 139906808983552, 139906808991743, +STORE, 139906789646336, 139906791776255, +SNULL, 139906789646336, 139906789675007, +STORE, 139906789675008, 139906791776255, +STORE, 139906789646336, 139906789675007, +SNULL, 139906791768063, 139906791776255, +STORE, 139906789675008, 139906791768063, +STORE, 139906791768064, 139906791776255, +ERASE, 139906791768064, 139906791776255, +STORE, 139906791768064, 139906791776255, +STORE, 139906787393536, 139906789646335, +SNULL, 139906787393536, 139906787545087, +STORE, 139906787545088, 139906789646335, +STORE, 139906787393536, 139906787545087, +SNULL, 139906789638143, 139906789646335, +STORE, 139906787545088, 139906789638143, +STORE, 139906789638144, 139906789646335, +ERASE, 139906789638144, 139906789646335, +STORE, 139906789638144, 139906789646335, +STORE, 139906785222656, 139906787393535, +SNULL, 139906785222656, 
139906785292287, +STORE, 139906785292288, 139906787393535, +STORE, 139906785222656, 139906785292287, +SNULL, 139906787385343, 139906787393535, +STORE, 139906785292288, 139906787385343, +STORE, 139906787385344, 139906787393535, +ERASE, 139906787385344, 139906787393535, +STORE, 139906787385344, 139906787393535, +STORE, 139906782011392, 139906785222655, +SNULL, 139906782011392, 139906783088639, +STORE, 139906783088640, 139906785222655, +STORE, 139906782011392, 139906783088639, +SNULL, 139906785185791, 139906785222655, +STORE, 139906783088640, 139906785185791, +STORE, 139906785185792, 139906785222655, +ERASE, 139906785185792, 139906785222655, +STORE, 139906785185792, 139906785222655, +STORE, 139906779877376, 139906782011391, +SNULL, 139906779877376, 139906779906047, +STORE, 139906779906048, 139906782011391, +STORE, 139906779877376, 139906779906047, +SNULL, 139906782003199, 139906782011391, +STORE, 139906779906048, 139906782003199, +STORE, 139906782003200, 139906782011391, +ERASE, 139906782003200, 139906782011391, +STORE, 139906782003200, 139906782011391, +STORE, 139906777567232, 139906779877375, +SNULL, 139906777567232, 139906777772031, +STORE, 139906777772032, 139906779877375, +STORE, 139906777567232, 139906777772031, +SNULL, 139906779869183, 139906779877375, +STORE, 139906777772032, 139906779869183, +STORE, 139906779869184, 139906779877375, +ERASE, 139906779869184, 139906779877375, +STORE, 139906779869184, 139906779877375, +STORE, 139906808975360, 139906808991743, +STORE, 139906775183360, 139906777567231, +SNULL, 139906775183360, 139906775375871, +STORE, 139906775375872, 139906777567231, +STORE, 139906775183360, 139906775375871, +SNULL, 139906777468927, 139906777567231, +STORE, 139906775375872, 139906777468927, +STORE, 139906777468928, 139906777567231, +ERASE, 139906777468928, 139906777567231, +STORE, 139906777468928, 139906777567231, +STORE, 139906773065728, 139906775183359, +SNULL, 139906773065728, 139906773082111, +STORE, 139906773082112, 139906775183359, +STORE, 139906773065728, 139906773082111, +SNULL, 139906775175167, 139906775183359, +STORE, 139906773082112, 139906775175167, +STORE, 139906775175168, 139906775183359, +ERASE, 139906775175168, 139906775183359, +STORE, 139906775175168, 139906775183359, +STORE, 139906770497536, 139906773065727, +SNULL, 139906770497536, 139906770964479, +STORE, 139906770964480, 139906773065727, +STORE, 139906770497536, 139906770964479, +SNULL, 139906773057535, 139906773065727, +STORE, 139906770964480, 139906773057535, +STORE, 139906773057536, 139906773065727, +ERASE, 139906773057536, 139906773065727, +STORE, 139906773057536, 139906773065727, +STORE, 139906768384000, 139906770497535, +SNULL, 139906768384000, 139906768396287, +STORE, 139906768396288, 139906770497535, +STORE, 139906768384000, 139906768396287, +SNULL, 139906770489343, 139906770497535, +STORE, 139906768396288, 139906770489343, +STORE, 139906770489344, 139906770497535, +ERASE, 139906770489344, 139906770497535, +STORE, 139906770489344, 139906770497535, +STORE, 139906766204928, 139906768383999, +SNULL, 139906766204928, 139906766282751, +STORE, 139906766282752, 139906768383999, +STORE, 139906766204928, 139906766282751, +SNULL, 139906768375807, 139906768383999, +STORE, 139906766282752, 139906768375807, +STORE, 139906768375808, 139906768383999, +ERASE, 139906768375808, 139906768383999, +STORE, 139906768375808, 139906768383999, +STORE, 139906808967168, 139906808991743, +STORE, 139906764087296, 139906766204927, +SNULL, 139906764087296, 139906764103679, +STORE, 139906764103680, 139906766204927, +STORE, 
139906764087296, 139906764103679, +SNULL, 139906766196735, 139906766204927, +STORE, 139906764103680, 139906766196735, +STORE, 139906766196736, 139906766204927, +ERASE, 139906766196736, 139906766204927, +STORE, 139906766196736, 139906766204927, +STORE, 139906808958976, 139906808991743, +SNULL, 139906799931391, 139906799939583, +STORE, 139906799915008, 139906799931391, +STORE, 139906799931392, 139906799939583, +SNULL, 139906766200831, 139906766204927, +STORE, 139906766196736, 139906766200831, +STORE, 139906766200832, 139906766204927, +SNULL, 139906768379903, 139906768383999, +STORE, 139906768375808, 139906768379903, +STORE, 139906768379904, 139906768383999, +SNULL, 139906770493439, 139906770497535, +STORE, 139906770489344, 139906770493439, +STORE, 139906770493440, 139906770497535, +SNULL, 139906802151423, 139906802155519, +STORE, 139906802147328, 139906802151423, +STORE, 139906802151424, 139906802155519, +SNULL, 139906773061631, 139906773065727, +STORE, 139906773057536, 139906773061631, +STORE, 139906773061632, 139906773065727, +SNULL, 139906775179263, 139906775183359, +STORE, 139906775175168, 139906775179263, +STORE, 139906775179264, 139906775183359, +SNULL, 139906777563135, 139906777567231, +STORE, 139906777468928, 139906777563135, +STORE, 139906777563136, 139906777567231, +SNULL, 139906779873279, 139906779877375, +STORE, 139906779869184, 139906779873279, +STORE, 139906779873280, 139906779877375, +SNULL, 139906782007295, 139906782011391, +STORE, 139906782003200, 139906782007295, +STORE, 139906782007296, 139906782011391, +SNULL, 139906785193983, 139906785222655, +STORE, 139906785185792, 139906785193983, +STORE, 139906785193984, 139906785222655, +SNULL, 139906787389439, 139906787393535, +STORE, 139906787385344, 139906787389439, +STORE, 139906787389440, 139906787393535, +SNULL, 139906789642239, 139906789646335, +STORE, 139906789638144, 139906789642239, +STORE, 139906789642240, 139906789646335, +SNULL, 139906791772159, 139906791776255, +STORE, 139906791768064, 139906791772159, +STORE, 139906791772160, 139906791776255, +SNULL, 139906793893887, 139906793897983, +STORE, 139906793889792, 139906793893887, +STORE, 139906793893888, 139906793897983, +SNULL, 139906796146687, 139906796150783, +STORE, 139906796142592, 139906796146687, +STORE, 139906796146688, 139906796150783, +SNULL, 139906804547583, 139906804551679, +STORE, 139906804531200, 139906804547583, +STORE, 139906804547584, 139906804551679, +SNULL, 139906806771711, 139906806775807, +STORE, 139906806206464, 139906806771711, +STORE, 139906806771712, 139906806775807, +SNULL, 94728600285183, 94728600289279, +STORE, 94728600281088, 94728600285183, +STORE, 94728600285184, 94728600289279, +SNULL, 139906809024511, 139906809028607, +STORE, 139906809020416, 139906809024511, +STORE, 139906809024512, 139906809028607, +ERASE, 139906808991744, 139906809012223, +STORE, 94728620138496, 94728620273663, +STORE, 139906808692736, 139906808991743, +STORE, 139906808426496, 139906808991743, +STORE, 139906808160256, 139906808991743, +STORE, 139906807894016, 139906808991743, +SNULL, 139906808692735, 139906808991743, +STORE, 139906807894016, 139906808692735, +STORE, 139906808692736, 139906808991743, +SNULL, 139906808692736, 139906808958975, +STORE, 139906808958976, 139906808991743, +STORE, 139906808692736, 139906808958975, + }; + + unsigned long set16[] = { +STORE, 94174808662016, 94174809321471, +STORE, 94174811414528, 94174811426815, +STORE, 94174811426816, 94174811430911, +STORE, 94174811430912, 94174811443199, +STORE, 94174841700352, 94174841835519, +STORE, 
140173257838592, 140173259497471, +STORE, 140173259497472, 140173261594623, +STORE, 140173261594624, 140173261611007, +STORE, 140173261611008, 140173261619199, +STORE, 140173261619200, 140173261635583, +STORE, 140173261635584, 140173261778943, +STORE, 140173263863808, 140173263871999, +STORE, 140173263876096, 140173263880191, +STORE, 140173263880192, 140173263884287, +STORE, 140173263884288, 140173263888383, +STORE, 140729801007104, 140729801142271, +STORE, 140729801617408, 140729801629695, +STORE, 140729801629696, 140729801633791, +STORE, 140737488347136, 140737488351231, +STORE, 140728166858752, 140737488351231, +SNULL, 140728166862847, 140737488351231, +STORE, 140728166858752, 140728166862847, +STORE, 140728166727680, 140728166862847, +STORE, 93912949866496, 93912950337535, +SNULL, 93912950288383, 93912950337535, +STORE, 93912949866496, 93912950288383, +STORE, 93912950288384, 93912950337535, +ERASE, 93912950288384, 93912950337535, +STORE, 93912950292480, 93912950337535, +STORE, 139921863385088, 139921865637887, +SNULL, 139921863528447, 139921865637887, +STORE, 139921863385088, 139921863528447, +STORE, 139921863528448, 139921865637887, +ERASE, 139921863528448, 139921865637887, +STORE, 139921865625600, 139921865633791, +STORE, 139921865633792, 139921865637887, +STORE, 140728167899136, 140728167903231, +STORE, 140728167886848, 140728167899135, +STORE, 139921865601024, 139921865625599, +STORE, 139921865592832, 139921865601023, +STORE, 139921861251072, 139921863385087, +SNULL, 139921861251072, 139921861279743, +STORE, 139921861279744, 139921863385087, +STORE, 139921861251072, 139921861279743, +SNULL, 139921863376895, 139921863385087, +STORE, 139921861279744, 139921863376895, +STORE, 139921863376896, 139921863385087, +ERASE, 139921863376896, 139921863385087, +STORE, 139921863376896, 139921863385087, +STORE, 139921858867200, 139921861251071, +SNULL, 139921858867200, 139921859133439, +STORE, 139921859133440, 139921861251071, +STORE, 139921858867200, 139921859133439, +SNULL, 139921861226495, 139921861251071, +STORE, 139921859133440, 139921861226495, +STORE, 139921861226496, 139921861251071, +SNULL, 139921861226496, 139921861246975, +STORE, 139921861246976, 139921861251071, +STORE, 139921861226496, 139921861246975, +ERASE, 139921861226496, 139921861246975, +STORE, 139921861226496, 139921861246975, +ERASE, 139921861246976, 139921861251071, +STORE, 139921861246976, 139921861251071, +STORE, 139921856675840, 139921858867199, +SNULL, 139921856675840, 139921856765951, +STORE, 139921856765952, 139921858867199, +STORE, 139921856675840, 139921856765951, +SNULL, 139921858859007, 139921858867199, +STORE, 139921856765952, 139921858859007, +STORE, 139921858859008, 139921858867199, +ERASE, 139921858859008, 139921858867199, +STORE, 139921858859008, 139921858867199, +STORE, 139921854414848, 139921856675839, +SNULL, 139921854414848, 139921854566399, +STORE, 139921854566400, 139921856675839, +STORE, 139921854414848, 139921854566399, +SNULL, 139921856659455, 139921856675839, +STORE, 139921854566400, 139921856659455, +STORE, 139921856659456, 139921856675839, +SNULL, 139921856659456, 139921856667647, +STORE, 139921856667648, 139921856675839, +STORE, 139921856659456, 139921856667647, +ERASE, 139921856659456, 139921856667647, +STORE, 139921856659456, 139921856667647, +ERASE, 139921856667648, 139921856675839, +STORE, 139921856667648, 139921856675839, +STORE, 139921852284928, 139921854414847, +SNULL, 139921852284928, 139921852313599, +STORE, 139921852313600, 139921854414847, +STORE, 139921852284928, 139921852313599, 
+SNULL, 139921854406655, 139921854414847, +STORE, 139921852313600, 139921854406655, +STORE, 139921854406656, 139921854414847, +ERASE, 139921854406656, 139921854414847, +STORE, 139921854406656, 139921854414847, +STORE, 139921850068992, 139921852284927, +SNULL, 139921850068992, 139921850167295, +STORE, 139921850167296, 139921852284927, +STORE, 139921850068992, 139921850167295, +SNULL, 139921852260351, 139921852284927, +STORE, 139921850167296, 139921852260351, +STORE, 139921852260352, 139921852284927, +SNULL, 139921852260352, 139921852268543, +STORE, 139921852268544, 139921852284927, +STORE, 139921852260352, 139921852268543, +ERASE, 139921852260352, 139921852268543, +STORE, 139921852260352, 139921852268543, +ERASE, 139921852268544, 139921852284927, +STORE, 139921852268544, 139921852284927, +STORE, 139921865584640, 139921865601023, +STORE, 139921846272000, 139921850068991, +SNULL, 139921846272000, 139921847930879, +STORE, 139921847930880, 139921850068991, +STORE, 139921846272000, 139921847930879, +SNULL, 139921850028031, 139921850068991, +STORE, 139921847930880, 139921850028031, +STORE, 139921850028032, 139921850068991, +SNULL, 139921850028032, 139921850052607, +STORE, 139921850052608, 139921850068991, +STORE, 139921850028032, 139921850052607, +ERASE, 139921850028032, 139921850052607, +STORE, 139921850028032, 139921850052607, +ERASE, 139921850052608, 139921850068991, +STORE, 139921850052608, 139921850068991, +STORE, 139921844154368, 139921846271999, +SNULL, 139921844154368, 139921844170751, +STORE, 139921844170752, 139921846271999, +STORE, 139921844154368, 139921844170751, +SNULL, 139921846263807, 139921846271999, +STORE, 139921844170752, 139921846263807, +STORE, 139921846263808, 139921846271999, +ERASE, 139921846263808, 139921846271999, +STORE, 139921846263808, 139921846271999, +STORE, 139921842036736, 139921844154367, +SNULL, 139921842036736, 139921842053119, +STORE, 139921842053120, 139921844154367, +STORE, 139921842036736, 139921842053119, +SNULL, 139921844146175, 139921844154367, +STORE, 139921842053120, 139921844146175, +STORE, 139921844146176, 139921844154367, +ERASE, 139921844146176, 139921844154367, +STORE, 139921844146176, 139921844154367, +STORE, 139921839468544, 139921842036735, +SNULL, 139921839468544, 139921839935487, +STORE, 139921839935488, 139921842036735, +STORE, 139921839468544, 139921839935487, +SNULL, 139921842028543, 139921842036735, +STORE, 139921839935488, 139921842028543, +STORE, 139921842028544, 139921842036735, +ERASE, 139921842028544, 139921842036735, +STORE, 139921842028544, 139921842036735, +STORE, 139921837355008, 139921839468543, +SNULL, 139921837355008, 139921837367295, +STORE, 139921837367296, 139921839468543, +STORE, 139921837355008, 139921837367295, +SNULL, 139921839460351, 139921839468543, +STORE, 139921837367296, 139921839460351, +STORE, 139921839460352, 139921839468543, +ERASE, 139921839460352, 139921839468543, +STORE, 139921839460352, 139921839468543, +STORE, 139921865576448, 139921865601023, +STORE, 139921865564160, 139921865601023, +SNULL, 139921850044415, 139921850052607, +STORE, 139921850028032, 139921850044415, +STORE, 139921850044416, 139921850052607, +SNULL, 139921839464447, 139921839468543, +STORE, 139921839460352, 139921839464447, +STORE, 139921839464448, 139921839468543, +SNULL, 139921852264447, 139921852268543, +STORE, 139921852260352, 139921852264447, +STORE, 139921852264448, 139921852268543, +SNULL, 139921842032639, 139921842036735, +STORE, 139921842028544, 139921842032639, +STORE, 139921842032640, 139921842036735, +SNULL, 139921844150271, 
139921844154367, +STORE, 139921844146176, 139921844150271, +STORE, 139921844150272, 139921844154367, +SNULL, 139921846267903, 139921846271999, +STORE, 139921846263808, 139921846267903, +STORE, 139921846267904, 139921846271999, +SNULL, 139921854410751, 139921854414847, +STORE, 139921854406656, 139921854410751, +STORE, 139921854410752, 139921854414847, +SNULL, 139921856663551, 139921856667647, +STORE, 139921856659456, 139921856663551, +STORE, 139921856663552, 139921856667647, +SNULL, 139921858863103, 139921858867199, +STORE, 139921858859008, 139921858863103, +STORE, 139921858863104, 139921858867199, +SNULL, 139921861242879, 139921861246975, +STORE, 139921861226496, 139921861242879, +STORE, 139921861242880, 139921861246975, +SNULL, 139921863380991, 139921863385087, +STORE, 139921863376896, 139921863380991, +STORE, 139921863380992, 139921863385087, +SNULL, 93912950333439, 93912950337535, +STORE, 93912950292480, 93912950333439, +STORE, 93912950333440, 93912950337535, +SNULL, 139921865629695, 139921865633791, +STORE, 139921865625600, 139921865629695, +STORE, 139921865629696, 139921865633791, +ERASE, 139921865601024, 139921865625599, +STORE, 93912968110080, 93912968245247, +STORE, 139921828913152, 139921837355007, +STORE, 139921865621504, 139921865625599, +STORE, 139921865617408, 139921865621503, +STORE, 139921865613312, 139921865617407, +STORE, 139921865547776, 139921865564159, + }; + + unsigned long set17[] = { +STORE, 94397057224704, 94397057646591, +STORE, 94397057650688, 94397057691647, +STORE, 94397057691648, 94397057695743, +STORE, 94397075271680, 94397075406847, +STORE, 139953169051648, 139953169063935, +STORE, 139953169063936, 139953171156991, +STORE, 139953171156992, 139953171161087, +STORE, 139953171161088, 139953171165183, +STORE, 139953171165184, 139953171632127, +STORE, 139953171632128, 139953173725183, +STORE, 139953173725184, 139953173729279, +STORE, 139953173729280, 139953173733375, +STORE, 139953173733376, 139953173749759, +STORE, 139953173749760, 139953175842815, +STORE, 139953175842816, 139953175846911, +STORE, 139953175846912, 139953175851007, +STORE, 139953175851008, 139953175867391, +STORE, 139953175867392, 139953177960447, +STORE, 139953177960448, 139953177964543, +STORE, 139953177964544, 139953177968639, +STORE, 139953177968640, 139953179627519, +STORE, 139953179627520, 139953181724671, +STORE, 139953181724672, 139953181741055, +STORE, 139953181741056, 139953181749247, +STORE, 139953181749248, 139953181765631, +STORE, 139953181765632, 139953181863935, +STORE, 139953181863936, 139953183956991, +STORE, 139953183956992, 139953183961087, +STORE, 139953183961088, 139953183965183, +STORE, 139953183965184, 139953183981567, +STORE, 139953183981568, 139953184010239, +STORE, 139953184010240, 139953186103295, +STORE, 139953186103296, 139953186107391, +STORE, 139953186107392, 139953186111487, +STORE, 139953186111488, 139953186263039, +STORE, 139953186263040, 139953188356095, +STORE, 139953188356096, 139953188360191, +STORE, 139953188360192, 139953188364287, +STORE, 139953188364288, 139953188372479, +STORE, 139953188372480, 139953188462591, +STORE, 139953188462592, 139953190555647, +STORE, 139953190555648, 139953190559743, +STORE, 139953190559744, 139953190563839, +STORE, 139953190563840, 139953190830079, +STORE, 139953190830080, 139953192923135, +STORE, 139953192923136, 139953192939519, +STORE, 139953192939520, 139953192943615, +STORE, 139953192943616, 139953192947711, +STORE, 139953192947712, 139953192976383, +STORE, 139953192976384, 139953195073535, +STORE, 139953195073536, 
139953195077631, +STORE, 139953195077632, 139953195081727, +STORE, 139953195081728, 139953195225087, +STORE, 139953197281280, 139953197318143, +STORE, 139953197322240, 139953197326335, +STORE, 139953197326336, 139953197330431, +STORE, 139953197330432, 139953197334527, +STORE, 140720477511680, 140720477646847, +STORE, 140720478302208, 140720478314495, +STORE, 140720478314496, 140720478318591, + }; + unsigned long set18[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140724953673728, 140737488351231, +SNULL, 140724953677823, 140737488351231, +STORE, 140724953673728, 140724953677823, +STORE, 140724953542656, 140724953677823, +STORE, 94675199266816, 94675199311871, +SNULL, 94675199303679, 94675199311871, +STORE, 94675199266816, 94675199303679, +STORE, 94675199303680, 94675199311871, +ERASE, 94675199303680, 94675199311871, +STORE, 94675199303680, 94675199311871, +STORE, 140222970605568, 140222972858367, +SNULL, 140222970748927, 140222972858367, +STORE, 140222970605568, 140222970748927, +STORE, 140222970748928, 140222972858367, +ERASE, 140222970748928, 140222972858367, +STORE, 140222972846080, 140222972854271, +STORE, 140222972854272, 140222972858367, +STORE, 140724954365952, 140724954370047, +STORE, 140724954353664, 140724954365951, +STORE, 140222972841984, 140222972846079, +STORE, 140222972833792, 140222972841983, +STORE, 140222968475648, 140222970605567, +SNULL, 140222968475648, 140222968504319, +STORE, 140222968504320, 140222970605567, +STORE, 140222968475648, 140222968504319, +SNULL, 140222970597375, 140222970605567, +STORE, 140222968504320, 140222970597375, +STORE, 140222970597376, 140222970605567, +ERASE, 140222970597376, 140222970605567, +STORE, 140222970597376, 140222970605567, + }; + unsigned long set19[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140725182459904, 140737488351231, +SNULL, 140725182463999, 140737488351231, +STORE, 140725182459904, 140725182463999, +STORE, 140725182328832, 140725182463999, +STORE, 94730166636544, 94730166763519, +SNULL, 94730166747135, 94730166763519, +STORE, 94730166636544, 94730166747135, +STORE, 94730166747136, 94730166763519, +ERASE, 94730166747136, 94730166763519, +STORE, 94730166751232, 94730166763519, +STORE, 140656834555904, 140656836808703, +SNULL, 140656834699263, 140656836808703, +STORE, 140656834555904, 140656834699263, +STORE, 140656834699264, 140656836808703, +ERASE, 140656834699264, 140656836808703, +STORE, 140656836796416, 140656836804607, +STORE, 140656836804608, 140656836808703, +STORE, 140725183389696, 140725183393791, +STORE, 140725183377408, 140725183389695, +STORE, 140656836788224, 140656836796415, +STORE, 140656832331776, 140656834555903, +SNULL, 140656833982463, 140656834555903, +STORE, 140656832331776, 140656833982463, +STORE, 140656833982464, 140656834555903, +SNULL, 140656833982464, 140656834551807, +STORE, 140656834551808, 140656834555903, +STORE, 140656833982464, 140656834551807, +ERASE, 140656833982464, 140656834551807, +STORE, 140656833982464, 140656834551807, +ERASE, 140656834551808, 140656834555903, +STORE, 140656834551808, 140656834555903, +STORE, 140656836763648, 140656836788223, +STORE, 140656830070784, 140656832331775, +SNULL, 140656830070784, 140656830222335, +STORE, 140656830222336, 140656832331775, +STORE, 140656830070784, 140656830222335, +SNULL, 140656832315391, 140656832331775, +STORE, 140656830222336, 140656832315391, +STORE, 140656832315392, 140656832331775, +SNULL, 140656832315392, 140656832323583, +STORE, 140656832323584, 140656832331775, +STORE, 140656832315392, 140656832323583, +ERASE, 
140656832315392, 140656832323583, +STORE, 140656832315392, 140656832323583, +ERASE, 140656832323584, 140656832331775, +STORE, 140656832323584, 140656832331775, +STORE, 140656827940864, 140656830070783, +SNULL, 140656827940864, 140656827969535, +STORE, 140656827969536, 140656830070783, +STORE, 140656827940864, 140656827969535, +SNULL, 140656830062591, 140656830070783, +STORE, 140656827969536, 140656830062591, +STORE, 140656830062592, 140656830070783, +ERASE, 140656830062592, 140656830070783, +STORE, 140656830062592, 140656830070783, +STORE, 140656825724928, 140656827940863, +SNULL, 140656825724928, 140656825823231, +STORE, 140656825823232, 140656827940863, +STORE, 140656825724928, 140656825823231, +SNULL, 140656827916287, 140656827940863, +STORE, 140656825823232, 140656827916287, +STORE, 140656827916288, 140656827940863, +SNULL, 140656827916288, 140656827924479, +STORE, 140656827924480, 140656827940863, +STORE, 140656827916288, 140656827924479, +ERASE, 140656827916288, 140656827924479, +STORE, 140656827916288, 140656827924479, +ERASE, 140656827924480, 140656827940863, +STORE, 140656827924480, 140656827940863, +STORE, 140656821927936, 140656825724927, +SNULL, 140656821927936, 140656823586815, +STORE, 140656823586816, 140656825724927, +STORE, 140656821927936, 140656823586815, +SNULL, 140656825683967, 140656825724927, +STORE, 140656823586816, 140656825683967, +STORE, 140656825683968, 140656825724927, +SNULL, 140656825683968, 140656825708543, +STORE, 140656825708544, 140656825724927, +STORE, 140656825683968, 140656825708543, +ERASE, 140656825683968, 140656825708543, +STORE, 140656825683968, 140656825708543, +ERASE, 140656825708544, 140656825724927, +STORE, 140656825708544, 140656825724927, +STORE, 140656819806208, 140656821927935, +SNULL, 140656819806208, 140656819822591, +STORE, 140656819822592, 140656821927935, +STORE, 140656819806208, 140656819822591, +SNULL, 140656821919743, 140656821927935, +STORE, 140656819822592, 140656821919743, +STORE, 140656821919744, 140656821927935, +ERASE, 140656821919744, 140656821927935, +STORE, 140656821919744, 140656821927935, +STORE, 140656836755456, 140656836763647, +STORE, 140656817553408, 140656819806207, +SNULL, 140656817553408, 140656817704959, +STORE, 140656817704960, 140656819806207, +STORE, 140656817553408, 140656817704959, +SNULL, 140656819798015, 140656819806207, +STORE, 140656817704960, 140656819798015, +STORE, 140656819798016, 140656819806207, +ERASE, 140656819798016, 140656819806207, +STORE, 140656819798016, 140656819806207, +STORE, 140656815382528, 140656817553407, +SNULL, 140656815382528, 140656815452159, +STORE, 140656815452160, 140656817553407, +STORE, 140656815382528, 140656815452159, +SNULL, 140656817545215, 140656817553407, +STORE, 140656815452160, 140656817545215, +STORE, 140656817545216, 140656817553407, +ERASE, 140656817545216, 140656817553407, +STORE, 140656817545216, 140656817553407, +STORE, 140656812171264, 140656815382527, +SNULL, 140656812171264, 140656813248511, +STORE, 140656813248512, 140656815382527, +STORE, 140656812171264, 140656813248511, +SNULL, 140656815345663, 140656815382527, +STORE, 140656813248512, 140656815345663, +STORE, 140656815345664, 140656815382527, +ERASE, 140656815345664, 140656815382527, +STORE, 140656815345664, 140656815382527, +STORE, 140656810037248, 140656812171263, +SNULL, 140656810037248, 140656810065919, +STORE, 140656810065920, 140656812171263, +STORE, 140656810037248, 140656810065919, +SNULL, 140656812163071, 140656812171263, +STORE, 140656810065920, 140656812163071, +STORE, 140656812163072, 
140656812171263, +ERASE, 140656812163072, 140656812171263, +STORE, 140656812163072, 140656812171263, +STORE, 140656807727104, 140656810037247, +SNULL, 140656807727104, 140656807931903, +STORE, 140656807931904, 140656810037247, +STORE, 140656807727104, 140656807931903, +SNULL, 140656810029055, 140656810037247, +STORE, 140656807931904, 140656810029055, +STORE, 140656810029056, 140656810037247, +ERASE, 140656810029056, 140656810037247, +STORE, 140656810029056, 140656810037247, +STORE, 140656805343232, 140656807727103, +SNULL, 140656805343232, 140656805535743, +STORE, 140656805535744, 140656807727103, +STORE, 140656805343232, 140656805535743, +SNULL, 140656807628799, 140656807727103, +STORE, 140656805535744, 140656807628799, +STORE, 140656807628800, 140656807727103, +ERASE, 140656807628800, 140656807727103, +STORE, 140656807628800, 140656807727103, +STORE, 140656836747264, 140656836763647, +STORE, 140656802775040, 140656805343231, +SNULL, 140656802775040, 140656803241983, +STORE, 140656803241984, 140656805343231, +STORE, 140656802775040, 140656803241983, +SNULL, 140656805335039, 140656805343231, +STORE, 140656803241984, 140656805335039, +STORE, 140656805335040, 140656805343231, +ERASE, 140656805335040, 140656805343231, +STORE, 140656805335040, 140656805343231, +STORE, 140656800661504, 140656802775039, +SNULL, 140656800661504, 140656800673791, +STORE, 140656800673792, 140656802775039, +STORE, 140656800661504, 140656800673791, +SNULL, 140656802766847, 140656802775039, +STORE, 140656800673792, 140656802766847, +STORE, 140656802766848, 140656802775039, +ERASE, 140656802766848, 140656802775039, +STORE, 140656802766848, 140656802775039, +STORE, 140656798482432, 140656800661503, +SNULL, 140656798482432, 140656798560255, +STORE, 140656798560256, 140656800661503, +STORE, 140656798482432, 140656798560255, +SNULL, 140656800653311, 140656800661503, +STORE, 140656798560256, 140656800653311, +STORE, 140656800653312, 140656800661503, +ERASE, 140656800653312, 140656800661503, +STORE, 140656800653312, 140656800661503, +STORE, 140656796364800, 140656798482431, +SNULL, 140656796364800, 140656796381183, +STORE, 140656796381184, 140656798482431, +STORE, 140656796364800, 140656796381183, +SNULL, 140656798474239, 140656798482431, +STORE, 140656796381184, 140656798474239, +STORE, 140656798474240, 140656798482431, +ERASE, 140656798474240, 140656798482431, +STORE, 140656798474240, 140656798482431, +STORE, 140656836739072, 140656836763647, +STORE, 140656836726784, 140656836763647, +SNULL, 140656825700351, 140656825708543, +STORE, 140656825683968, 140656825700351, +STORE, 140656825700352, 140656825708543, +SNULL, 140656798478335, 140656798482431, +STORE, 140656798474240, 140656798478335, +STORE, 140656798478336, 140656798482431, +SNULL, 140656800657407, 140656800661503, +STORE, 140656800653312, 140656800657407, +STORE, 140656800657408, 140656800661503, +SNULL, 140656802770943, 140656802775039, +STORE, 140656802766848, 140656802770943, +STORE, 140656802770944, 140656802775039, +SNULL, 140656827920383, 140656827924479, +STORE, 140656827916288, 140656827920383, +STORE, 140656827920384, 140656827924479, +SNULL, 140656805339135, 140656805343231, +STORE, 140656805335040, 140656805339135, +STORE, 140656805339136, 140656805343231, +SNULL, 140656807723007, 140656807727103, +STORE, 140656807628800, 140656807723007, +STORE, 140656807723008, 140656807727103, +SNULL, 140656810033151, 140656810037247, +STORE, 140656810029056, 140656810033151, +STORE, 140656810033152, 140656810037247, +SNULL, 140656812167167, 140656812171263, +STORE, 
140656812163072, 140656812167167, +STORE, 140656812167168, 140656812171263, +SNULL, 140656815353855, 140656815382527, +STORE, 140656815345664, 140656815353855, +STORE, 140656815353856, 140656815382527, +SNULL, 140656817549311, 140656817553407, +STORE, 140656817545216, 140656817549311, +STORE, 140656817549312, 140656817553407, +SNULL, 140656819802111, 140656819806207, +STORE, 140656819798016, 140656819802111, +STORE, 140656819802112, 140656819806207, +SNULL, 140656821923839, 140656821927935, +STORE, 140656821919744, 140656821923839, +STORE, 140656821923840, 140656821927935, +SNULL, 140656830066687, 140656830070783, +STORE, 140656830062592, 140656830066687, +STORE, 140656830066688, 140656830070783, +SNULL, 140656832319487, 140656832323583, +STORE, 140656832315392, 140656832319487, +STORE, 140656832319488, 140656832323583, +SNULL, 140656834547711, 140656834551807, +STORE, 140656833982464, 140656834547711, +STORE, 140656834547712, 140656834551807, +SNULL, 94730166759423, 94730166763519, +STORE, 94730166751232, 94730166759423, +STORE, 94730166759424, 94730166763519, +SNULL, 140656836800511, 140656836804607, +STORE, 140656836796416, 140656836800511, +STORE, 140656836800512, 140656836804607, +ERASE, 140656836763648, 140656836788223, +STORE, 94730171318272, 94730171453439, +STORE, 140656836784128, 140656836788223, +STORE, 140656836780032, 140656836784127, +STORE, 140656791920640, 140656796364799, +STORE, 140656836775936, 140656836780031, +STORE, 140656787476480, 140656791920639, +STORE, 140656779083776, 140656787476479, +SNULL, 140656779087871, 140656787476479, +STORE, 140656779083776, 140656779087871, +STORE, 140656779087872, 140656787476479, +STORE, 140656836771840, 140656836775935, +STORE, 140656774639616, 140656779083775, +STORE, 140656766246912, 140656774639615, +SNULL, 140656766251007, 140656774639615, +STORE, 140656766246912, 140656766251007, +STORE, 140656766251008, 140656774639615, +ERASE, 140656791920640, 140656796364799, +ERASE, 140656836780032, 140656836784127, +ERASE, 140656787476480, 140656791920639, +ERASE, 140656836775936, 140656836780031, +STORE, 140656836780032, 140656836784127, +STORE, 140656791920640, 140656796364799, +STORE, 140656836775936, 140656836780031, +STORE, 140656787476480, 140656791920639, +ERASE, 140656774639616, 140656779083775, + }; + unsigned long set20[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140735952392192, 140737488351231, +SNULL, 140735952396287, 140737488351231, +STORE, 140735952392192, 140735952396287, +STORE, 140735952261120, 140735952396287, +STORE, 94849008947200, 94849009414143, +SNULL, 94849009364991, 94849009414143, +STORE, 94849008947200, 94849009364991, +STORE, 94849009364992, 94849009414143, +ERASE, 94849009364992, 94849009414143, +STORE, 94849009364992, 94849009414143, +STORE, 140590397943808, 140590400196607, +SNULL, 140590398087167, 140590400196607, +STORE, 140590397943808, 140590398087167, +STORE, 140590398087168, 140590400196607, +ERASE, 140590398087168, 140590400196607, +STORE, 140590400184320, 140590400192511, +STORE, 140590400192512, 140590400196607, +STORE, 140735952850944, 140735952855039, +STORE, 140735952838656, 140735952850943, +STORE, 140590400180224, 140590400184319, +STORE, 140590400172032, 140590400180223, +STORE, 140590395809792, 140590397943807, +SNULL, 140590395809792, 140590395838463, +STORE, 140590395838464, 140590397943807, +STORE, 140590395809792, 140590395838463, +SNULL, 140590397935615, 140590397943807, +STORE, 140590395838464, 140590397935615, +STORE, 140590397935616, 140590397943807, +ERASE, 
140590397935616, 140590397943807, +STORE, 140590397935616, 140590397943807, +STORE, 140590393425920, 140590395809791, +SNULL, 140590393425920, 140590393692159, +STORE, 140590393692160, 140590395809791, +STORE, 140590393425920, 140590393692159, +SNULL, 140590395785215, 140590395809791, +STORE, 140590393692160, 140590395785215, +STORE, 140590395785216, 140590395809791, +SNULL, 140590395785216, 140590395805695, +STORE, 140590395805696, 140590395809791, +STORE, 140590395785216, 140590395805695, +ERASE, 140590395785216, 140590395805695, +STORE, 140590395785216, 140590395805695, +ERASE, 140590395805696, 140590395809791, +STORE, 140590395805696, 140590395809791, +STORE, 140590391234560, 140590393425919, +SNULL, 140590391234560, 140590391324671, +STORE, 140590391324672, 140590393425919, +STORE, 140590391234560, 140590391324671, +SNULL, 140590393417727, 140590393425919, +STORE, 140590391324672, 140590393417727, +STORE, 140590393417728, 140590393425919, +ERASE, 140590393417728, 140590393425919, +STORE, 140590393417728, 140590393425919, +STORE, 140590388973568, 140590391234559, +SNULL, 140590388973568, 140590389125119, +STORE, 140590389125120, 140590391234559, +STORE, 140590388973568, 140590389125119, +SNULL, 140590391218175, 140590391234559, +STORE, 140590389125120, 140590391218175, +STORE, 140590391218176, 140590391234559, +SNULL, 140590391218176, 140590391226367, +STORE, 140590391226368, 140590391234559, +STORE, 140590391218176, 140590391226367, +ERASE, 140590391218176, 140590391226367, +STORE, 140590391218176, 140590391226367, +ERASE, 140590391226368, 140590391234559, +STORE, 140590391226368, 140590391234559, +STORE, 140590386843648, 140590388973567, +SNULL, 140590386843648, 140590386872319, +STORE, 140590386872320, 140590388973567, +STORE, 140590386843648, 140590386872319, +SNULL, 140590388965375, 140590388973567, +STORE, 140590386872320, 140590388965375, +STORE, 140590388965376, 140590388973567, +ERASE, 140590388965376, 140590388973567, +STORE, 140590388965376, 140590388973567, +STORE, 140590384627712, 140590386843647, +SNULL, 140590384627712, 140590384726015, +STORE, 140590384726016, 140590386843647, +STORE, 140590384627712, 140590384726015, +SNULL, 140590386819071, 140590386843647, +STORE, 140590384726016, 140590386819071, +STORE, 140590386819072, 140590386843647, +SNULL, 140590386819072, 140590386827263, +STORE, 140590386827264, 140590386843647, +STORE, 140590386819072, 140590386827263, +ERASE, 140590386819072, 140590386827263, +STORE, 140590386819072, 140590386827263, +ERASE, 140590386827264, 140590386843647, +STORE, 140590386827264, 140590386843647, +STORE, 140590400163840, 140590400180223, +STORE, 140590380830720, 140590384627711, +SNULL, 140590380830720, 140590382489599, +STORE, 140590382489600, 140590384627711, +STORE, 140590380830720, 140590382489599, +SNULL, 140590384586751, 140590384627711, +STORE, 140590382489600, 140590384586751, +STORE, 140590384586752, 140590384627711, +SNULL, 140590384586752, 140590384611327, +STORE, 140590384611328, 140590384627711, +STORE, 140590384586752, 140590384611327, +ERASE, 140590384586752, 140590384611327, +STORE, 140590384586752, 140590384611327, +ERASE, 140590384611328, 140590384627711, +STORE, 140590384611328, 140590384627711, +STORE, 140590378713088, 140590380830719, +SNULL, 140590378713088, 140590378729471, +STORE, 140590378729472, 140590380830719, +STORE, 140590378713088, 140590378729471, +SNULL, 140590380822527, 140590380830719, +STORE, 140590378729472, 140590380822527, +STORE, 140590380822528, 140590380830719, +ERASE, 140590380822528, 
140590380830719, +STORE, 140590380822528, 140590380830719, +STORE, 140590376595456, 140590378713087, +SNULL, 140590376595456, 140590376611839, +STORE, 140590376611840, 140590378713087, +STORE, 140590376595456, 140590376611839, +SNULL, 140590378704895, 140590378713087, +STORE, 140590376611840, 140590378704895, +STORE, 140590378704896, 140590378713087, +ERASE, 140590378704896, 140590378713087, +STORE, 140590378704896, 140590378713087, +STORE, 140590374027264, 140590376595455, +SNULL, 140590374027264, 140590374494207, +STORE, 140590374494208, 140590376595455, +STORE, 140590374027264, 140590374494207, +SNULL, 140590376587263, 140590376595455, +STORE, 140590374494208, 140590376587263, +STORE, 140590376587264, 140590376595455, +ERASE, 140590376587264, 140590376595455, +STORE, 140590376587264, 140590376595455, +STORE, 140590371913728, 140590374027263, +SNULL, 140590371913728, 140590371926015, +STORE, 140590371926016, 140590374027263, +STORE, 140590371913728, 140590371926015, +SNULL, 140590374019071, 140590374027263, +STORE, 140590371926016, 140590374019071, +STORE, 140590374019072, 140590374027263, +ERASE, 140590374019072, 140590374027263, +STORE, 140590374019072, 140590374027263, +STORE, 140590400155648, 140590400180223, +STORE, 140590400143360, 140590400180223, +SNULL, 140590384603135, 140590384611327, +STORE, 140590384586752, 140590384603135, +STORE, 140590384603136, 140590384611327, +SNULL, 140590374023167, 140590374027263, +STORE, 140590374019072, 140590374023167, +STORE, 140590374023168, 140590374027263, +SNULL, 140590386823167, 140590386827263, +STORE, 140590386819072, 140590386823167, +STORE, 140590386823168, 140590386827263, +SNULL, 140590376591359, 140590376595455, + }; + unsigned long set21[] = { +STORE, 93874710941696, 93874711363583, +STORE, 93874711367680, 93874711408639, +STORE, 93874711408640, 93874711412735, +STORE, 93874720989184, 93874721124351, +STORE, 140708365086720, 140708365099007, +STORE, 140708365099008, 140708367192063, +STORE, 140708367192064, 140708367196159, +STORE, 140708367196160, 140708367200255, +STORE, 140708367200256, 140708367667199, +STORE, 140708367667200, 140708369760255, +STORE, 140708369760256, 140708369764351, +STORE, 140708369764352, 140708369768447, +STORE, 140708369768448, 140708369784831, +STORE, 140708369784832, 140708371877887, +STORE, 140708371877888, 140708371881983, +STORE, 140708371881984, 140708371886079, +STORE, 140708371886080, 140708371902463, +STORE, 140708371902464, 140708373995519, +STORE, 140708373995520, 140708373999615, +STORE, 140708373999616, 140708374003711, +STORE, 140708374003712, 140708375662591, +STORE, 140708375662592, 140708377759743, +STORE, 140708377759744, 140708377776127, +STORE, 140708377776128, 140708377784319, +STORE, 140708377784320, 140708377800703, +STORE, 140708377800704, 140708377899007, +STORE, 140708377899008, 140708379992063, +STORE, 140708379992064, 140708379996159, +STORE, 140708379996160, 140708380000255, +STORE, 140708380000256, 140708380016639, +STORE, 140708380016640, 140708380045311, +STORE, 140708380045312, 140708382138367, +STORE, 140708382138368, 140708382142463, +STORE, 140708382142464, 140708382146559, +STORE, 140708382146560, 140708382298111, +STORE, 140708382298112, 140708384391167, +STORE, 140708384391168, 140708384395263, +STORE, 140708384395264, 140708384399359, +STORE, 140708384399360, 140708384407551, +STORE, 140708384407552, 140708384497663, +STORE, 140708384497664, 140708386590719, +STORE, 140708386590720, 140708386594815, +STORE, 140708386594816, 140708386598911, +STORE, 140708386598912, 
140708386865151, +STORE, 140708386865152, 140708388958207, +STORE, 140708388958208, 140708388974591, +STORE, 140708388974592, 140708388978687, +STORE, 140708388978688, 140708388982783, +STORE, 140708388982784, 140708389011455, +STORE, 140708389011456, 140708391108607, +STORE, 140708391108608, 140708391112703, +STORE, 140708391112704, 140708391116799, +STORE, 140708391116800, 140708391260159, +STORE, 140708393291776, 140708393308159, +STORE, 140708393308160, 140708393312255, +STORE, 140708393312256, 140708393316351, +STORE, 140708393316352, 140708393353215, +STORE, 140708393353216, 140708393357311, +STORE, 140708393357312, 140708393361407, +STORE, 140708393361408, 140708393365503, +STORE, 140708393365504, 140708393369599, +STORE, 140730557042688, 140730557177855, +STORE, 140730557235200, 140730557247487, +STORE, 140730557247488, 140730557251583, +ERASE, 140708393353216, 140708393357311, +ERASE, 140708393312256, 140708393316351, +ERASE, 140708393308160, 140708393312255, +ERASE, 140708393291776, 140708393308159, + }; + unsigned long set22[] = { +STORE, 93951397134336, 93951397183487, +STORE, 93951397183488, 93951397728255, +STORE, 93951397728256, 93951397826559, +STORE, 93951397826560, 93951397842943, +STORE, 93951397842944, 93951397847039, +STORE, 93951425974272, 93951426109439, +STORE, 140685152665600, 140685152677887, +STORE, 140685152677888, 140685152829439, +STORE, 140685152829440, 140685154181119, +STORE, 140685154181120, 140685154484223, +STORE, 140685154484224, 140685154496511, +STORE, 140685154496512, 140685154508799, +STORE, 140685154508800, 140685154525183, +STORE, 140685154525184, 140685154541567, +STORE, 140685154541568, 140685154590719, +STORE, 140685154590720, 140685154603007, +STORE, 140685154603008, 140685154607103, +STORE, 140685154607104, 140685154611199, +STORE, 140685154611200, 140685154615295, +STORE, 140685154615296, 140685154631679, +STORE, 140685154639872, 140685154643967, +STORE, 140685154643968, 140685154766847, +STORE, 140685154766848, 140685154799615, +STORE, 140685154803712, 140685154807807, +STORE, 140685154807808, 140685154811903, +STORE, 140685154811904, 140685154815999, +STORE, 140722188902400, 140722189037567, +STORE, 140722189512704, 140722189524991, +STORE, 140722189524992, 140722189529087, +STORE, 140737488347136, 140737488351231, +STORE, 140733429354496, 140737488351231, +SNULL, 140733429358591, 140737488351231, +STORE, 140733429354496, 140733429358591, +STORE, 140733429223424, 140733429358591, +STORE, 94526683537408, 94526683660287, +SNULL, 94526683553791, 94526683660287, +STORE, 94526683537408, 94526683553791, +STORE, 94526683553792, 94526683660287, +ERASE, 94526683553792, 94526683660287, +STORE, 94526683553792, 94526683623423, +STORE, 94526683623424, 94526683647999, +STORE, 94526683652096, 94526683660287, +STORE, 140551363747840, 140551363923967, +SNULL, 140551363751935, 140551363923967, +STORE, 140551363747840, 140551363751935, +STORE, 140551363751936, 140551363923967, +ERASE, 140551363751936, 140551363923967, +STORE, 140551363751936, 140551363874815, +STORE, 140551363874816, 140551363907583, +STORE, 140551363911680, 140551363919871, +STORE, 140551363919872, 140551363923967, +STORE, 140733429690368, 140733429694463, +STORE, 140733429678080, 140733429690367, +STORE, 140551363739648, 140551363747839, +STORE, 140551363731456, 140551363739647, +STORE, 140551363379200, 140551363731455, +SNULL, 140551363379200, 140551363420159, +STORE, 140551363420160, 140551363731455, +STORE, 140551363379200, 140551363420159, +SNULL, 140551363706879, 140551363731455, 
+STORE, 140551363420160, 140551363706879, +STORE, 140551363706880, 140551363731455, +SNULL, 140551363420160, 140551363637247, +STORE, 140551363637248, 140551363706879, +STORE, 140551363420160, 140551363637247, +ERASE, 140551363420160, 140551363637247, +STORE, 140551363420160, 140551363637247, +SNULL, 140551363637248, 140551363702783, +STORE, 140551363702784, 140551363706879, +STORE, 140551363637248, 140551363702783, +ERASE, 140551363637248, 140551363702783, +STORE, 140551363637248, 140551363702783, +ERASE, 140551363706880, 140551363731455, +STORE, 140551363706880, 140551363731455, +STORE, 140551361531904, 140551363379199, +SNULL, 140551361683455, 140551363379199, +STORE, 140551361531904, 140551361683455, +STORE, 140551361683456, 140551363379199, +SNULL, 140551361683456, 140551363035135, +STORE, 140551363035136, 140551363379199, +STORE, 140551361683456, 140551363035135, +ERASE, 140551361683456, 140551363035135, +STORE, 140551361683456, 140551363035135, +SNULL, 140551363035136, 140551363338239, +STORE, 140551363338240, 140551363379199, +STORE, 140551363035136, 140551363338239, +ERASE, 140551363035136, 140551363338239, +STORE, 140551363035136, 140551363379199, +SNULL, 140551363338239, 140551363379199, +STORE, 140551363035136, 140551363338239, +STORE, 140551363338240, 140551363379199, +SNULL, 140551363338240, 140551363362815, +STORE, 140551363362816, 140551363379199, +STORE, 140551363338240, 140551363362815, +ERASE, 140551363338240, 140551363362815, +STORE, 140551363338240, 140551363362815, +ERASE, 140551363362816, 140551363379199, +STORE, 140551363362816, 140551363379199, +STORE, 140551361519616, 140551361531903, +SNULL, 140551363350527, 140551363362815, +STORE, 140551363338240, 140551363350527, +STORE, 140551363350528, 140551363362815, +SNULL, 140551363727359, 140551363731455, +STORE, 140551363706880, 140551363727359, +STORE, 140551363727360, 140551363731455, +SNULL, 94526683656191, 94526683660287, +STORE, 94526683652096, 94526683656191, +STORE, 94526683656192, 94526683660287, +SNULL, 140551363915775, 140551363919871, +STORE, 140551363911680, 140551363915775, +STORE, 140551363915776, 140551363919871, +ERASE, 140551363739648, 140551363747839, +STORE, 94526715490304, 94526715625471, +STORE, 140551361253376, 140551361531903, +STORE, 140551360987136, 140551361531903, +STORE, 140551360720896, 140551361531903, +STORE, 140551360454656, 140551361531903, +SNULL, 140551361253375, 140551361531903, +STORE, 140551360454656, 140551361253375, +STORE, 140551361253376, 140551361531903, +SNULL, 140551361253376, 140551361519615, +STORE, 140551361519616, 140551361531903, +STORE, 140551361253376, 140551361519615, +ERASE, 140551361253376, 140551361519615, + }; + + unsigned long set23[] = { +STORE, 94014447943680, 94014448156671, +STORE, 94014450253824, 94014450257919, +STORE, 94014450257920, 94014450266111, +STORE, 94014450266112, 94014450278399, +STORE, 94014464225280, 94014464630783, +STORE, 139761764306944, 139761765965823, +STORE, 139761765965824, 139761768062975, +STORE, 139761768062976, 139761768079359, +STORE, 139761768079360, 139761768087551, +STORE, 139761768087552, 139761768103935, +STORE, 139761768103936, 139761768116223, +STORE, 139761768116224, 139761770209279, +STORE, 139761770209280, 139761770213375, +STORE, 139761770213376, 139761770217471, +STORE, 139761770217472, 139761770360831, +STORE, 139761770729472, 139761772412927, +STORE, 139761772412928, 139761772429311, +STORE, 139761772457984, 139761772462079, +STORE, 139761772462080, 139761772466175, +STORE, 139761772466176, 139761772470271, +STORE, 
140724336517120, 140724336652287, +STORE, 140724336955392, 140724336967679, +STORE, 140724336967680, 140724336971775, +STORE, 140737488347136, 140737488351231, +STORE, 140721840295936, 140737488351231, +SNULL, 140721840300031, 140737488351231, +STORE, 140721840295936, 140721840300031, +STORE, 140721840164864, 140721840300031, +STORE, 93937913667584, 93937915830271, +SNULL, 93937913729023, 93937915830271, +STORE, 93937913667584, 93937913729023, +STORE, 93937913729024, 93937915830271, +ERASE, 93937913729024, 93937915830271, +STORE, 93937915822080, 93937915830271, +STORE, 140598835335168, 140598837587967, +SNULL, 140598835478527, 140598837587967, +STORE, 140598835335168, 140598835478527, +STORE, 140598835478528, 140598837587967, +ERASE, 140598835478528, 140598837587967, +STORE, 140598837575680, 140598837583871, +STORE, 140598837583872, 140598837587967, +STORE, 140721841086464, 140721841090559, +STORE, 140721841074176, 140721841086463, +STORE, 140598837547008, 140598837575679, +STORE, 140598837538816, 140598837547007, +STORE, 140598831538176, 140598835335167, +SNULL, 140598831538176, 140598833197055, +STORE, 140598833197056, 140598835335167, +STORE, 140598831538176, 140598833197055, +SNULL, 140598835294207, 140598835335167, +STORE, 140598833197056, 140598835294207, +STORE, 140598835294208, 140598835335167, +SNULL, 140598835294208, 140598835318783, +STORE, 140598835318784, 140598835335167, +STORE, 140598835294208, 140598835318783, +ERASE, 140598835294208, 140598835318783, +STORE, 140598835294208, 140598835318783, +ERASE, 140598835318784, 140598835335167, +STORE, 140598835318784, 140598835335167, +SNULL, 140598835310591, 140598835318783, +STORE, 140598835294208, 140598835310591, +STORE, 140598835310592, 140598835318783, +SNULL, 93937915826175, 93937915830271, +STORE, 93937915822080, 93937915826175, +STORE, 93937915826176, 93937915830271, +SNULL, 140598837579775, 140598837583871, +STORE, 140598837575680, 140598837579775, +STORE, 140598837579776, 140598837583871, +ERASE, 140598837547008, 140598837575679, +STORE, 93937929179136, 93937929314303, +STORE, 140598835855360, 140598837538815, +STORE, 140737488347136, 140737488351231, +STORE, 140728187723776, 140737488351231, +SNULL, 140728187727871, 140737488351231, +STORE, 140728187723776, 140728187727871, +STORE, 140728187592704, 140728187727871, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 140583951437824, 140583953690623, +SNULL, 140583951581183, 140583953690623, +STORE, 140583951437824, 140583951581183, +STORE, 140583951581184, 140583953690623, +ERASE, 140583951581184, 140583953690623, +STORE, 140583953678336, 140583953686527, +STORE, 140583953686528, 140583953690623, +STORE, 140728189116416, 140728189120511, +STORE, 140728189104128, 140728189116415, +STORE, 140583953649664, 140583953678335, +STORE, 140583953641472, 140583953649663, +STORE, 140583948275712, 140583951437823, +SNULL, 140583948275712, 140583949336575, +STORE, 140583949336576, 140583951437823, +STORE, 140583948275712, 140583949336575, +SNULL, 140583951429631, 140583951437823, +STORE, 140583949336576, 140583951429631, +STORE, 140583951429632, 140583951437823, +ERASE, 140583951429632, 140583951437823, +STORE, 140583951429632, 140583951437823, +STORE, 140583944478720, 140583948275711, +SNULL, 140583944478720, 140583946137599, +STORE, 140583946137600, 140583948275711, +STORE, 140583944478720, 140583946137599, +SNULL, 140583948234751, 140583948275711, +STORE, 140583946137600, 140583948234751, +STORE, 140583948234752, 140583948275711, +SNULL, 
140583948234752, 140583948259327, +STORE, 140583948259328, 140583948275711, +STORE, 140583948234752, 140583948259327, +ERASE, 140583948234752, 140583948259327, +STORE, 140583948234752, 140583948259327, +ERASE, 140583948259328, 140583948275711, +STORE, 140583948259328, 140583948275711, +STORE, 140583953629184, 140583953649663, +SNULL, 140583948251135, 140583948259327, +STORE, 140583948234752, 140583948251135, +STORE, 140583948251136, 140583948259327, +SNULL, 140583951433727, 140583951437823, +STORE, 140583951429632, 140583951433727, +STORE, 140583951433728, 140583951437823, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 140583953682431, 140583953686527, +STORE, 140583953678336, 140583953682431, +STORE, 140583953682432, 140583953686527, +ERASE, 140583953649664, 140583953678335, +STORE, 17821696, 17956863, +STORE, 17821696, 18104319, +STORE, 140583951945728, 140583953629183, +STORE, 94014447943680, 94014448156671, +STORE, 94014450253824, 94014450257919, +STORE, 94014450257920, 94014450266111, +STORE, 94014450266112, 94014450278399, +STORE, 94014464225280, 94014465196031, +STORE, 139761764306944, 139761765965823, +STORE, 139761765965824, 139761768062975, +STORE, 139761768062976, 139761768079359, +STORE, 139761768079360, 139761768087551, +STORE, 139761768087552, 139761768103935, +STORE, 139761768103936, 139761768116223, +STORE, 139761768116224, 139761770209279, +STORE, 139761770209280, 139761770213375, +STORE, 139761770213376, 139761770217471, +STORE, 139761770217472, 139761770360831, +STORE, 139761770729472, 139761772412927, +STORE, 139761772412928, 139761772429311, +STORE, 139761772457984, 139761772462079, +STORE, 139761772462080, 139761772466175, +STORE, 139761772466176, 139761772470271, +STORE, 140724336517120, 140724336652287, +STORE, 140724336955392, 140724336967679, +STORE, 140724336967680, 140724336971775, +STORE, 140737488347136, 140737488351231, +STORE, 140726063296512, 140737488351231, +SNULL, 140726063300607, 140737488351231, +STORE, 140726063296512, 140726063300607, +STORE, 140726063165440, 140726063300607, +STORE, 94016795934720, 94016798158847, +SNULL, 94016796045311, 94016798158847, +STORE, 94016795934720, 94016796045311, +STORE, 94016796045312, 94016798158847, +ERASE, 94016796045312, 94016798158847, +STORE, 94016798138368, 94016798150655, +STORE, 94016798150656, 94016798158847, +STORE, 139975915966464, 139975918219263, +SNULL, 139975916109823, 139975918219263, +STORE, 139975915966464, 139975916109823, +STORE, 139975916109824, 139975918219263, +ERASE, 139975916109824, 139975918219263, +STORE, 139975918206976, 139975918215167, +STORE, 139975918215168, 139975918219263, +STORE, 140726064541696, 140726064545791, +STORE, 140726064529408, 140726064541695, +STORE, 139975918178304, 139975918206975, +STORE, 139975918170112, 139975918178303, +STORE, 139975912169472, 139975915966463, +SNULL, 139975912169472, 139975913828351, +STORE, 139975913828352, 139975915966463, +STORE, 139975912169472, 139975913828351, +SNULL, 139975915925503, 139975915966463, +STORE, 139975913828352, 139975915925503, +STORE, 139975915925504, 139975915966463, +SNULL, 139975915925504, 139975915950079, +STORE, 139975915950080, 139975915966463, +STORE, 139975915925504, 139975915950079, +ERASE, 139975915925504, 139975915950079, +STORE, 139975915925504, 139975915950079, +ERASE, 139975915950080, 139975915966463, +STORE, 139975915950080, 139975915966463, +SNULL, 139975915941887, 139975915950079, +STORE, 139975915925504, 139975915941887, +STORE, 139975915941888, 139975915950079, +SNULL, 
94016798146559, 94016798150655, +STORE, 94016798138368, 94016798146559, +STORE, 94016798146560, 94016798150655, +SNULL, 139975918211071, 139975918215167, +STORE, 139975918206976, 139975918211071, +STORE, 139975918211072, 139975918215167, +ERASE, 139975918178304, 139975918206975, +STORE, 94016804925440, 94016805060607, +STORE, 94596177661952, 94596177772543, +STORE, 94596179865600, 94596179873791, +STORE, 94596179873792, 94596179877887, +STORE, 94596179877888, 94596179886079, +STORE, 94596211597312, 94596211863551, +STORE, 140127351840768, 140127353499647, +STORE, 140127353499648, 140127355596799, +STORE, 140127355596800, 140127355613183, +STORE, 140127355613184, 140127355621375, +STORE, 140127355621376, 140127355637759, +STORE, 140127355637760, 140127355781119, +STORE, 140127357841408, 140127357849599, +STORE, 140127357878272, 140127357882367, +STORE, 140127357882368, 140127357886463, +STORE, 140127357886464, 140127357890559, +STORE, 140726167252992, 140726167392255, +STORE, 140726167838720, 140726167851007, +STORE, 140726167851008, 140726167855103, +STORE, 140737488347136, 140737488351231, +STORE, 140731874017280, 140737488351231, +SNULL, 140731874021375, 140737488351231, +STORE, 140731874017280, 140731874021375, +STORE, 140731873886208, 140731874021375, +STORE, 94178682265600, 94178684489727, +SNULL, 94178682376191, 94178684489727, +STORE, 94178682265600, 94178682376191, +STORE, 94178682376192, 94178684489727, +ERASE, 94178682376192, 94178684489727, +STORE, 94178684469248, 94178684481535, +STORE, 94178684481536, 94178684489727, +STORE, 140460853403648, 140460855656447, +SNULL, 140460853547007, 140460855656447, +STORE, 140460853403648, 140460853547007, +STORE, 140460853547008, 140460855656447, +ERASE, 140460853547008, 140460855656447, +STORE, 140460855644160, 140460855652351, +STORE, 140460855652352, 140460855656447, +STORE, 140731874103296, 140731874107391, +STORE, 140731874091008, 140731874103295, +STORE, 140460855615488, 140460855644159, +STORE, 140460855607296, 140460855615487, +STORE, 140460849606656, 140460853403647, +SNULL, 140460849606656, 140460851265535, +STORE, 140460851265536, 140460853403647, +STORE, 140460849606656, 140460851265535, +SNULL, 140460853362687, 140460853403647, +STORE, 140460851265536, 140460853362687, +STORE, 140460853362688, 140460853403647, +SNULL, 140460853362688, 140460853387263, +STORE, 140460853387264, 140460853403647, +STORE, 140460853362688, 140460853387263, +ERASE, 140460853362688, 140460853387263, +STORE, 140460853362688, 140460853387263, +ERASE, 140460853387264, 140460853403647, +STORE, 140460853387264, 140460853403647, +SNULL, 140460853379071, 140460853387263, +STORE, 140460853362688, 140460853379071, +STORE, 140460853379072, 140460853387263, +SNULL, 94178684477439, 94178684481535, +STORE, 94178684469248, 94178684477439, +STORE, 94178684477440, 94178684481535, +SNULL, 140460855648255, 140460855652351, +STORE, 140460855644160, 140460855648255, +STORE, 140460855648256, 140460855652351, +ERASE, 140460855615488, 140460855644159, +STORE, 94178692063232, 94178692198399, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733096603648, 140737488351231, +SNULL, 140733096611839, 140737488351231, +STORE, 140733096603648, 140733096611839, +STORE, 140733096472576, 140733096611839, +STORE, 94796716122112, 94796718325759, +SNULL, 94796716224511, 94796718325759, +STORE, 94796716122112, 94796716224511, +STORE, 94796716224512, 94796718325759, +ERASE, 94796716224512, 94796718325759, +STORE, 94796718317568, 94796718325759, 
+STORE, 139667892793344, 139667895046143, +SNULL, 139667892936703, 139667895046143, +STORE, 139667892793344, 139667892936703, +STORE, 139667892936704, 139667895046143, +ERASE, 139667892936704, 139667895046143, +STORE, 139667895033856, 139667895042047, +STORE, 139667895042048, 139667895046143, +STORE, 140733096857600, 140733096861695, +STORE, 140733096845312, 140733096857599, +STORE, 139667895005184, 139667895033855, +STORE, 139667894996992, 139667895005183, +STORE, 139667890532352, 139667892793343, +SNULL, 139667890532352, 139667890683903, +STORE, 139667890683904, 139667892793343, +STORE, 139667890532352, 139667890683903, +SNULL, 139667892776959, 139667892793343, +STORE, 139667890683904, 139667892776959, +STORE, 139667892776960, 139667892793343, +SNULL, 139667892776960, 139667892785151, +STORE, 139667892785152, 139667892793343, +STORE, 139667892776960, 139667892785151, +ERASE, 139667892776960, 139667892785151, +STORE, 139667892776960, 139667892785151, +ERASE, 139667892785152, 139667892793343, +STORE, 139667892785152, 139667892793343, +STORE, 139667886735360, 139667890532351, +SNULL, 139667886735360, 139667888394239, +STORE, 139667888394240, 139667890532351, +STORE, 139667886735360, 139667888394239, +SNULL, 139667890491391, 139667890532351, +STORE, 139667888394240, 139667890491391, +STORE, 139667890491392, 139667890532351, +SNULL, 139667890491392, 139667890515967, +STORE, 139667890515968, 139667890532351, +STORE, 139667890491392, 139667890515967, +ERASE, 139667890491392, 139667890515967, +STORE, 139667890491392, 139667890515967, +ERASE, 139667890515968, 139667890532351, +STORE, 139667890515968, 139667890532351, +STORE, 139667884167168, 139667886735359, +SNULL, 139667884167168, 139667884634111, +STORE, 139667884634112, 139667886735359, +STORE, 139667884167168, 139667884634111, +SNULL, 139667886727167, 139667886735359, +STORE, 139667884634112, 139667886727167, +STORE, 139667886727168, 139667886735359, +ERASE, 139667886727168, 139667886735359, +STORE, 139667886727168, 139667886735359, +STORE, 139667882053632, 139667884167167, +SNULL, 139667882053632, 139667882065919, +STORE, 139667882065920, 139667884167167, +STORE, 139667882053632, 139667882065919, +SNULL, 139667884158975, 139667884167167, +STORE, 139667882065920, 139667884158975, +STORE, 139667884158976, 139667884167167, +ERASE, 139667884158976, 139667884167167, +STORE, 139667884158976, 139667884167167, +STORE, 139667879837696, 139667882053631, +SNULL, 139667879837696, 139667879935999, +STORE, 139667879936000, 139667882053631, +STORE, 139667879837696, 139667879935999, +SNULL, 139667882029055, 139667882053631, +STORE, 139667879936000, 139667882029055, +STORE, 139667882029056, 139667882053631, +SNULL, 139667882029056, 139667882037247, +STORE, 139667882037248, 139667882053631, +STORE, 139667882029056, 139667882037247, +ERASE, 139667882029056, 139667882037247, +STORE, 139667882029056, 139667882037247, +ERASE, 139667882037248, 139667882053631, +STORE, 139667882037248, 139667882053631, +STORE, 139667894988800, 139667895005183, +SNULL, 139667890507775, 139667890515967, +STORE, 139667890491392, 139667890507775, +STORE, 139667890507776, 139667890515967, +SNULL, 139667882033151, 139667882037247, +STORE, 139667882029056, 139667882033151, +STORE, 139667882033152, 139667882037247, +SNULL, 139667884163071, 139667884167167, +STORE, 139667884158976, 139667884163071, +STORE, 139667884163072, 139667884167167, +SNULL, 139667886731263, 139667886735359, +STORE, 139667886727168, 139667886731263, +STORE, 139667886731264, 139667886735359, +SNULL, 139667892781055, 
139667892785151, +STORE, 139667892776960, 139667892781055, +STORE, 139667892781056, 139667892785151, +SNULL, 94796718321663, 94796718325759, +STORE, 94796718317568, 94796718321663, +STORE, 94796718321664, 94796718325759, +SNULL, 139667895037951, 139667895042047, +STORE, 139667895033856, 139667895037951, +STORE, 139667895037952, 139667895042047, +ERASE, 139667895005184, 139667895033855, +STORE, 94796726063104, 94796726198271, +STORE, 139667893305344, 139667894988799, +STORE, 139667895005184, 139667895033855, +STORE, 94796726063104, 94796726333439, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722489507840, 140737488351231, +SNULL, 140722489516031, 140737488351231, +STORE, 140722489507840, 140722489516031, +STORE, 140722489376768, 140722489516031, +STORE, 93980993265664, 93980995489791, +SNULL, 93980993376255, 93980995489791, +STORE, 93980993265664, 93980993376255, +STORE, 93980993376256, 93980995489791, +ERASE, 93980993376256, 93980995489791, +STORE, 93980995469312, 93980995481599, +STORE, 93980995481600, 93980995489791, +STORE, 140261313593344, 140261315846143, +SNULL, 140261313736703, 140261315846143, +STORE, 140261313593344, 140261313736703, +STORE, 140261313736704, 140261315846143, +ERASE, 140261313736704, 140261315846143, +STORE, 140261315833856, 140261315842047, +STORE, 140261315842048, 140261315846143, +STORE, 140722489675776, 140722489679871, +STORE, 140722489663488, 140722489675775, +STORE, 140261315805184, 140261315833855, +STORE, 140261315796992, 140261315805183, +STORE, 140261309796352, 140261313593343, +SNULL, 140261309796352, 140261311455231, +STORE, 140261311455232, 140261313593343, +STORE, 140261309796352, 140261311455231, +SNULL, 140261313552383, 140261313593343, +STORE, 140261311455232, 140261313552383, +STORE, 140261313552384, 140261313593343, +SNULL, 140261313552384, 140261313576959, +STORE, 140261313576960, 140261313593343, +STORE, 140261313552384, 140261313576959, +ERASE, 140261313552384, 140261313576959, +STORE, 140261313552384, 140261313576959, +ERASE, 140261313576960, 140261313593343, +STORE, 140261313576960, 140261313593343, +SNULL, 140261313568767, 140261313576959, +STORE, 140261313552384, 140261313568767, +STORE, 140261313568768, 140261313576959, +SNULL, 93980995477503, 93980995481599, +STORE, 93980995469312, 93980995477503, +STORE, 93980995477504, 93980995481599, +SNULL, 140261315837951, 140261315842047, +STORE, 140261315833856, 140261315837951, +STORE, 140261315837952, 140261315842047, +ERASE, 140261315805184, 140261315833855, +STORE, 93980997443584, 93980997578751, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140737488338944, 140737488351231, +STORE, 140734059450368, 140737488351231, +SNULL, 140734059462655, 140737488351231, +STORE, 140734059450368, 140734059462655, +STORE, 140734059319296, 140734059462655, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 140307554983936, 140307557236735, +SNULL, 140307555127295, 140307557236735, +STORE, 140307554983936, 140307555127295, +STORE, 140307555127296, 140307557236735, +ERASE, 140307555127296, 140307557236735, +STORE, 140307557224448, 140307557232639, +STORE, 140307557232640, 140307557236735, +STORE, 140734059483136, 140734059487231, +STORE, 140734059470848, 140734059483135, +STORE, 140307557195776, 140307557224447, +STORE, 140307557187584, 140307557195775, +STORE, 140307551821824, 140307554983935, +SNULL, 140307551821824, 140307552882687, +STORE, 140307552882688, 140307554983935, 
+STORE, 140307551821824, 140307552882687, +SNULL, 140307554975743, 140307554983935, +STORE, 140307552882688, 140307554975743, +STORE, 140307554975744, 140307554983935, +ERASE, 140307554975744, 140307554983935, +STORE, 140307554975744, 140307554983935, +STORE, 140307548024832, 140307551821823, +SNULL, 140307548024832, 140307549683711, +STORE, 140307549683712, 140307551821823, +STORE, 140307548024832, 140307549683711, +SNULL, 140307551780863, 140307551821823, +STORE, 140307549683712, 140307551780863, +STORE, 140307551780864, 140307551821823, +SNULL, 140307551780864, 140307551805439, +STORE, 140307551805440, 140307551821823, +STORE, 140307551780864, 140307551805439, +ERASE, 140307551780864, 140307551805439, +STORE, 140307551780864, 140307551805439, +ERASE, 140307551805440, 140307551821823, +STORE, 140307551805440, 140307551821823, +STORE, 140307557175296, 140307557195775, +SNULL, 140307551797247, 140307551805439, +STORE, 140307551780864, 140307551797247, +STORE, 140307551797248, 140307551805439, +SNULL, 140307554979839, 140307554983935, +STORE, 140307554975744, 140307554979839, +STORE, 140307554979840, 140307554983935, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 140307557228543, 140307557232639, +STORE, 140307557224448, 140307557228543, +STORE, 140307557228544, 140307557232639, +ERASE, 140307557195776, 140307557224447, +STORE, 39698432, 39833599, +STORE, 39698432, 39981055, +STORE, 94306485321728, 94306485432319, +STORE, 94306487525376, 94306487533567, +STORE, 94306487533568, 94306487537663, +STORE, 94306487537664, 94306487545855, +STORE, 94306488868864, 94306489004031, +STORE, 140497673998336, 140497675657215, +STORE, 140497675657216, 140497677754367, +STORE, 140497677754368, 140497677770751, +STORE, 140497677770752, 140497677778943, +STORE, 140497677778944, 140497677795327, +STORE, 140497677795328, 140497677938687, +STORE, 140497679998976, 140497680007167, +STORE, 140497680035840, 140497680039935, +STORE, 140497680039936, 140497680044031, +STORE, 140497680044032, 140497680048127, +STORE, 140732780462080, 140732780601343, +STORE, 140732782239744, 140732782252031, +STORE, 140732782252032, 140732782256127, +STORE, 94236915900416, 94236916011007, +STORE, 94236918104064, 94236918112255, +STORE, 94236918112256, 94236918116351, +STORE, 94236918116352, 94236918124543, +STORE, 94236939489280, 94236939624447, +STORE, 140046091743232, 140046093402111, +STORE, 140046093402112, 140046095499263, +STORE, 140046095499264, 140046095515647, +STORE, 140046095515648, 140046095523839, +STORE, 140046095523840, 140046095540223, +STORE, 140046095540224, 140046095683583, +STORE, 140046097743872, 140046097752063, +STORE, 140046097780736, 140046097784831, +STORE, 140046097784832, 140046097788927, +STORE, 140046097788928, 140046097793023, +STORE, 140726694449152, 140726694588415, +STORE, 140726695313408, 140726695325695, +STORE, 140726695325696, 140726695329791, +STORE, 94894582779904, 94894582992895, +STORE, 94894585090048, 94894585094143, +STORE, 94894585094144, 94894585102335, +STORE, 94894585102336, 94894585114623, +STORE, 94894592868352, 94894594293759, +STORE, 139733563842560, 139733565501439, +STORE, 139733565501440, 139733567598591, +STORE, 139733567598592, 139733567614975, +STORE, 139733567614976, 139733567623167, +STORE, 139733567623168, 139733567639551, +STORE, 139733567639552, 139733567651839, +STORE, 139733567651840, 139733569744895, +STORE, 139733569744896, 139733569748991, +STORE, 139733569748992, 139733569753087, +STORE, 139733569753088, 139733569896447, 
+STORE, 139733570265088, 139733571948543, +STORE, 139733571948544, 139733571964927, +STORE, 139733571993600, 139733571997695, +STORE, 139733571997696, 139733572001791, +STORE, 139733572001792, 139733572005887, +STORE, 140726369255424, 140726369394687, +STORE, 140726370402304, 140726370414591, +STORE, 140726370414592, 140726370418687, +STORE, 94899236483072, 94899236696063, +STORE, 94899238793216, 94899238797311, +STORE, 94899238797312, 94899238805503, +STORE, 94899238805504, 94899238817791, +STORE, 94899263045632, 94899263979519, +STORE, 140040959893504, 140040961552383, +STORE, 140040961552384, 140040963649535, +STORE, 140040963649536, 140040963665919, +STORE, 140040963665920, 140040963674111, +STORE, 140040963674112, 140040963690495, +STORE, 140040963690496, 140040963702783, +STORE, 140040963702784, 140040965795839, +STORE, 140040965795840, 140040965799935, +STORE, 140040965799936, 140040965804031, +STORE, 140040965804032, 140040965947391, +STORE, 140040966316032, 140040967999487, +STORE, 140040967999488, 140040968015871, +STORE, 140040968044544, 140040968048639, +STORE, 140040968048640, 140040968052735, +STORE, 140040968052736, 140040968056831, +STORE, 140729921359872, 140729921499135, +STORE, 140729921613824, 140729921626111, +STORE, 140729921626112, 140729921630207, +STORE, 94818265190400, 94818265403391, +STORE, 94818267500544, 94818267504639, +STORE, 94818267504640, 94818267512831, +STORE, 94818267512832, 94818267525119, +STORE, 94818283372544, 94818285858815, +STORE, 139818425675776, 139818427334655, +STORE, 139818427334656, 139818429431807, +STORE, 139818429431808, 139818429448191, +STORE, 139818429448192, 139818429456383, +STORE, 139818429456384, 139818429472767, +STORE, 139818429472768, 139818429485055, +STORE, 139818429485056, 139818431578111, +STORE, 139818431578112, 139818431582207, +STORE, 139818431582208, 139818431586303, +STORE, 139818431586304, 139818431729663, +STORE, 139818432098304, 139818433781759, +STORE, 139818433781760, 139818433798143, +STORE, 139818433826816, 139818433830911, +STORE, 139818433830912, 139818433835007, +STORE, 139818433835008, 139818433839103, +STORE, 140726170509312, 140726170648575, +STORE, 140726171824128, 140726171836415, +STORE, 140726171836416, 140726171840511, +STORE, 94611513188352, 94611513401343, +STORE, 94611515498496, 94611515502591, +STORE, 94611515502592, 94611515510783, +STORE, 94611515510784, 94611515523071, +STORE, 94611516502016, 94611516907519, +STORE, 140596246388736, 140596248047615, +STORE, 140596248047616, 140596250144767, +STORE, 140596250144768, 140596250161151, +STORE, 140596250161152, 140596250169343, +STORE, 140596250169344, 140596250185727, +STORE, 140596250185728, 140596250198015, +STORE, 140596250198016, 140596252291071, +STORE, 140596252291072, 140596252295167, +STORE, 140596252295168, 140596252299263, +STORE, 140596252299264, 140596252442623, +STORE, 140596252811264, 140596254494719, +STORE, 140596254494720, 140596254511103, +STORE, 140596254539776, 140596254543871, +STORE, 140596254543872, 140596254547967, +STORE, 140596254547968, 140596254552063, +STORE, 140731551338496, 140731551477759, +STORE, 140731551780864, 140731551793151, +STORE, 140731551793152, 140731551797247, +STORE, 94313835851776, 94313836064767, +STORE, 94313838161920, 94313838166015, +STORE, 94313838166016, 94313838174207, +STORE, 94313838174208, 94313838186495, +STORE, 94313858416640, 94313861906431, +STORE, 140693503918080, 140693505576959, +STORE, 140693505576960, 140693507674111, +STORE, 140693507674112, 140693507690495, +STORE, 140693507690496, 
140693507698687, +STORE, 140693507698688, 140693507715071, +STORE, 140693507715072, 140693507727359, +STORE, 140693507727360, 140693509820415, +STORE, 140693509820416, 140693509824511, +STORE, 140693509824512, 140693509828607, +STORE, 140693509828608, 140693509971967, +STORE, 140693510340608, 140693512024063, +STORE, 140693512024064, 140693512040447, +STORE, 140693512069120, 140693512073215, +STORE, 140693512073216, 140693512077311, +STORE, 140693512077312, 140693512081407, +STORE, 140721116065792, 140721116205055, +STORE, 140721117831168, 140721117843455, +STORE, 140721117843456, 140721117847551, +STORE, 94843650150400, 94843650363391, +STORE, 94843652460544, 94843652464639, +STORE, 94843652464640, 94843652472831, +STORE, 94843652472832, 94843652485119, +STORE, 94843685388288, 94843686281215, +STORE, 140484193681408, 140484195340287, +STORE, 140484195340288, 140484197437439, +STORE, 140484197437440, 140484197453823, +STORE, 140484197453824, 140484197462015, +STORE, 140484197462016, 140484197478399, +STORE, 140484197478400, 140484197490687, +STORE, 140484197490688, 140484199583743, +STORE, 140484199583744, 140484199587839, +STORE, 140484199587840, 140484199591935, +STORE, 140484199591936, 140484199735295, +STORE, 140484200103936, 140484201787391, +STORE, 140484201787392, 140484201803775, +STORE, 140484201832448, 140484201836543, +STORE, 140484201836544, 140484201840639, +STORE, 140484201840640, 140484201844735, +STORE, 140726294315008, 140726294454271, +STORE, 140726295646208, 140726295658495, +STORE, 140726295658496, 140726295662591, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140720422371328, 140737488351231, +SNULL, 140720422379519, 140737488351231, +STORE, 140720422371328, 140720422379519, +STORE, 140720422240256, 140720422379519, +STORE, 94417967845376, 94417970180095, +SNULL, 94417968058367, 94417970180095, +STORE, 94417967845376, 94417968058367, +STORE, 94417968058368, 94417970180095, +ERASE, 94417968058368, 94417970180095, +STORE, 94417970155520, 94417970167807, +STORE, 94417970167808, 94417970180095, +STORE, 140252450045952, 140252452298751, +SNULL, 140252450189311, 140252452298751, +STORE, 140252450045952, 140252450189311, +STORE, 140252450189312, 140252452298751, +ERASE, 140252450189312, 140252452298751, +STORE, 140252452286464, 140252452294655, +STORE, 140252452294656, 140252452298751, +STORE, 140720422416384, 140720422420479, +STORE, 140720422404096, 140720422416383, +STORE, 140252452257792, 140252452286463, +STORE, 140252452249600, 140252452257791, +STORE, 140252447932416, 140252450045951, +SNULL, 140252447932416, 140252447944703, +STORE, 140252447944704, 140252450045951, +STORE, 140252447932416, 140252447944703, +SNULL, 140252450037759, 140252450045951, +STORE, 140252447944704, 140252450037759, +STORE, 140252450037760, 140252450045951, +ERASE, 140252450037760, 140252450045951, +STORE, 140252450037760, 140252450045951, +STORE, 140252444135424, 140252447932415, +SNULL, 140252444135424, 140252445794303, +STORE, 140252445794304, 140252447932415, +STORE, 140252444135424, 140252445794303, +SNULL, 140252447891455, 140252447932415, +STORE, 140252445794304, 140252447891455, +STORE, 140252447891456, 140252447932415, +SNULL, 140252447891456, 140252447916031, +STORE, 140252447916032, 140252447932415, +STORE, 140252447891456, 140252447916031, +ERASE, 140252447891456, 140252447916031, +STORE, 140252447891456, 140252447916031, +ERASE, 140252447916032, 140252447932415, +STORE, 140252447916032, 140252447932415, +STORE, 140252452241408, 
140252452257791, +SNULL, 140252447907839, 140252447916031, +STORE, 140252447891456, 140252447907839, +STORE, 140252447907840, 140252447916031, +SNULL, 140252450041855, 140252450045951, +STORE, 140252450037760, 140252450041855, +STORE, 140252450041856, 140252450045951, +SNULL, 94417970159615, 94417970167807, +STORE, 94417970155520, 94417970159615, +STORE, 94417970159616, 94417970167807, +SNULL, 140252452290559, 140252452294655, +STORE, 140252452286464, 140252452290559, +STORE, 140252452290560, 140252452294655, +ERASE, 140252452257792, 140252452286463, +STORE, 94417996333056, 94417996468223, +STORE, 140252450557952, 140252452241407, +STORE, 94417996333056, 94417996603391, +STORE, 94417996333056, 94417996738559, +STORE, 94417996333056, 94417996910591, +SNULL, 94417996881919, 94417996910591, +STORE, 94417996333056, 94417996881919, +STORE, 94417996881920, 94417996910591, +ERASE, 94417996881920, 94417996910591, +STORE, 94417996333056, 94417997017087, +STORE, 94417996333056, 94417997152255, +SNULL, 94417997135871, 94417997152255, +STORE, 94417996333056, 94417997135871, +STORE, 94417997135872, 94417997152255, +ERASE, 94417997135872, 94417997152255, +STORE, 94417996333056, 94417997291519, +SNULL, 94417997271039, 94417997291519, +STORE, 94417996333056, 94417997271039, +STORE, 94417997271040, 94417997291519, +ERASE, 94417997271040, 94417997291519, +STORE, 94417996333056, 94417997406207, +SNULL, 94417997381631, 94417997406207, +STORE, 94417996333056, 94417997381631, +STORE, 94417997381632, 94417997406207, +ERASE, 94417997381632, 94417997406207, +STORE, 94417996333056, 94417997516799, +SNULL, 94417997488127, 94417997516799, +STORE, 94417996333056, 94417997488127, +STORE, 94417997488128, 94417997516799, +ERASE, 94417997488128, 94417997516799, +STORE, 94417996333056, 94417997643775, +SNULL, 94417997631487, 94417997643775, +STORE, 94417996333056, 94417997631487, +STORE, 94417997631488, 94417997643775, +ERASE, 94417997631488, 94417997643775, +SNULL, 94417997590527, 94417997631487, +STORE, 94417996333056, 94417997590527, +STORE, 94417997590528, 94417997631487, +ERASE, 94417997590528, 94417997631487, +STORE, 94417996333056, 94417997733887, +STORE, 94417996333056, 94417997869055, +STORE, 94417996333056, 94417998004223, +SNULL, 94417998000127, 94417998004223, +STORE, 94417996333056, 94417998000127, +STORE, 94417998000128, 94417998004223, +ERASE, 94417998000128, 94417998004223, +STORE, 94049170993152, 94049171206143, +STORE, 94049173303296, 94049173307391, +STORE, 94049173307392, 94049173315583, +STORE, 94049173315584, 94049173327871, +STORE, 94049176236032, 94049183645695, +STORE, 139807795544064, 139807797202943, +STORE, 139807797202944, 139807799300095, +STORE, 139807799300096, 139807799316479, +STORE, 139807799316480, 139807799324671, +STORE, 139807799324672, 139807799341055, +STORE, 139807799341056, 139807799353343, +STORE, 139807799353344, 139807801446399, +STORE, 139807801446400, 139807801450495, +STORE, 139807801450496, 139807801454591, +STORE, 139807801454592, 139807801597951, +STORE, 139807801966592, 139807803650047, +STORE, 139807803650048, 139807803666431, +STORE, 139807803695104, 139807803699199, +STORE, 139807803699200, 139807803703295, +STORE, 139807803703296, 139807803707391, +STORE, 140727555538944, 140727555678207, +STORE, 140727555940352, 140727555952639, +STORE, 140727555952640, 140727555956735, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722483441664, 140737488351231, +SNULL, 140722483449855, 140737488351231, +STORE, 140722483441664, 
140722483449855, +STORE, 140722483310592, 140722483449855, +STORE, 94416704921600, 94416707145727, +SNULL, 94416705032191, 94416707145727, +STORE, 94416704921600, 94416705032191, +STORE, 94416705032192, 94416707145727, +ERASE, 94416705032192, 94416707145727, +STORE, 94416707125248, 94416707137535, +STORE, 94416707137536, 94416707145727, +STORE, 140555439296512, 140555441549311, +SNULL, 140555439439871, 140555441549311, +STORE, 140555439296512, 140555439439871, +STORE, 140555439439872, 140555441549311, +ERASE, 140555439439872, 140555441549311, +STORE, 140555441537024, 140555441545215, +STORE, 140555441545216, 140555441549311, +STORE, 140722484781056, 140722484785151, +STORE, 140722484768768, 140722484781055, +STORE, 140555441508352, 140555441537023, +STORE, 140555441500160, 140555441508351, +STORE, 140555435499520, 140555439296511, +SNULL, 140555435499520, 140555437158399, +STORE, 140555437158400, 140555439296511, +STORE, 140555435499520, 140555437158399, +SNULL, 140555439255551, 140555439296511, +STORE, 140555437158400, 140555439255551, +STORE, 140555439255552, 140555439296511, +SNULL, 140555439255552, 140555439280127, +STORE, 140555439280128, 140555439296511, +STORE, 140555439255552, 140555439280127, +ERASE, 140555439255552, 140555439280127, +STORE, 140555439255552, 140555439280127, +ERASE, 140555439280128, 140555439296511, +STORE, 140555439280128, 140555439296511, +SNULL, 140555439271935, 140555439280127, +STORE, 140555439255552, 140555439271935, +STORE, 140555439271936, 140555439280127, +SNULL, 94416707133439, 94416707137535, +STORE, 94416707125248, 94416707133439, +STORE, 94416707133440, 94416707137535, +SNULL, 140555441541119, 140555441545215, +STORE, 140555441537024, 140555441541119, +STORE, 140555441541120, 140555441545215, +ERASE, 140555441508352, 140555441537023, +STORE, 94416724672512, 94416724807679, +STORE, 94686636953600, 94686637166591, +STORE, 94686639263744, 94686639267839, +STORE, 94686639267840, 94686639276031, +STORE, 94686639276032, 94686639288319, +STORE, 94686662193152, 94686663163903, +STORE, 140312944431104, 140312946089983, +STORE, 140312946089984, 140312948187135, +STORE, 140312948187136, 140312948203519, +STORE, 140312948203520, 140312948211711, +STORE, 140312948211712, 140312948228095, +STORE, 140312948228096, 140312948240383, +STORE, 140312948240384, 140312950333439, +STORE, 140312950333440, 140312950337535, +STORE, 140312950337536, 140312950341631, +STORE, 140312950341632, 140312950484991, +STORE, 140312950853632, 140312952537087, +STORE, 140312952537088, 140312952553471, +STORE, 140312952582144, 140312952586239, +STORE, 140312952586240, 140312952590335, +STORE, 140312952590336, 140312952594431, +STORE, 140730598920192, 140730599059455, +STORE, 140730599108608, 140730599120895, +STORE, 140730599120896, 140730599124991, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140726234079232, 140737488351231, +SNULL, 140726234087423, 140737488351231, +STORE, 140726234079232, 140726234087423, +STORE, 140726233948160, 140726234087423, +STORE, 94589467578368, 94589469802495, +SNULL, 94589467688959, 94589469802495, +STORE, 94589467578368, 94589467688959, +STORE, 94589467688960, 94589469802495, +ERASE, 94589467688960, 94589469802495, +STORE, 94589469782016, 94589469794303, +STORE, 94589469794304, 94589469802495, +STORE, 140587082842112, 140587085094911, +SNULL, 140587082985471, 140587085094911, +STORE, 140587082842112, 140587082985471, +STORE, 140587082985472, 140587085094911, +ERASE, 140587082985472, 140587085094911, +STORE, 
140587085082624, 140587085090815, +STORE, 140587085090816, 140587085094911, +STORE, 140726234103808, 140726234107903, +STORE, 140726234091520, 140726234103807, +STORE, 140587085053952, 140587085082623, +STORE, 140587085045760, 140587085053951, +STORE, 140587079045120, 140587082842111, +SNULL, 140587079045120, 140587080703999, +STORE, 140587080704000, 140587082842111, +STORE, 140587079045120, 140587080703999, +SNULL, 140587082801151, 140587082842111, +STORE, 140587080704000, 140587082801151, +STORE, 140587082801152, 140587082842111, +SNULL, 140587082801152, 140587082825727, +STORE, 140587082825728, 140587082842111, +STORE, 140587082801152, 140587082825727, +ERASE, 140587082801152, 140587082825727, +STORE, 140587082801152, 140587082825727, +ERASE, 140587082825728, 140587082842111, +STORE, 140587082825728, 140587082842111, +SNULL, 140587082817535, 140587082825727, +STORE, 140587082801152, 140587082817535, +STORE, 140587082817536, 140587082825727, +SNULL, 94589469790207, 94589469794303, +STORE, 94589469782016, 94589469790207, +STORE, 94589469790208, 94589469794303, +SNULL, 140587085086719, 140587085090815, +STORE, 140587085082624, 140587085086719, +STORE, 140587085086720, 140587085090815, +ERASE, 140587085053952, 140587085082623, +STORE, 94589477507072, 94589477642239, +STORE, 94225448325120, 94225448538111, +STORE, 94225450635264, 94225450639359, +STORE, 94225450639360, 94225450647551, +STORE, 94225450647552, 94225450659839, +STORE, 94225470246912, 94225473548287, +STORE, 140199245496320, 140199247155199, +STORE, 140199247155200, 140199249252351, +STORE, 140199249252352, 140199249268735, +STORE, 140199249268736, 140199249276927, +STORE, 140199249276928, 140199249293311, +STORE, 140199249293312, 140199249305599, +STORE, 140199249305600, 140199251398655, +STORE, 140199251398656, 140199251402751, +STORE, 140199251402752, 140199251406847, +STORE, 140199251406848, 140199251550207, +STORE, 140199251918848, 140199253602303, +STORE, 140199253602304, 140199253618687, +STORE, 140199253647360, 140199253651455, +STORE, 140199253651456, 140199253655551, +STORE, 140199253655552, 140199253659647, +STORE, 140726264414208, 140726264553471, +STORE, 140726265843712, 140726265855999, +STORE, 140726265856000, 140726265860095, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733508358144, 140737488351231, +SNULL, 140733508366335, 140737488351231, +STORE, 140733508358144, 140733508366335, +STORE, 140733508227072, 140733508366335, +STORE, 94766263947264, 94766266171391, +SNULL, 94766264057855, 94766266171391, +STORE, 94766263947264, 94766264057855, +STORE, 94766264057856, 94766266171391, +ERASE, 94766264057856, 94766266171391, +STORE, 94766266150912, 94766266163199, +STORE, 94766266163200, 94766266171391, +STORE, 140693985132544, 140693987385343, +SNULL, 140693985275903, 140693987385343, +STORE, 140693985132544, 140693985275903, +STORE, 140693985275904, 140693987385343, +ERASE, 140693985275904, 140693987385343, +STORE, 140693987373056, 140693987381247, +STORE, 140693987381248, 140693987385343, +STORE, 140733509939200, 140733509943295, +STORE, 140733509926912, 140733509939199, +STORE, 140693987344384, 140693987373055, +STORE, 140693987336192, 140693987344383, +STORE, 140693981335552, 140693985132543, +SNULL, 140693981335552, 140693982994431, +STORE, 140693982994432, 140693985132543, +STORE, 140693981335552, 140693982994431, +SNULL, 140693985091583, 140693985132543, +STORE, 140693982994432, 140693985091583, +STORE, 140693985091584, 140693985132543, +SNULL, 140693985091584, 
140693985116159, +STORE, 140693985116160, 140693985132543, +STORE, 140693985091584, 140693985116159, +ERASE, 140693985091584, 140693985116159, +STORE, 140693985091584, 140693985116159, +ERASE, 140693985116160, 140693985132543, +STORE, 140693985116160, 140693985132543, +SNULL, 140693985107967, 140693985116159, +STORE, 140693985091584, 140693985107967, +STORE, 140693985107968, 140693985116159, +SNULL, 94766266159103, 94766266163199, +STORE, 94766266150912, 94766266159103, +STORE, 94766266159104, 94766266163199, +SNULL, 140693987377151, 140693987381247, +STORE, 140693987373056, 140693987377151, +STORE, 140693987377152, 140693987381247, +ERASE, 140693987344384, 140693987373055, +STORE, 94766282035200, 94766282170367, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724769353728, 140737488351231, +SNULL, 140724769361919, 140737488351231, +STORE, 140724769353728, 140724769361919, +STORE, 140724769222656, 140724769361919, +STORE, 94710460526592, 94710462750719, +SNULL, 94710460637183, 94710462750719, +STORE, 94710460526592, 94710460637183, +STORE, 94710460637184, 94710462750719, +ERASE, 94710460637184, 94710462750719, +STORE, 94710462730240, 94710462742527, +STORE, 94710462742528, 94710462750719, +STORE, 140469764395008, 140469766647807, +SNULL, 140469764538367, 140469766647807, +STORE, 140469764395008, 140469764538367, +STORE, 140469764538368, 140469766647807, +ERASE, 140469764538368, 140469766647807, +STORE, 140469766635520, 140469766643711, +STORE, 140469766643712, 140469766647807, +STORE, 140724770877440, 140724770881535, +STORE, 140724770865152, 140724770877439, +STORE, 140469766606848, 140469766635519, +STORE, 140469766598656, 140469766606847, +STORE, 140469760598016, 140469764395007, +SNULL, 140469760598016, 140469762256895, +STORE, 140469762256896, 140469764395007, +STORE, 140469760598016, 140469762256895, +SNULL, 140469764354047, 140469764395007, +STORE, 140469762256896, 140469764354047, +STORE, 140469764354048, 140469764395007, +SNULL, 140469764354048, 140469764378623, +STORE, 140469764378624, 140469764395007, +STORE, 140469764354048, 140469764378623, +ERASE, 140469764354048, 140469764378623, +STORE, 140469764354048, 140469764378623, +ERASE, 140469764378624, 140469764395007, +STORE, 140469764378624, 140469764395007, +SNULL, 140469764370431, 140469764378623, +STORE, 140469764354048, 140469764370431, +STORE, 140469764370432, 140469764378623, +SNULL, 94710462738431, 94710462742527, +STORE, 94710462730240, 94710462738431, +STORE, 94710462738432, 94710462742527, +SNULL, 140469766639615, 140469766643711, +STORE, 140469766635520, 140469766639615, +STORE, 140469766639616, 140469766643711, +ERASE, 140469766606848, 140469766635519, +STORE, 94710485581824, 94710485716991, +STORE, 94105755795456, 94105756008447, +STORE, 94105758105600, 94105758109695, +STORE, 94105758109696, 94105758117887, +STORE, 94105758117888, 94105758130175, +STORE, 94105788981248, 94105794871295, +STORE, 140641190031360, 140641191690239, +STORE, 140641191690240, 140641193787391, +STORE, 140641193787392, 140641193803775, +STORE, 140641193803776, 140641193811967, +STORE, 140641193811968, 140641193828351, +STORE, 140641193828352, 140641193840639, +STORE, 140641193840640, 140641195933695, +STORE, 140641195933696, 140641195937791, +STORE, 140641195937792, 140641195941887, +STORE, 140641195941888, 140641196085247, +STORE, 140641196453888, 140641198137343, +STORE, 140641198137344, 140641198153727, +STORE, 140641198182400, 140641198186495, +STORE, 140641198186496, 140641198190591, +STORE, 
140641198190592, 140641198194687, +STORE, 140731980034048, 140731980173311, +STORE, 140731981078528, 140731981090815, +STORE, 140731981090816, 140731981094911, +STORE, 93828086431744, 93828086644735, +STORE, 93828088741888, 93828088745983, +STORE, 93828088745984, 93828088754175, +STORE, 93828088754176, 93828088766463, +STORE, 93828094193664, 93828096831487, +STORE, 139844717334528, 139844718993407, +STORE, 139844718993408, 139844721090559, +STORE, 139844721090560, 139844721106943, +STORE, 139844721106944, 139844721115135, +STORE, 139844721115136, 139844721131519, +STORE, 139844721131520, 139844721143807, +STORE, 139844721143808, 139844723236863, +STORE, 139844723236864, 139844723240959, +STORE, 139844723240960, 139844723245055, +STORE, 139844723245056, 139844723388415, +STORE, 139844723757056, 139844725440511, +STORE, 139844725440512, 139844725456895, +STORE, 139844725485568, 139844725489663, +STORE, 139844725489664, 139844725493759, +STORE, 139844725493760, 139844725497855, +STORE, 140729996185600, 140729996324863, +STORE, 140729996828672, 140729996840959, +STORE, 140729996840960, 140729996845055, +STORE, 140737488347136, 140737488351231, +STORE, 140722494771200, 140737488351231, +SNULL, 140722494775295, 140737488351231, +STORE, 140722494771200, 140722494775295, +STORE, 140722494640128, 140722494775295, +STORE, 94324011311104, 94324013535231, +SNULL, 94324011421695, 94324013535231, +STORE, 94324011311104, 94324011421695, +STORE, 94324011421696, 94324013535231, +ERASE, 94324011421696, 94324013535231, +STORE, 94324013514752, 94324013527039, +STORE, 94324013527040, 94324013535231, +STORE, 140151462309888, 140151464562687, +SNULL, 140151462453247, 140151464562687, +STORE, 140151462309888, 140151462453247, +STORE, 140151462453248, 140151464562687, +ERASE, 140151462453248, 140151464562687, +STORE, 140151464550400, 140151464558591, +STORE, 140151464558592, 140151464562687, +STORE, 140722495467520, 140722495471615, +STORE, 140722495455232, 140722495467519, +STORE, 140151464521728, 140151464550399, +STORE, 140151464513536, 140151464521727, +STORE, 140151458512896, 140151462309887, +SNULL, 140151458512896, 140151460171775, +STORE, 140151460171776, 140151462309887, +STORE, 140151458512896, 140151460171775, +SNULL, 140151462268927, 140151462309887, +STORE, 140151460171776, 140151462268927, +STORE, 140151462268928, 140151462309887, +SNULL, 140151462268928, 140151462293503, +STORE, 140151462293504, 140151462309887, +STORE, 140151462268928, 140151462293503, +ERASE, 140151462268928, 140151462293503, +STORE, 140151462268928, 140151462293503, +ERASE, 140151462293504, 140151462309887, +STORE, 140151462293504, 140151462309887, +SNULL, 140151462285311, 140151462293503, +STORE, 140151462268928, 140151462285311, +STORE, 140151462285312, 140151462293503, +SNULL, 94324013522943, 94324013527039, +STORE, 94324013514752, 94324013522943, +STORE, 94324013522944, 94324013527039, +SNULL, 140151464554495, 140151464558591, +STORE, 140151464550400, 140151464554495, +STORE, 140151464554496, 140151464558591, +ERASE, 140151464521728, 140151464550399, +STORE, 94324024778752, 94324024913919, +STORE, 94899262967808, 94899263180799, +STORE, 94899265277952, 94899265282047, +STORE, 94899265282048, 94899265290239, +STORE, 94899265290240, 94899265302527, +STORE, 94899295469568, 94899298689023, +STORE, 140434388418560, 140434390077439, +STORE, 140434390077440, 140434392174591, +STORE, 140434392174592, 140434392190975, +STORE, 140434392190976, 140434392199167, +STORE, 140434392199168, 140434392215551, +STORE, 140434392215552, 
140434392227839, +STORE, 140434392227840, 140434394320895, +STORE, 140434394320896, 140434394324991, +STORE, 140434394324992, 140434394329087, +STORE, 140434394329088, 140434394472447, +STORE, 140434394841088, 140434396524543, +STORE, 140434396524544, 140434396540927, +STORE, 140434396569600, 140434396573695, +STORE, 140434396573696, 140434396577791, +STORE, 140434396577792, 140434396581887, +STORE, 140720618135552, 140720618274815, +STORE, 140720618418176, 140720618430463, +STORE, 140720618430464, 140720618434559, +STORE, 94425529798656, 94425530011647, +STORE, 94425532108800, 94425532112895, +STORE, 94425532112896, 94425532121087, +STORE, 94425532121088, 94425532133375, +STORE, 94425557753856, 94425566576639, +STORE, 140600528470016, 140600530128895, +STORE, 140600530128896, 140600532226047, +STORE, 140600532226048, 140600532242431, +STORE, 140600532242432, 140600532250623, +STORE, 140600532250624, 140600532267007, +STORE, 140600532267008, 140600532279295, +STORE, 140600532279296, 140600534372351, +STORE, 140600534372352, 140600534376447, +STORE, 140600534376448, 140600534380543, +STORE, 140600534380544, 140600534523903, +STORE, 140600534892544, 140600536575999, +STORE, 140600536576000, 140600536592383, +STORE, 140600536621056, 140600536625151, +STORE, 140600536625152, 140600536629247, +STORE, 140600536629248, 140600536633343, +STORE, 140721857785856, 140721857925119, +STORE, 140721858068480, 140721858080767, +STORE, 140721858080768, 140721858084863, +STORE, 94425529798656, 94425530011647, +STORE, 94425532108800, 94425532112895, +STORE, 94425532112896, 94425532121087, +STORE, 94425532121088, 94425532133375, +STORE, 94425557753856, 94425568772095, +STORE, 140600528470016, 140600530128895, +STORE, 140600530128896, 140600532226047, +STORE, 140600532226048, 140600532242431, +STORE, 140600532242432, 140600532250623, +STORE, 140600532250624, 140600532267007, +STORE, 140600532267008, 140600532279295, +STORE, 140600532279296, 140600534372351, +STORE, 140600534372352, 140600534376447, +STORE, 140600534376448, 140600534380543, +STORE, 140600534380544, 140600534523903, +STORE, 140600534892544, 140600536575999, +STORE, 140600536576000, 140600536592383, +STORE, 140600536621056, 140600536625151, +STORE, 140600536625152, 140600536629247, +STORE, 140600536629248, 140600536633343, +STORE, 140721857785856, 140721857925119, +STORE, 140721858068480, 140721858080767, +STORE, 140721858080768, 140721858084863, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735611645952, 140737488351231, +SNULL, 140735611654143, 140737488351231, +STORE, 140735611645952, 140735611654143, +STORE, 140735611514880, 140735611654143, +STORE, 94592137641984, 94592139866111, +SNULL, 94592137752575, 94592139866111, +STORE, 94592137641984, 94592137752575, +STORE, 94592137752576, 94592139866111, +ERASE, 94592137752576, 94592139866111, +STORE, 94592139845632, 94592139857919, +STORE, 94592139857920, 94592139866111, +STORE, 140350425030656, 140350427283455, +SNULL, 140350425174015, 140350427283455, +STORE, 140350425030656, 140350425174015, +STORE, 140350425174016, 140350427283455, +ERASE, 140350425174016, 140350427283455, +STORE, 140350427271168, 140350427279359, +STORE, 140350427279360, 140350427283455, +STORE, 140735612043264, 140735612047359, +STORE, 140735612030976, 140735612043263, +STORE, 140350427242496, 140350427271167, +STORE, 140350427234304, 140350427242495, +STORE, 140350421233664, 140350425030655, +SNULL, 140350421233664, 140350422892543, +STORE, 140350422892544, 140350425030655, 
+STORE, 140350421233664, 140350422892543, +SNULL, 140350424989695, 140350425030655, +STORE, 140350422892544, 140350424989695, +STORE, 140350424989696, 140350425030655, +SNULL, 140350424989696, 140350425014271, +STORE, 140350425014272, 140350425030655, +STORE, 140350424989696, 140350425014271, +ERASE, 140350424989696, 140350425014271, +STORE, 140350424989696, 140350425014271, +ERASE, 140350425014272, 140350425030655, +STORE, 140350425014272, 140350425030655, +SNULL, 140350425006079, 140350425014271, +STORE, 140350424989696, 140350425006079, +STORE, 140350425006080, 140350425014271, +SNULL, 94592139853823, 94592139857919, +STORE, 94592139845632, 94592139853823, +STORE, 94592139853824, 94592139857919, +SNULL, 140350427275263, 140350427279359, +STORE, 140350427271168, 140350427275263, +STORE, 140350427275264, 140350427279359, +ERASE, 140350427242496, 140350427271167, +STORE, 94592164823040, 94592164958207, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723500535808, 140737488351231, +SNULL, 140723500543999, 140737488351231, +STORE, 140723500535808, 140723500543999, +STORE, 140723500404736, 140723500543999, +STORE, 94458379010048, 94458381234175, +SNULL, 94458379120639, 94458381234175, +STORE, 94458379010048, 94458379120639, +STORE, 94458379120640, 94458381234175, +ERASE, 94458379120640, 94458381234175, +STORE, 94458381213696, 94458381225983, +STORE, 94458381225984, 94458381234175, +STORE, 139771674230784, 139771676483583, +SNULL, 139771674374143, 139771676483583, +STORE, 139771674230784, 139771674374143, +STORE, 139771674374144, 139771676483583, +ERASE, 139771674374144, 139771676483583, +STORE, 139771676471296, 139771676479487, +STORE, 139771676479488, 139771676483583, +STORE, 140723500769280, 140723500773375, +STORE, 140723500756992, 140723500769279, +STORE, 139771676442624, 139771676471295, +STORE, 139771676434432, 139771676442623, +STORE, 139771670433792, 139771674230783, +SNULL, 139771670433792, 139771672092671, +STORE, 139771672092672, 139771674230783, +STORE, 139771670433792, 139771672092671, +SNULL, 139771674189823, 139771674230783, +STORE, 139771672092672, 139771674189823, +STORE, 139771674189824, 139771674230783, +SNULL, 139771674189824, 139771674214399, +STORE, 139771674214400, 139771674230783, +STORE, 139771674189824, 139771674214399, +ERASE, 139771674189824, 139771674214399, +STORE, 139771674189824, 139771674214399, +ERASE, 139771674214400, 139771674230783, +STORE, 139771674214400, 139771674230783, +SNULL, 139771674206207, 139771674214399, +STORE, 139771674189824, 139771674206207, +STORE, 139771674206208, 139771674214399, +SNULL, 94458381221887, 94458381225983, +STORE, 94458381213696, 94458381221887, +STORE, 94458381221888, 94458381225983, +SNULL, 139771676475391, 139771676479487, +STORE, 139771676471296, 139771676475391, +STORE, 139771676475392, 139771676479487, +ERASE, 139771676442624, 139771676471295, +STORE, 94458401873920, 94458402009087, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140731316264960, 140737488351231, +SNULL, 140731316273151, 140737488351231, +STORE, 140731316264960, 140731316273151, +STORE, 140731316133888, 140731316273151, +STORE, 94437830881280, 94437833215999, +SNULL, 94437831094271, 94437833215999, +STORE, 94437830881280, 94437831094271, +STORE, 94437831094272, 94437833215999, +ERASE, 94437831094272, 94437833215999, +STORE, 94437833191424, 94437833203711, +STORE, 94437833203712, 94437833215999, +STORE, 140265986031616, 140265988284415, +SNULL, 140265986174975, 
140265988284415, +STORE, 140265986031616, 140265986174975, +STORE, 140265986174976, 140265988284415, +ERASE, 140265986174976, 140265988284415, +STORE, 140265988272128, 140265988280319, +STORE, 140265988280320, 140265988284415, +STORE, 140731316318208, 140731316322303, +STORE, 140731316305920, 140731316318207, +STORE, 140265988243456, 140265988272127, +STORE, 140265988235264, 140265988243455, +STORE, 140265983918080, 140265986031615, +SNULL, 140265983918080, 140265983930367, +STORE, 140265983930368, 140265986031615, +STORE, 140265983918080, 140265983930367, +SNULL, 140265986023423, 140265986031615, +STORE, 140265983930368, 140265986023423, +STORE, 140265986023424, 140265986031615, +ERASE, 140265986023424, 140265986031615, +STORE, 140265986023424, 140265986031615, +STORE, 140265980121088, 140265983918079, +SNULL, 140265980121088, 140265981779967, +STORE, 140265981779968, 140265983918079, +STORE, 140265980121088, 140265981779967, +SNULL, 140265983877119, 140265983918079, +STORE, 140265981779968, 140265983877119, +STORE, 140265983877120, 140265983918079, +SNULL, 140265983877120, 140265983901695, +STORE, 140265983901696, 140265983918079, +STORE, 140265983877120, 140265983901695, +ERASE, 140265983877120, 140265983901695, +STORE, 140265983877120, 140265983901695, +ERASE, 140265983901696, 140265983918079, +STORE, 140265983901696, 140265983918079, +STORE, 140265988227072, 140265988243455, +SNULL, 140265983893503, 140265983901695, +STORE, 140265983877120, 140265983893503, +STORE, 140265983893504, 140265983901695, +SNULL, 140265986027519, 140265986031615, +STORE, 140265986023424, 140265986027519, +STORE, 140265986027520, 140265986031615, +SNULL, 94437833195519, 94437833203711, +STORE, 94437833191424, 94437833195519, +STORE, 94437833195520, 94437833203711, +SNULL, 140265988276223, 140265988280319, +STORE, 140265988272128, 140265988276223, +STORE, 140265988276224, 140265988280319, +ERASE, 140265988243456, 140265988272127, +STORE, 94437847638016, 94437847773183, +STORE, 140265986543616, 140265988227071, +STORE, 94437847638016, 94437847908351, +STORE, 94437847638016, 94437848043519, +STORE, 94437847638016, 94437848190975, +SNULL, 94437848178687, 94437848190975, +STORE, 94437847638016, 94437848178687, +STORE, 94437848178688, 94437848190975, +ERASE, 94437848178688, 94437848190975, +STORE, 94437847638016, 94437848330239, +STORE, 94437847638016, 94437848465407, +SNULL, 94437848444927, 94437848465407, +STORE, 94437847638016, 94437848444927, +STORE, 94437848444928, 94437848465407, +ERASE, 94437848444928, 94437848465407, +STORE, 94437847638016, 94437848584191, +STORE, 94437847638016, 94437848719359, +SNULL, 94437848678399, 94437848719359, +STORE, 94437847638016, 94437848678399, +STORE, 94437848678400, 94437848719359, +ERASE, 94437848678400, 94437848719359, +STORE, 94437847638016, 94437848842239, +SNULL, 94437848825855, 94437848842239, +STORE, 94437847638016, 94437848825855, +STORE, 94437848825856, 94437848842239, +ERASE, 94437848825856, 94437848842239, +STORE, 94437847638016, 94437848961023, +STORE, 94437847638016, 94437849096191, +STORE, 94661814710272, 94661814923263, +STORE, 94661817020416, 94661817024511, +STORE, 94661817024512, 94661817032703, +STORE, 94661817032704, 94661817044991, +STORE, 94661840424960, 94661841240063, +STORE, 140582259814400, 140582261473279, +STORE, 140582261473280, 140582263570431, +STORE, 140582263570432, 140582263586815, +STORE, 140582263586816, 140582263595007, +STORE, 140582263595008, 140582263611391, +STORE, 140582263611392, 140582263623679, +STORE, 140582263623680, 
140582265716735, +STORE, 140582265716736, 140582265720831, +STORE, 140582265720832, 140582265724927, +STORE, 140582265724928, 140582265868287, +STORE, 140582266236928, 140582267920383, +STORE, 140582267920384, 140582267936767, +STORE, 140582267965440, 140582267969535, +STORE, 140582267969536, 140582267973631, +STORE, 140582267973632, 140582267977727, +STORE, 140735472508928, 140735472648191, +STORE, 140735472672768, 140735472685055, +STORE, 140735472685056, 140735472689151, +STORE, 94440069140480, 94440069353471, +STORE, 94440071450624, 94440071454719, +STORE, 94440071454720, 94440071462911, +STORE, 94440071462912, 94440071475199, +STORE, 94440072122368, 94440079048703, +STORE, 140112218095616, 140112219754495, +STORE, 140112219754496, 140112221851647, +STORE, 140112221851648, 140112221868031, +STORE, 140112221868032, 140112221876223, +STORE, 140112221876224, 140112221892607, +STORE, 140112221892608, 140112221904895, +STORE, 140112221904896, 140112223997951, +STORE, 140112223997952, 140112224002047, +STORE, 140112224002048, 140112224006143, +STORE, 140112224006144, 140112224149503, +STORE, 140112224518144, 140112226201599, +STORE, 140112226201600, 140112226217983, +STORE, 140112226246656, 140112226250751, +STORE, 140112226250752, 140112226254847, +STORE, 140112226254848, 140112226258943, +STORE, 140737460969472, 140737461108735, +STORE, 140737462083584, 140737462095871, +STORE, 140737462095872, 140737462099967, +STORE, 94257654345728, 94257654390783, +STORE, 94257656483840, 94257656487935, +STORE, 94257656487936, 94257656492031, +STORE, 94257656492032, 94257656496127, +STORE, 94257665859584, 94257665994751, +STORE, 140507070345216, 140507070386175, +STORE, 140507070386176, 140507072483327, +STORE, 140507072483328, 140507072487423, +STORE, 140507072487424, 140507072491519, +STORE, 140507072491520, 140507072516095, +STORE, 140507072516096, 140507072561151, +STORE, 140507072561152, 140507074654207, +STORE, 140507074654208, 140507074658303, +STORE, 140507074658304, 140507074662399, +STORE, 140507074662400, 140507074744319, +STORE, 140507074744320, 140507076841471, +STORE, 140507076841472, 140507076845567, +STORE, 140507076845568, 140507076849663, +STORE, 140507076849664, 140507076857855, +STORE, 140507076857856, 140507076886527, +STORE, 140507076886528, 140507078979583, +STORE, 140507078979584, 140507078983679, +STORE, 140507078983680, 140507078987775, +STORE, 140507078987776, 140507079086079, +STORE, 140507079086080, 140507081179135, +STORE, 140507081179136, 140507081183231, +STORE, 140507081183232, 140507081187327, +STORE, 140507081187328, 140507081203711, +STORE, 140507081203712, 140507081220095, +STORE, 140507081220096, 140507083317247, +STORE, 140507083317248, 140507083321343, +STORE, 140507083321344, 140507083325439, +STORE, 140507083325440, 140507083792383, +STORE, 140507083792384, 140507085885439, +STORE, 140507085885440, 140507085889535, +STORE, 140507085889536, 140507085893631, +STORE, 140507085893632, 140507085905919, +STORE, 140507085905920, 140507087998975, +STORE, 140507087998976, 140507088003071, +STORE, 140507088003072, 140507088007167, +STORE, 140507088007168, 140507088125951, +STORE, 140507088125952, 140507090219007, +STORE, 140507090219008, 140507090223103, +STORE, 140507090223104, 140507090227199, +STORE, 140507090227200, 140507090268159, +STORE, 140507090268160, 140507091927039, +STORE, 140507091927040, 140507094024191, +STORE, 140507094024192, 140507094040575, +STORE, 140507094040576, 140507094048767, +STORE, 140507094048768, 140507094065151, +STORE, 140507094065152, 
140507094216703, +STORE, 140507094216704, 140507096309759, +STORE, 140507096309760, 140507096313855, +STORE, 140507096313856, 140507096317951, +STORE, 140507096317952, 140507096326143, +STORE, 140507096326144, 140507096379391, +STORE, 140507096379392, 140507098472447, +STORE, 140507098472448, 140507098476543, +STORE, 140507098476544, 140507098480639, +STORE, 140507098480640, 140507098623999, +STORE, 140507098980352, 140507100663807, +STORE, 140507100663808, 140507100692479, +STORE, 140507100721152, 140507100725247, +STORE, 140507100725248, 140507100729343, +STORE, 140507100729344, 140507100733439, +STORE, 140728152780800, 140728152915967, +STORE, 140728153698304, 140728153710591, +STORE, 140728153710592, 140728153714687, +STORE, 140507068137472, 140507070345215, +SNULL, 140507068137472, 140507068190719, +STORE, 140507068190720, 140507070345215, +STORE, 140507068137472, 140507068190719, +SNULL, 140507070287871, 140507070345215, +STORE, 140507068190720, 140507070287871, +STORE, 140507070287872, 140507070345215, +SNULL, 140507070287872, 140507070296063, +STORE, 140507070296064, 140507070345215, +STORE, 140507070287872, 140507070296063, +ERASE, 140507070287872, 140507070296063, +STORE, 140507070287872, 140507070296063, +ERASE, 140507070296064, 140507070345215, +STORE, 140507070296064, 140507070345215, +STORE, 140507100692480, 140507100721151, +STORE, 140507065810944, 140507068137471, +SNULL, 140507065810944, 140507065843711, +STORE, 140507065843712, 140507068137471, +STORE, 140507065810944, 140507065843711, +SNULL, 140507067940863, 140507068137471, +STORE, 140507065843712, 140507067940863, +STORE, 140507067940864, 140507068137471, +SNULL, 140507067940864, 140507067949055, +STORE, 140507067949056, 140507068137471, +STORE, 140507067940864, 140507067949055, +ERASE, 140507067940864, 140507067949055, +STORE, 140507067940864, 140507067949055, +ERASE, 140507067949056, 140507068137471, +STORE, 140507067949056, 140507068137471, +SNULL, 140507067944959, 140507067949055, +STORE, 140507067940864, 140507067944959, +STORE, 140507067944960, 140507067949055, +SNULL, 140507070291967, 140507070296063, +STORE, 140507070287872, 140507070291967, +STORE, 140507070291968, 140507070296063, +ERASE, 140507100692480, 140507100721151, +STORE, 140507063705600, 140507065810943, +SNULL, 140507063705600, 140507063709695, +STORE, 140507063709696, 140507065810943, +STORE, 140507063705600, 140507063709695, +SNULL, 140507065802751, 140507065810943, +STORE, 140507063709696, 140507065802751, +STORE, 140507065802752, 140507065810943, +ERASE, 140507065802752, 140507065810943, +STORE, 140507065802752, 140507065810943, +SNULL, 140507065806847, 140507065810943, +STORE, 140507065802752, 140507065806847, +STORE, 140507065806848, 140507065810943, +STORE, 140507061600256, 140507063705599, +SNULL, 140507061600256, 140507061604351, +STORE, 140507061604352, 140507063705599, +STORE, 140507061600256, 140507061604351, +SNULL, 140507063697407, 140507063705599, +STORE, 140507061604352, 140507063697407, +STORE, 140507063697408, 140507063705599, +ERASE, 140507063697408, 140507063705599, +STORE, 140507063697408, 140507063705599, +SNULL, 140507063701503, 140507063705599, +STORE, 140507063697408, 140507063701503, +STORE, 140507063701504, 140507063705599, +STORE, 140507059490816, 140507061600255, +SNULL, 140507059490816, 140507059499007, +STORE, 140507059499008, 140507061600255, +STORE, 140507059490816, 140507059499007, +SNULL, 140507061592063, 140507061600255, +STORE, 140507059499008, 140507061592063, +STORE, 140507061592064, 140507061600255, +ERASE, 
140507061592064, 140507061600255, +STORE, 140507061592064, 140507061600255, +SNULL, 140507061596159, 140507061600255, +STORE, 140507061592064, 140507061596159, +STORE, 140507061596160, 140507061600255, +STORE, 140507057377280, 140507059490815, +SNULL, 140507057377280, 140507057389567, +STORE, 140507057389568, 140507059490815, +STORE, 140507057377280, 140507057389567, +SNULL, 140507059482623, 140507059490815, +STORE, 140507057389568, 140507059482623, +STORE, 140507059482624, 140507059490815, +ERASE, 140507059482624, 140507059490815, +STORE, 140507059482624, 140507059490815, +SNULL, 140507059486719, 140507059490815, +STORE, 140507059482624, 140507059486719, +STORE, 140507059486720, 140507059490815, +STORE, 140507055255552, 140507057377279, +SNULL, 140507055255552, 140507055276031, +STORE, 140507055276032, 140507057377279, +STORE, 140507055255552, 140507055276031, +SNULL, 140507057369087, 140507057377279, +STORE, 140507055276032, 140507057369087, +STORE, 140507057369088, 140507057377279, +ERASE, 140507057369088, 140507057377279, +STORE, 140507057369088, 140507057377279, +SNULL, 140507057373183, 140507057377279, +STORE, 140507057369088, 140507057373183, +STORE, 140507057373184, 140507057377279, +STORE, 140507098693632, 140507098980351, +SNULL, 140507098959871, 140507098980351, +STORE, 140507098693632, 140507098959871, +STORE, 140507098959872, 140507098980351, +SNULL, 140507098959872, 140507098976255, +STORE, 140507098976256, 140507098980351, +STORE, 140507098959872, 140507098976255, +ERASE, 140507098959872, 140507098976255, +STORE, 140507098959872, 140507098976255, +ERASE, 140507098976256, 140507098980351, +STORE, 140507098976256, 140507098980351, +STORE, 140507100692480, 140507100721151, +STORE, 140507053125632, 140507055255551, +SNULL, 140507053125632, 140507053154303, +STORE, 140507053154304, 140507055255551, +STORE, 140507053125632, 140507053154303, +SNULL, 140507055247359, 140507055255551, +STORE, 140507053154304, 140507055247359, +STORE, 140507055247360, 140507055255551, +ERASE, 140507055247360, 140507055255551, +STORE, 140507055247360, 140507055255551, +STORE, 140507051012096, 140507053125631, +SNULL, 140507051012096, 140507051024383, +STORE, 140507051024384, 140507053125631, +STORE, 140507051012096, 140507051024383, +SNULL, 140507053117439, 140507053125631, +STORE, 140507051024384, 140507053117439, +STORE, 140507053117440, 140507053125631, +ERASE, 140507053117440, 140507053125631, +STORE, 140507053117440, 140507053125631, +SNULL, 140507053121535, 140507053125631, +STORE, 140507053117440, 140507053121535, +STORE, 140507053121536, 140507053125631, +SNULL, 140507055251455, 140507055255551, +STORE, 140507055247360, 140507055251455, +STORE, 140507055251456, 140507055255551, +SNULL, 140507098972159, 140507098976255, +STORE, 140507098959872, 140507098972159, +STORE, 140507098972160, 140507098976255, +ERASE, 140507100692480, 140507100721151, +STORE, 140507100717056, 140507100721151, +ERASE, 140507100717056, 140507100721151, +STORE, 140507100717056, 140507100721151, +ERASE, 140507100717056, 140507100721151, +STORE, 140507100717056, 140507100721151, +ERASE, 140507100717056, 140507100721151, +STORE, 140507100717056, 140507100721151, +ERASE, 140507100717056, 140507100721151, +STORE, 140507100692480, 140507100721151, +ERASE, 140507068137472, 140507068190719, +ERASE, 140507068190720, 140507070287871, +ERASE, 140507070287872, 140507070291967, +ERASE, 140507070291968, 140507070296063, +ERASE, 140507070296064, 140507070345215, +ERASE, 140507065810944, 140507065843711, +ERASE, 140507065843712, 
140507067940863, +ERASE, 140507067940864, 140507067944959, +ERASE, 140507067944960, 140507067949055, +ERASE, 140507067949056, 140507068137471, +ERASE, 140507063705600, 140507063709695, +ERASE, 140507063709696, 140507065802751, +ERASE, 140507065802752, 140507065806847, +ERASE, 140507065806848, 140507065810943, +ERASE, 140507061600256, 140507061604351, +ERASE, 140507061604352, 140507063697407, +ERASE, 140507063697408, 140507063701503, +ERASE, 140507063701504, 140507063705599, +ERASE, 140507059490816, 140507059499007, +ERASE, 140507059499008, 140507061592063, +ERASE, 140507061592064, 140507061596159, +ERASE, 140507061596160, 140507061600255, +ERASE, 140507057377280, 140507057389567, +ERASE, 140507057389568, 140507059482623, +ERASE, 140507059482624, 140507059486719, +ERASE, 140507059486720, 140507059490815, +ERASE, 140507055255552, 140507055276031, +ERASE, 140507055276032, 140507057369087, +ERASE, 140507057369088, 140507057373183, +ERASE, 140507057373184, 140507057377279, +ERASE, 140507098693632, 140507098959871, +ERASE, 140507098959872, 140507098972159, +ERASE, 140507098972160, 140507098976255, +ERASE, 140507098976256, 140507098980351, +ERASE, 140507051012096, 140507051024383, +ERASE, 140507051024384, 140507053117439, +ERASE, 140507053117440, 140507053121535, +ERASE, 140507053121536, 140507053125631, +STORE, 94036448296960, 94036448509951, +STORE, 94036450607104, 94036450611199, +STORE, 94036450611200, 94036450619391, +STORE, 94036450619392, 94036450631679, +STORE, 94036482445312, 94036502376447, +STORE, 140469487013888, 140469488672767, +STORE, 140469488672768, 140469490769919, +STORE, 140469490769920, 140469490786303, +STORE, 140469490786304, 140469490794495, +STORE, 140469490794496, 140469490810879, +STORE, 140469490810880, 140469490823167, +STORE, 140469490823168, 140469492916223, +STORE, 140469492916224, 140469492920319, +STORE, 140469492920320, 140469492924415, +STORE, 140469492924416, 140469493067775, +STORE, 140469493436416, 140469495119871, +STORE, 140469495119872, 140469495136255, +STORE, 140469495164928, 140469495169023, +STORE, 140469495169024, 140469495173119, +STORE, 140469495173120, 140469495177215, +STORE, 140732281446400, 140732281585663, +STORE, 140732282736640, 140732282748927, +STORE, 140732282748928, 140732282753023, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723411931136, 140737488351231, +SNULL, 140723411939327, 140737488351231, +STORE, 140723411931136, 140723411939327, +STORE, 140723411800064, 140723411939327, +STORE, 93993768685568, 93993770909695, +SNULL, 93993768796159, 93993770909695, +STORE, 93993768685568, 93993768796159, +STORE, 93993768796160, 93993770909695, +ERASE, 93993768796160, 93993770909695, +STORE, 93993770889216, 93993770901503, +STORE, 93993770901504, 93993770909695, +STORE, 140508681740288, 140508683993087, +SNULL, 140508681883647, 140508683993087, +STORE, 140508681740288, 140508681883647, +STORE, 140508681883648, 140508683993087, +ERASE, 140508681883648, 140508683993087, +STORE, 140508683980800, 140508683988991, +STORE, 140508683988992, 140508683993087, +STORE, 140723412070400, 140723412074495, +STORE, 140723412058112, 140723412070399, +STORE, 140508683952128, 140508683980799, +STORE, 140508683943936, 140508683952127, +STORE, 140508677943296, 140508681740287, +SNULL, 140508677943296, 140508679602175, +STORE, 140508679602176, 140508681740287, +STORE, 140508677943296, 140508679602175, +SNULL, 140508681699327, 140508681740287, +STORE, 140508679602176, 140508681699327, +STORE, 140508681699328, 
140508681740287, +SNULL, 140508681699328, 140508681723903, +STORE, 140508681723904, 140508681740287, +STORE, 140508681699328, 140508681723903, +ERASE, 140508681699328, 140508681723903, +STORE, 140508681699328, 140508681723903, +ERASE, 140508681723904, 140508681740287, +STORE, 140508681723904, 140508681740287, +SNULL, 140508681715711, 140508681723903, +STORE, 140508681699328, 140508681715711, +STORE, 140508681715712, 140508681723903, +SNULL, 93993770897407, 93993770901503, +STORE, 93993770889216, 93993770897407, +STORE, 93993770897408, 93993770901503, +SNULL, 140508683984895, 140508683988991, +STORE, 140508683980800, 140508683984895, +STORE, 140508683984896, 140508683988991, +ERASE, 140508683952128, 140508683980799, +STORE, 93993791582208, 93993791717375, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140734685458432, 140737488351231, +SNULL, 140734685466623, 140737488351231, +STORE, 140734685458432, 140734685466623, +STORE, 140734685327360, 140734685466623, +STORE, 93832321548288, 93832323772415, +SNULL, 93832321658879, 93832323772415, +STORE, 93832321548288, 93832321658879, +STORE, 93832321658880, 93832323772415, +ERASE, 93832321658880, 93832323772415, +STORE, 93832323751936, 93832323764223, +STORE, 93832323764224, 93832323772415, +STORE, 140650945118208, 140650947371007, +SNULL, 140650945261567, 140650947371007, +STORE, 140650945118208, 140650945261567, +STORE, 140650945261568, 140650947371007, +ERASE, 140650945261568, 140650947371007, +STORE, 140650947358720, 140650947366911, +STORE, 140650947366912, 140650947371007, +STORE, 140734686081024, 140734686085119, +STORE, 140734686068736, 140734686081023, +STORE, 140650947330048, 140650947358719, +STORE, 140650947321856, 140650947330047, +STORE, 140650941321216, 140650945118207, +SNULL, 140650941321216, 140650942980095, +STORE, 140650942980096, 140650945118207, +STORE, 140650941321216, 140650942980095, +SNULL, 140650945077247, 140650945118207, +STORE, 140650942980096, 140650945077247, +STORE, 140650945077248, 140650945118207, +SNULL, 140650945077248, 140650945101823, +STORE, 140650945101824, 140650945118207, +STORE, 140650945077248, 140650945101823, +ERASE, 140650945077248, 140650945101823, +STORE, 140650945077248, 140650945101823, +ERASE, 140650945101824, 140650945118207, +STORE, 140650945101824, 140650945118207, +SNULL, 140650945093631, 140650945101823, +STORE, 140650945077248, 140650945093631, +STORE, 140650945093632, 140650945101823, +SNULL, 93832323760127, 93832323764223, +STORE, 93832323751936, 93832323760127, +STORE, 93832323760128, 93832323764223, +SNULL, 140650947362815, 140650947366911, +STORE, 140650947358720, 140650947362815, +STORE, 140650947362816, 140650947366911, +ERASE, 140650947330048, 140650947358719, +STORE, 93832331890688, 93832332025855, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728333520896, 140737488351231, +SNULL, 140728333529087, 140737488351231, +STORE, 140728333520896, 140728333529087, +STORE, 140728333389824, 140728333529087, +STORE, 94872734732288, 94872736956415, +SNULL, 94872734842879, 94872736956415, +STORE, 94872734732288, 94872734842879, +STORE, 94872734842880, 94872736956415, +ERASE, 94872734842880, 94872736956415, +STORE, 94872736935936, 94872736948223, +STORE, 94872736948224, 94872736956415, +STORE, 139755193257984, 139755195510783, +SNULL, 139755193401343, 139755195510783, +STORE, 139755193257984, 139755193401343, +STORE, 139755193401344, 139755195510783, +ERASE, 139755193401344, 139755195510783, +STORE, 
139755195498496, 139755195506687, +STORE, 139755195506688, 139755195510783, +STORE, 140728333926400, 140728333930495, +STORE, 140728333914112, 140728333926399, +STORE, 139755195469824, 139755195498495, +STORE, 139755195461632, 139755195469823, +STORE, 139755189460992, 139755193257983, +SNULL, 139755189460992, 139755191119871, +STORE, 139755191119872, 139755193257983, +STORE, 139755189460992, 139755191119871, +SNULL, 139755193217023, 139755193257983, +STORE, 139755191119872, 139755193217023, +STORE, 139755193217024, 139755193257983, +SNULL, 139755193217024, 139755193241599, +STORE, 139755193241600, 139755193257983, +STORE, 139755193217024, 139755193241599, +ERASE, 139755193217024, 139755193241599, +STORE, 139755193217024, 139755193241599, +ERASE, 139755193241600, 139755193257983, +STORE, 139755193241600, 139755193257983, +SNULL, 139755193233407, 139755193241599, +STORE, 139755193217024, 139755193233407, +STORE, 139755193233408, 139755193241599, +SNULL, 94872736944127, 94872736948223, +STORE, 94872736935936, 94872736944127, +STORE, 94872736944128, 94872736948223, +SNULL, 139755195502591, 139755195506687, +STORE, 139755195498496, 139755195502591, +STORE, 139755195502592, 139755195506687, +ERASE, 139755195469824, 139755195498495, +STORE, 94872749744128, 94872749879295, +STORE, 94720243642368, 94720243855359, +STORE, 94720245952512, 94720245956607, +STORE, 94720245956608, 94720245964799, +STORE, 94720245964800, 94720245977087, +STORE, 94720277745664, 94720278151167, +STORE, 140453174497280, 140453176156159, +STORE, 140453176156160, 140453178253311, +STORE, 140453178253312, 140453178269695, +STORE, 140453178269696, 140453178277887, +STORE, 140453178277888, 140453178294271, +STORE, 140453178294272, 140453178306559, +STORE, 140453178306560, 140453180399615, +STORE, 140453180399616, 140453180403711, +STORE, 140453180403712, 140453180407807, +STORE, 140453180407808, 140453180551167, +STORE, 140453180919808, 140453182603263, +STORE, 140453182603264, 140453182619647, +STORE, 140453182648320, 140453182652415, +STORE, 140453182652416, 140453182656511, +STORE, 140453182656512, 140453182660607, +STORE, 140733223923712, 140733224062975, +STORE, 140733224808448, 140733224820735, +STORE, 140733224820736, 140733224824831, +STORE, 94321091141632, 94321091354623, +STORE, 94321093451776, 94321093455871, +STORE, 94321093455872, 94321093464063, +STORE, 94321093464064, 94321093476351, +STORE, 94321115873280, 94321117229055, +STORE, 139695978840064, 139695980498943, +STORE, 139695980498944, 139695982596095, +STORE, 139695982596096, 139695982612479, +STORE, 139695982612480, 139695982620671, +STORE, 139695982620672, 139695982637055, +STORE, 139695982637056, 139695982649343, +STORE, 139695982649344, 139695984742399, +STORE, 139695984742400, 139695984746495, +STORE, 139695984746496, 139695984750591, +STORE, 139695984750592, 139695984893951, +STORE, 139695985262592, 139695986946047, +STORE, 139695986946048, 139695986962431, +STORE, 139695986991104, 139695986995199, +STORE, 139695986995200, 139695986999295, +STORE, 139695986999296, 139695987003391, +STORE, 140734650564608, 140734650703871, +STORE, 140734650785792, 140734650798079, +STORE, 140734650798080, 140734650802175, +STORE, 94523438456832, 94523438669823, +STORE, 94523440766976, 94523440771071, +STORE, 94523440771072, 94523440779263, +STORE, 94523440779264, 94523440791551, +STORE, 94523464544256, 94523465842687, +STORE, 140453231493120, 140453233151999, +STORE, 140453233152000, 140453235249151, +STORE, 140453235249152, 140453235265535, +STORE, 140453235265536, 
140453235273727, +STORE, 140453235273728, 140453235290111, +STORE, 140453235290112, 140453235302399, +STORE, 140453235302400, 140453237395455, +STORE, 140453237395456, 140453237399551, +STORE, 140453237399552, 140453237403647, +STORE, 140453237403648, 140453237547007, +STORE, 140453237915648, 140453239599103, +STORE, 140453239599104, 140453239615487, +STORE, 140453239644160, 140453239648255, +STORE, 140453239648256, 140453239652351, +STORE, 140453239652352, 140453239656447, +STORE, 140734679445504, 140734679584767, +STORE, 140734680018944, 140734680031231, +STORE, 140734680031232, 140734680035327, +STORE, 94614776987648, 94614777200639, +STORE, 94614779297792, 94614779301887, +STORE, 94614779301888, 94614779310079, +STORE, 94614779310080, 94614779322367, +STORE, 94614798467072, 94614800699391, +STORE, 139677037182976, 139677038841855, +STORE, 139677038841856, 139677040939007, +STORE, 139677040939008, 139677040955391, +STORE, 139677040955392, 139677040963583, +STORE, 139677040963584, 139677040979967, +STORE, 139677040979968, 139677040992255, +STORE, 139677040992256, 139677043085311, +STORE, 139677043085312, 139677043089407, +STORE, 139677043089408, 139677043093503, +STORE, 139677043093504, 139677043236863, +STORE, 139677043605504, 139677045288959, +STORE, 139677045288960, 139677045305343, +STORE, 139677045334016, 139677045338111, +STORE, 139677045338112, 139677045342207, +STORE, 139677045342208, 139677045346303, +STORE, 140721604411392, 140721604550655, +STORE, 140721606135808, 140721606148095, +STORE, 140721606148096, 140721606152191, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140729280544768, 140737488351231, +SNULL, 140729280552959, 140737488351231, +STORE, 140729280544768, 140729280552959, +STORE, 140729280413696, 140729280552959, +STORE, 94863939334144, 94863941558271, +SNULL, 94863939444735, 94863941558271, +STORE, 94863939334144, 94863939444735, +STORE, 94863939444736, 94863941558271, +ERASE, 94863939444736, 94863941558271, +STORE, 94863941537792, 94863941550079, +STORE, 94863941550080, 94863941558271, +STORE, 139691047276544, 139691049529343, +SNULL, 139691047419903, 139691049529343, +STORE, 139691047276544, 139691047419903, +STORE, 139691047419904, 139691049529343, +ERASE, 139691047419904, 139691049529343, +STORE, 139691049517056, 139691049525247, +STORE, 139691049525248, 139691049529343, +STORE, 140729281679360, 140729281683455, +STORE, 140729281667072, 140729281679359, +STORE, 139691049488384, 139691049517055, +STORE, 139691049480192, 139691049488383, +STORE, 139691043479552, 139691047276543, +SNULL, 139691043479552, 139691045138431, +STORE, 139691045138432, 139691047276543, +STORE, 139691043479552, 139691045138431, +SNULL, 139691047235583, 139691047276543, +STORE, 139691045138432, 139691047235583, +STORE, 139691047235584, 139691047276543, +SNULL, 139691047235584, 139691047260159, +STORE, 139691047260160, 139691047276543, +STORE, 139691047235584, 139691047260159, +ERASE, 139691047235584, 139691047260159, +STORE, 139691047235584, 139691047260159, +ERASE, 139691047260160, 139691047276543, +STORE, 139691047260160, 139691047276543, +SNULL, 139691047251967, 139691047260159, +STORE, 139691047235584, 139691047251967, +STORE, 139691047251968, 139691047260159, +SNULL, 94863941545983, 94863941550079, +STORE, 94863941537792, 94863941545983, +STORE, 94863941545984, 94863941550079, +SNULL, 139691049521151, 139691049525247, +STORE, 139691049517056, 139691049521151, +STORE, 139691049521152, 139691049525247, +ERASE, 139691049488384, 
139691049517055, +STORE, 94863951294464, 94863951429631, +STORE, 93998209294336, 93998209507327, +STORE, 93998211604480, 93998211608575, +STORE, 93998211608576, 93998211616767, +STORE, 93998211616768, 93998211629055, +STORE, 93998227210240, 93998227615743, +STORE, 140243029913600, 140243031572479, +STORE, 140243031572480, 140243033669631, +STORE, 140243033669632, 140243033686015, +STORE, 140243033686016, 140243033694207, +STORE, 140243033694208, 140243033710591, +STORE, 140243033710592, 140243033722879, +STORE, 140243033722880, 140243035815935, +STORE, 140243035815936, 140243035820031, +STORE, 140243035820032, 140243035824127, +STORE, 140243035824128, 140243035967487, +STORE, 140243036336128, 140243038019583, +STORE, 140243038019584, 140243038035967, +STORE, 140243038064640, 140243038068735, +STORE, 140243038068736, 140243038072831, +STORE, 140243038072832, 140243038076927, +STORE, 140734976479232, 140734976618495, +STORE, 140734977978368, 140734977990655, +STORE, 140734977990656, 140734977994751, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722742775808, 140737488351231, +SNULL, 140722742783999, 140737488351231, +STORE, 140722742775808, 140722742783999, +STORE, 140722742644736, 140722742783999, +STORE, 93857673662464, 93857675997183, +SNULL, 93857673875455, 93857675997183, +STORE, 93857673662464, 93857673875455, +STORE, 93857673875456, 93857675997183, +ERASE, 93857673875456, 93857675997183, +STORE, 93857675972608, 93857675984895, +STORE, 93857675984896, 93857675997183, +STORE, 140629677498368, 140629679751167, +SNULL, 140629677641727, 140629679751167, +STORE, 140629677498368, 140629677641727, +STORE, 140629677641728, 140629679751167, +ERASE, 140629677641728, 140629679751167, +STORE, 140629679738880, 140629679747071, +STORE, 140629679747072, 140629679751167, +STORE, 140722743222272, 140722743226367, +STORE, 140722743209984, 140722743222271, +STORE, 140629679710208, 140629679738879, +STORE, 140629679702016, 140629679710207, +STORE, 140629675384832, 140629677498367, +SNULL, 140629675384832, 140629675397119, +STORE, 140629675397120, 140629677498367, +STORE, 140629675384832, 140629675397119, +SNULL, 140629677490175, 140629677498367, +STORE, 140629675397120, 140629677490175, +STORE, 140629677490176, 140629677498367, +ERASE, 140629677490176, 140629677498367, +STORE, 140629677490176, 140629677498367, +STORE, 140629671587840, 140629675384831, +SNULL, 140629671587840, 140629673246719, +STORE, 140629673246720, 140629675384831, +STORE, 140629671587840, 140629673246719, +SNULL, 140629675343871, 140629675384831, +STORE, 140629673246720, 140629675343871, +STORE, 140629675343872, 140629675384831, +SNULL, 140629675343872, 140629675368447, +STORE, 140629675368448, 140629675384831, +STORE, 140629675343872, 140629675368447, +ERASE, 140629675343872, 140629675368447, +STORE, 140629675343872, 140629675368447, +ERASE, 140629675368448, 140629675384831, +STORE, 140629675368448, 140629675384831, +STORE, 140629679693824, 140629679710207, +SNULL, 140629675360255, 140629675368447, +STORE, 140629675343872, 140629675360255, +STORE, 140629675360256, 140629675368447, +SNULL, 140629677494271, 140629677498367, +STORE, 140629677490176, 140629677494271, +STORE, 140629677494272, 140629677498367, +SNULL, 93857675976703, 93857675984895, +STORE, 93857675972608, 93857675976703, +STORE, 93857675976704, 93857675984895, +SNULL, 140629679742975, 140629679747071, +STORE, 140629679738880, 140629679742975, +STORE, 140629679742976, 140629679747071, +ERASE, 140629679710208, 140629679738879, 
+STORE, 93857705832448, 93857705967615, +STORE, 140629678010368, 140629679693823, +STORE, 93857705832448, 93857706102783, +STORE, 93857705832448, 93857706237951, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735922421760, 140737488351231, +SNULL, 140735922429951, 140737488351231, +STORE, 140735922421760, 140735922429951, +STORE, 140735922290688, 140735922429951, +STORE, 94651136139264, 94651138363391, +SNULL, 94651136249855, 94651138363391, +STORE, 94651136139264, 94651136249855, +STORE, 94651136249856, 94651138363391, +ERASE, 94651136249856, 94651138363391, +STORE, 94651138342912, 94651138355199, +STORE, 94651138355200, 94651138363391, +STORE, 140325788266496, 140325790519295, +SNULL, 140325788409855, 140325790519295, +STORE, 140325788266496, 140325788409855, +STORE, 140325788409856, 140325790519295, +ERASE, 140325788409856, 140325790519295, +STORE, 140325790507008, 140325790515199, +STORE, 140325790515200, 140325790519295, +STORE, 140735923572736, 140735923576831, +STORE, 140735923560448, 140735923572735, +STORE, 140325790478336, 140325790507007, +STORE, 140325790470144, 140325790478335, +STORE, 140325784469504, 140325788266495, +SNULL, 140325784469504, 140325786128383, +STORE, 140325786128384, 140325788266495, +STORE, 140325784469504, 140325786128383, +SNULL, 140325788225535, 140325788266495, +STORE, 140325786128384, 140325788225535, +STORE, 140325788225536, 140325788266495, +SNULL, 140325788225536, 140325788250111, +STORE, 140325788250112, 140325788266495, +STORE, 140325788225536, 140325788250111, +ERASE, 140325788225536, 140325788250111, +STORE, 140325788225536, 140325788250111, +ERASE, 140325788250112, 140325788266495, +STORE, 140325788250112, 140325788266495, +SNULL, 140325788241919, 140325788250111, +STORE, 140325788225536, 140325788241919, +STORE, 140325788241920, 140325788250111, +SNULL, 94651138351103, 94651138355199, +STORE, 94651138342912, 94651138351103, +STORE, 94651138351104, 94651138355199, +SNULL, 140325790511103, 140325790515199, +STORE, 140325790507008, 140325790511103, +STORE, 140325790511104, 140325790515199, +ERASE, 140325790478336, 140325790507007, +STORE, 94651146297344, 94651146432511, +STORE, 94212330168320, 94212330381311, +STORE, 94212332478464, 94212332482559, +STORE, 94212332482560, 94212332490751, +STORE, 94212332490752, 94212332503039, +STORE, 94212348891136, 94212349825023, +STORE, 140611630604288, 140611632263167, +STORE, 140611632263168, 140611634360319, +STORE, 140611634360320, 140611634376703, +STORE, 140611634376704, 140611634384895, +STORE, 140611634384896, 140611634401279, +STORE, 140611634401280, 140611634413567, +STORE, 140611634413568, 140611636506623, +STORE, 140611636506624, 140611636510719, +STORE, 140611636510720, 140611636514815, +STORE, 140611636514816, 140611636658175, +STORE, 140611637026816, 140611638710271, +STORE, 140611638710272, 140611638726655, +STORE, 140611638755328, 140611638759423, +STORE, 140611638759424, 140611638763519, +STORE, 140611638763520, 140611638767615, +STORE, 140726974533632, 140726974672895, +STORE, 140726974943232, 140726974955519, +STORE, 140726974955520, 140726974959615, +STORE, 94572463521792, 94572463734783, +STORE, 94572465831936, 94572465836031, +STORE, 94572465836032, 94572465844223, +STORE, 94572465844224, 94572465856511, +STORE, 94572491534336, 94572492865535, +STORE, 140644351492096, 140644353150975, +STORE, 140644353150976, 140644355248127, +STORE, 140644355248128, 140644355264511, +STORE, 140644355264512, 140644355272703, +STORE, 140644355272704, 
140644355289087, +STORE, 140644355289088, 140644355301375, +STORE, 140644355301376, 140644357394431, +STORE, 140644357394432, 140644357398527, +STORE, 140644357398528, 140644357402623, +STORE, 140644357402624, 140644357545983, +STORE, 140644357914624, 140644359598079, +STORE, 140644359598080, 140644359614463, +STORE, 140644359643136, 140644359647231, +STORE, 140644359647232, 140644359651327, +STORE, 140644359651328, 140644359655423, +STORE, 140727841824768, 140727841964031, +STORE, 140727843188736, 140727843201023, +STORE, 140727843201024, 140727843205119, +STORE, 94144315457536, 94144315670527, +STORE, 94144317767680, 94144317771775, +STORE, 94144317771776, 94144317779967, +STORE, 94144317779968, 94144317792255, +STORE, 94144318369792, 94144320815103, +STORE, 140316717645824, 140316719304703, +STORE, 140316719304704, 140316721401855, +STORE, 140316721401856, 140316721418239, +STORE, 140316721418240, 140316721426431, +STORE, 140316721426432, 140316721442815, +STORE, 140316721442816, 140316721455103, +STORE, 140316721455104, 140316723548159, +STORE, 140316723548160, 140316723552255, +STORE, 140316723552256, 140316723556351, +STORE, 140316723556352, 140316723699711, +STORE, 140316724068352, 140316725751807, +STORE, 140316725751808, 140316725768191, +STORE, 140316725796864, 140316725800959, +STORE, 140316725800960, 140316725805055, +STORE, 140316725805056, 140316725809151, +STORE, 140725744283648, 140725744422911, +STORE, 140725745852416, 140725745864703, +STORE, 140725745864704, 140725745868799, +STORE, 94646858846208, 94646859059199, +STORE, 94646861156352, 94646861160447, +STORE, 94646861160448, 94646861168639, +STORE, 94646861168640, 94646861180927, +STORE, 94646879805440, 94646881894399, +STORE, 140435449745408, 140435451404287, +STORE, 140435451404288, 140435453501439, +STORE, 140435453501440, 140435453517823, +STORE, 140435453517824, 140435453526015, +STORE, 140435453526016, 140435453542399, +STORE, 140435453542400, 140435453554687, +STORE, 140435453554688, 140435455647743, +STORE, 140435455647744, 140435455651839, +STORE, 140435455651840, 140435455655935, +STORE, 140435455655936, 140435455799295, +STORE, 140435456167936, 140435457851391, +STORE, 140435457851392, 140435457867775, +STORE, 140435457896448, 140435457900543, +STORE, 140435457900544, 140435457904639, +STORE, 140435457904640, 140435457908735, +STORE, 140721033818112, 140721033957375, +STORE, 140721034018816, 140721034031103, +STORE, 140721034031104, 140721034035199, +STORE, 94872903438336, 94872903651327, +STORE, 94872905748480, 94872905752575, +STORE, 94872905752576, 94872905760767, +STORE, 94872905760768, 94872905773055, +STORE, 94872931246080, 94872931651583, +STORE, 139771607810048, 139771609468927, +STORE, 139771609468928, 139771611566079, +STORE, 139771611566080, 139771611582463, +STORE, 139771611582464, 139771611590655, +STORE, 139771611590656, 139771611607039, +STORE, 139771611607040, 139771611619327, +STORE, 139771611619328, 139771613712383, +STORE, 139771613712384, 139771613716479, +STORE, 139771613716480, 139771613720575, +STORE, 139771613720576, 139771613863935, +STORE, 139771614232576, 139771615916031, +STORE, 139771615916032, 139771615932415, +STORE, 139771615961088, 139771615965183, +STORE, 139771615965184, 139771615969279, +STORE, 139771615969280, 139771615973375, +STORE, 140725402931200, 140725403070463, +STORE, 140725403852800, 140725403865087, +STORE, 140725403865088, 140725403869183, +STORE, 94740737736704, 94740737949695, +STORE, 94740740046848, 94740740050943, +STORE, 94740740050944, 94740740059135, 
+STORE, 94740740059136, 94740740071423, +STORE, 94740743249920, 94740744724479, +STORE, 140640287010816, 140640288669695, +STORE, 140640288669696, 140640290766847, +STORE, 140640290766848, 140640290783231, +STORE, 140640290783232, 140640290791423, +STORE, 140640290791424, 140640290807807, +STORE, 140640290807808, 140640290820095, +STORE, 140640290820096, 140640292913151, +STORE, 140640292913152, 140640292917247, +STORE, 140640292917248, 140640292921343, +STORE, 140640292921344, 140640293064703, +STORE, 140640293433344, 140640295116799, +STORE, 140640295116800, 140640295133183, +STORE, 140640295161856, 140640295165951, +STORE, 140640295165952, 140640295170047, +STORE, 140640295170048, 140640295174143, +STORE, 140725133303808, 140725133443071, +STORE, 140725133684736, 140725133697023, +STORE, 140725133697024, 140725133701119, +STORE, 140737488347136, 140737488351231, +STORE, 140722826371072, 140737488351231, +SNULL, 140722826375167, 140737488351231, +STORE, 140722826371072, 140722826375167, +STORE, 140722826240000, 140722826375167, +STORE, 94113818611712, 94113820835839, +SNULL, 94113818722303, 94113820835839, +STORE, 94113818611712, 94113818722303, +STORE, 94113818722304, 94113820835839, +ERASE, 94113818722304, 94113820835839, +STORE, 94113820815360, 94113820827647, +STORE, 94113820827648, 94113820835839, +STORE, 139628194508800, 139628196761599, +SNULL, 139628194652159, 139628196761599, +STORE, 139628194508800, 139628194652159, +STORE, 139628194652160, 139628196761599, +ERASE, 139628194652160, 139628196761599, +STORE, 139628196749312, 139628196757503, +STORE, 139628196757504, 139628196761599, +STORE, 140722826727424, 140722826731519, +STORE, 140722826715136, 140722826727423, +STORE, 139628196720640, 139628196749311, +STORE, 139628196712448, 139628196720639, +STORE, 139628190711808, 139628194508799, +SNULL, 139628190711808, 139628192370687, +STORE, 139628192370688, 139628194508799, +STORE, 139628190711808, 139628192370687, +SNULL, 139628194467839, 139628194508799, +STORE, 139628192370688, 139628194467839, +STORE, 139628194467840, 139628194508799, +SNULL, 139628194467840, 139628194492415, +STORE, 139628194492416, 139628194508799, +STORE, 139628194467840, 139628194492415, +ERASE, 139628194467840, 139628194492415, +STORE, 139628194467840, 139628194492415, +ERASE, 139628194492416, 139628194508799, +STORE, 139628194492416, 139628194508799, +SNULL, 139628194484223, 139628194492415, +STORE, 139628194467840, 139628194484223, +STORE, 139628194484224, 139628194492415, +SNULL, 94113820823551, 94113820827647, +STORE, 94113820815360, 94113820823551, +STORE, 94113820823552, 94113820827647, +SNULL, 139628196753407, 139628196757503, +STORE, 139628196749312, 139628196753407, +STORE, 139628196753408, 139628196757503, +ERASE, 139628196720640, 139628196749311, +STORE, 94113830850560, 94113830985727, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140731865833472, 140737488351231, +SNULL, 140731865841663, 140737488351231, +STORE, 140731865833472, 140731865841663, +STORE, 140731865702400, 140731865841663, +STORE, 94763339386880, 94763341611007, +SNULL, 94763339497471, 94763341611007, +STORE, 94763339386880, 94763339497471, +STORE, 94763339497472, 94763341611007, +ERASE, 94763339497472, 94763341611007, +STORE, 94763341590528, 94763341602815, +STORE, 94763341602816, 94763341611007, +STORE, 139778398486528, 139778400739327, +SNULL, 139778398629887, 139778400739327, +STORE, 139778398486528, 139778398629887, +STORE, 139778398629888, 139778400739327, +ERASE, 139778398629888, 
139778400739327, +STORE, 139778400727040, 139778400735231, +STORE, 139778400735232, 139778400739327, +STORE, 140731865858048, 140731865862143, +STORE, 140731865845760, 140731865858047, +STORE, 139778400698368, 139778400727039, +STORE, 139778400690176, 139778400698367, +STORE, 139778394689536, 139778398486527, +SNULL, 139778394689536, 139778396348415, +STORE, 139778396348416, 139778398486527, +STORE, 139778394689536, 139778396348415, +SNULL, 139778398445567, 139778398486527, +STORE, 139778396348416, 139778398445567, +STORE, 139778398445568, 139778398486527, +SNULL, 139778398445568, 139778398470143, +STORE, 139778398470144, 139778398486527, +STORE, 139778398445568, 139778398470143, +ERASE, 139778398445568, 139778398470143, +STORE, 139778398445568, 139778398470143, +ERASE, 139778398470144, 139778398486527, +STORE, 139778398470144, 139778398486527, +SNULL, 139778398461951, 139778398470143, +STORE, 139778398445568, 139778398461951, +STORE, 139778398461952, 139778398470143, +SNULL, 94763341598719, 94763341602815, +STORE, 94763341590528, 94763341598719, +STORE, 94763341598720, 94763341602815, +SNULL, 139778400731135, 139778400735231, +STORE, 139778400727040, 139778400731135, +STORE, 139778400731136, 139778400735231, +ERASE, 139778400698368, 139778400727039, +STORE, 94763362197504, 94763362332671, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140737488338944, 140737488351231, +STORE, 140732053192704, 140737488351231, +SNULL, 140732053204991, 140737488351231, +STORE, 140732053192704, 140732053204991, +STORE, 140732053061632, 140732053204991, +STORE, 4194304, 26279935, +STORE, 28372992, 28454911, +STORE, 28454912, 29806591, +STORE, 140176018599936, 140176020852735, +SNULL, 140176018743295, 140176020852735, +STORE, 140176018599936, 140176018743295, +STORE, 140176018743296, 140176020852735, +ERASE, 140176018743296, 140176020852735, +STORE, 140176020840448, 140176020848639, +STORE, 140176020848640, 140176020852735, +STORE, 140732053381120, 140732053385215, +STORE, 140732053368832, 140732053381119, +STORE, 140176020811776, 140176020840447, +STORE, 140176020803584, 140176020811775, +STORE, 140176014766080, 140176018599935, +SNULL, 140176014766080, 140176016474111, +STORE, 140176016474112, 140176018599935, +STORE, 140176014766080, 140176016474111, +SNULL, 140176018567167, 140176018599935, +STORE, 140176016474112, 140176018567167, +STORE, 140176018567168, 140176018599935, +ERASE, 140176018567168, 140176018599935, +STORE, 140176018567168, 140176018599935, +STORE, 140176012570624, 140176014766079, +SNULL, 140176012570624, 140176012664831, +STORE, 140176012664832, 140176014766079, +STORE, 140176012570624, 140176012664831, +SNULL, 140176014757887, 140176014766079, +STORE, 140176012664832, 140176014757887, +STORE, 140176014757888, 140176014766079, +ERASE, 140176014757888, 140176014766079, +STORE, 140176014757888, 140176014766079, +STORE, 140176010051584, 140176012570623, +SNULL, 140176010051584, 140176010465279, +STORE, 140176010465280, 140176012570623, +STORE, 140176010051584, 140176010465279, +SNULL, 140176012558335, 140176012570623, +STORE, 140176010465280, 140176012558335, +STORE, 140176012558336, 140176012570623, +ERASE, 140176012558336, 140176012570623, +STORE, 140176012558336, 140176012570623, +STORE, 140176007417856, 140176010051583, +SNULL, 140176007417856, 140176007946239, +STORE, 140176007946240, 140176010051583, +STORE, 140176007417856, 140176007946239, +SNULL, 140176010043391, 140176010051583, +STORE, 140176007946240, 140176010043391, +STORE, 
140176010043392, 140176010051583, +ERASE, 140176010043392, 140176010051583, +STORE, 140176010043392, 140176010051583, +STORE, 140176005304320, 140176007417855, +SNULL, 140176005304320, 140176005316607, +STORE, 140176005316608, 140176007417855, +STORE, 140176005304320, 140176005316607, +SNULL, 140176007409663, 140176007417855, +STORE, 140176005316608, 140176007409663, +STORE, 140176007409664, 140176007417855, +ERASE, 140176007409664, 140176007417855, +STORE, 140176007409664, 140176007417855, +STORE, 140176003100672, 140176005304319, +SNULL, 140176003100672, 140176003203071, +STORE, 140176003203072, 140176005304319, +STORE, 140176003100672, 140176003203071, +SNULL, 140176005296127, 140176005304319, +STORE, 140176003203072, 140176005296127, +STORE, 140176005296128, 140176005304319, +ERASE, 140176005296128, 140176005304319, +STORE, 140176005296128, 140176005304319, +STORE, 140176020795392, 140176020811775, +STORE, 140175999938560, 140176003100671, +SNULL, 140175999938560, 140176000999423, +STORE, 140176000999424, 140176003100671, +STORE, 140175999938560, 140176000999423, +SNULL, 140176003092479, 140176003100671, +STORE, 140176000999424, 140176003092479, +STORE, 140176003092480, 140176003100671, +ERASE, 140176003092480, 140176003100671, +STORE, 140176003092480, 140176003100671, +STORE, 140175996141568, 140175999938559, +SNULL, 140175996141568, 140175997800447, +STORE, 140175997800448, 140175999938559, +STORE, 140175996141568, 140175997800447, +SNULL, 140175999897599, 140175999938559, +STORE, 140175997800448, 140175999897599, +STORE, 140175999897600, 140175999938559, +SNULL, 140175999897600, 140175999922175, +STORE, 140175999922176, 140175999938559, +STORE, 140175999897600, 140175999922175, +ERASE, 140175999897600, 140175999922175, +STORE, 140175999897600, 140175999922175, +ERASE, 140175999922176, 140175999938559, +STORE, 140175999922176, 140175999938559, +STORE, 140176020783104, 140176020811775, +SNULL, 140175999913983, 140175999922175, +STORE, 140175999897600, 140175999913983, +STORE, 140175999913984, 140175999922175, +SNULL, 140176003096575, 140176003100671, +STORE, 140176003092480, 140176003096575, +STORE, 140176003096576, 140176003100671, +SNULL, 140176005300223, 140176005304319, +STORE, 140176005296128, 140176005300223, +STORE, 140176005300224, 140176005304319, +SNULL, 140176007413759, 140176007417855, +STORE, 140176007409664, 140176007413759, +STORE, 140176007413760, 140176007417855, +SNULL, 140176010047487, 140176010051583, +STORE, 140176010043392, 140176010047487, +STORE, 140176010047488, 140176010051583, +SNULL, 140176012566527, 140176012570623, +STORE, 140176012558336, 140176012566527, +STORE, 140176012566528, 140176012570623, +SNULL, 140176014761983, 140176014766079, +STORE, 140176014757888, 140176014761983, +STORE, 140176014761984, 140176014766079, +SNULL, 140176018571263, 140176018599935, +STORE, 140176018567168, 140176018571263, +STORE, 140176018571264, 140176018599935, +SNULL, 28405759, 28454911, +STORE, 28372992, 28405759, +STORE, 28405760, 28454911, +SNULL, 140176020844543, 140176020848639, +STORE, 140176020840448, 140176020844543, +STORE, 140176020844544, 140176020848639, +ERASE, 140176020811776, 140176020840447, +STORE, 53080064, 53215231, +STORE, 140176019099648, 140176020783103, +STORE, 140176020836352, 140176020840447, +STORE, 140176018964480, 140176019099647, +STORE, 53080064, 53358591, +STORE, 140175994044416, 140175996141567, +STORE, 140176020828160, 140176020840447, +STORE, 140176020819968, 140176020840447, +STORE, 140176020783104, 140176020819967, +STORE, 
140176018948096, 140176019099647, +STORE, 53080064, 53493759, +STORE, 53080064, 53649407, +STORE, 140176018939904, 140176019099647, +STORE, 140176018931712, 140176019099647, +STORE, 53080064, 53784575, +STORE, 53080064, 53919743, +STORE, 140176018915328, 140176019099647, +STORE, 140176018907136, 140176019099647, +STORE, 53080064, 54059007, +STORE, 140175993769984, 140175996141567, +STORE, 140176018747392, 140176019099647, +STORE, 53080064, 54198271, +SNULL, 54190079, 54198271, +STORE, 53080064, 54190079, +STORE, 54190080, 54198271, +ERASE, 54190080, 54198271, +SNULL, 54181887, 54190079, +STORE, 53080064, 54181887, +STORE, 54181888, 54190079, +ERASE, 54181888, 54190079, +SNULL, 54173695, 54181887, +STORE, 53080064, 54173695, +STORE, 54173696, 54181887, +ERASE, 54173696, 54181887, +SNULL, 54165503, 54173695, +STORE, 53080064, 54165503, +STORE, 54165504, 54173695, +ERASE, 54165504, 54173695, +STORE, 140175993753600, 140175996141567, +STORE, 140175993688064, 140175996141567, +STORE, 140175993655296, 140175996141567, +STORE, 140175991558144, 140175996141567, +STORE, 140175991492608, 140175996141567, +STORE, 53080064, 54312959, +STORE, 140175991361536, 140175996141567, +STORE, 140175991099392, 140175996141567, +STORE, 140175991091200, 140175996141567, +STORE, 140175991074816, 140175996141567, +STORE, 140175991066624, 140175996141567, +STORE, 140175991058432, 140175996141567, +STORE, 53080064, 54448127, +SNULL, 54439935, 54448127, +STORE, 53080064, 54439935, +STORE, 54439936, 54448127, +ERASE, 54439936, 54448127, +SNULL, 54431743, 54439935, +STORE, 53080064, 54431743, +STORE, 54431744, 54439935, +ERASE, 54431744, 54439935, +SNULL, 54419455, 54431743, +STORE, 53080064, 54419455, +STORE, 54419456, 54431743, +ERASE, 54419456, 54431743, +SNULL, 54403071, 54419455, +STORE, 53080064, 54403071, +STORE, 54403072, 54419455, +ERASE, 54403072, 54419455, +STORE, 140175991042048, 140175996141567, +STORE, 53080064, 54538239, +SNULL, 54534143, 54538239, +STORE, 53080064, 54534143, +STORE, 54534144, 54538239, +ERASE, 54534144, 54538239, +SNULL, 54530047, 54534143, +STORE, 53080064, 54530047, +STORE, 54530048, 54534143, +ERASE, 54530048, 54534143, +SNULL, 54525951, 54530047, +STORE, 53080064, 54525951, +STORE, 54525952, 54530047, +ERASE, 54525952, 54530047, +SNULL, 54521855, 54525951, +STORE, 53080064, 54521855, +STORE, 54521856, 54525951, +ERASE, 54521856, 54525951, +SNULL, 54517759, 54521855, +STORE, 53080064, 54517759, +STORE, 54517760, 54521855, +ERASE, 54517760, 54521855, +SNULL, 54513663, 54517759, +STORE, 53080064, 54513663, +STORE, 54513664, 54517759, +ERASE, 54513664, 54517759, +SNULL, 54509567, 54513663, +STORE, 53080064, 54509567, +STORE, 54509568, 54513663, +ERASE, 54509568, 54513663, +STORE, 140175991025664, 140175996141567, +STORE, 140175990992896, 140175996141567, +STORE, 53080064, 54644735, +SNULL, 54628351, 54644735, +STORE, 53080064, 54628351, +STORE, 54628352, 54644735, +ERASE, 54628352, 54644735, +SNULL, 54616063, 54628351, +STORE, 53080064, 54616063, +STORE, 54616064, 54628351, +ERASE, 54616064, 54628351, +STORE, 140175988895744, 140175996141567, +STORE, 53080064, 54767615, +STORE, 140175988879360, 140175996141567, +STORE, 140175988617216, 140175996141567, +STORE, 140175988609024, 140175996141567, +STORE, 140175988600832, 140175996141567, +STORE, 53080064, 54906879, +SNULL, 54898687, 54906879, +STORE, 53080064, 54898687, +STORE, 54898688, 54906879, +ERASE, 54898688, 54906879, +SNULL, 54853631, 54898687, +STORE, 53080064, 54853631, +STORE, 54853632, 54898687, +ERASE, 54853632, 54898687, 
+STORE, 140175986503680, 140175996141567, +STORE, 53080064, 54996991, +STORE, 140175986495488, 140175996141567, +STORE, 140175986487296, 140175996141567, +STORE, 140175985438720, 140175996141567, +STORE, 53080064, 55136255, +STORE, 140175985405952, 140175996141567, +STORE, 140175985139712, 140175996141567, +SNULL, 140176018964479, 140176019099647, +STORE, 140176018747392, 140176018964479, +STORE, 140176018964480, 140176019099647, +ERASE, 140176018964480, 140176019099647, +STORE, 140175983042560, 140175996141567, +STORE, 140175982518272, 140175996141567, +STORE, 140175980421120, 140175996141567, +STORE, 53080064, 55287807, +STORE, 53080064, 55427071, +STORE, 140176019091456, 140176019099647, +STORE, 140176019083264, 140176019099647, +STORE, 140176019075072, 140176019099647, +STORE, 140176019066880, 140176019099647, +STORE, 140176019058688, 140176019099647, +STORE, 140175980158976, 140175996141567, +STORE, 140176019050496, 140176019099647, +STORE, 140176019042304, 140176019099647, +STORE, 140176019034112, 140176019099647, +STORE, 140176019025920, 140176019099647, +STORE, 140176019017728, 140176019099647, +STORE, 140176019009536, 140176019099647, +STORE, 140176019001344, 140176019099647, +STORE, 140176018993152, 140176019099647, +STORE, 140176018984960, 140176019099647, +STORE, 140176018976768, 140176019099647, +STORE, 140176018968576, 140176019099647, +STORE, 140175978061824, 140175996141567, +STORE, 53080064, 55603199, +STORE, 140175978029056, 140175996141567, +STORE, 140175977996288, 140175996141567, +STORE, 53080064, 55738367, +STORE, 53080064, 55881727, +STORE, 140175977963520, 140175996141567, +STORE, 140175977930752, 140175996141567, +STORE, 53080064, 56041471, +STORE, 140175977897984, 140175996141567, +STORE, 140175977865216, 140175996141567, +SNULL, 55881727, 56041471, +STORE, 53080064, 55881727, +STORE, 55881728, 56041471, +ERASE, 55881728, 56041471, +SNULL, 55721983, 55881727, +STORE, 53080064, 55721983, +STORE, 55721984, 55881727, +ERASE, 55721984, 55881727, +SNULL, 55570431, 55721983, +STORE, 53080064, 55570431, +STORE, 55570432, 55721983, +ERASE, 55570432, 55721983, +STORE, 140175977857024, 140175996141567, +STORE, 140175975759872, 140175996141567, +STORE, 53080064, 55754751, +STORE, 53080064, 55943167, +STORE, 140175975751680, 140175996141567, +STORE, 140175975743488, 140175996141567, +STORE, 140175975735296, 140175996141567, +STORE, 140175975727104, 140175996141567, +STORE, 140175975718912, 140175996141567, +STORE, 140175975710720, 140175996141567, +STORE, 140175975702528, 140175996141567, +STORE, 140175975694336, 140175996141567, +STORE, 140175975686144, 140175996141567, +STORE, 140175975677952, 140175996141567, +STORE, 140175975669760, 140175996141567, +STORE, 140175974621184, 140175996141567, +STORE, 140175974612992, 140175996141567, +STORE, 53080064, 56139775, +STORE, 140175972515840, 140175996141567, +STORE, 53080064, 56401919, +STORE, 140175970418688, 140175996141567, +STORE, 140175970410496, 140175996141567, +STORE, 140175970402304, 140175996141567, +STORE, 140175970394112, 140175996141567, +STORE, 53080064, 56569855, +STORE, 140175969865728, 140175996141567, +SNULL, 140175985139711, 140175996141567, +STORE, 140175969865728, 140175985139711, +STORE, 140175985139712, 140175996141567, +SNULL, 140175985139712, 140175985405951, +STORE, 140175985405952, 140175996141567, +STORE, 140175985139712, 140175985405951, +ERASE, 140175985139712, 140175985405951, +STORE, 140175965671424, 140175985139711, +STORE, 140175985397760, 140175996141567, +STORE, 140175985389568, 
140175996141567, +STORE, 140175985381376, 140175996141567, +STORE, 140175985373184, 140175996141567, +STORE, 140175985364992, 140175996141567, +STORE, 140175985356800, 140175996141567, +STORE, 140175985348608, 140175996141567, +STORE, 140175985340416, 140175996141567, +STORE, 140175985332224, 140175996141567, +STORE, 140175985324032, 140175996141567, +STORE, 140175985315840, 140175996141567, +STORE, 140175985307648, 140175996141567, +STORE, 140175985299456, 140175996141567, +STORE, 140175985291264, 140175996141567, +STORE, 140175985283072, 140175996141567, +STORE, 140175985274880, 140175996141567, +STORE, 140175963574272, 140175985139711, +STORE, 140175985266688, 140175996141567, +STORE, 140175961477120, 140175985139711, +STORE, 53080064, 56831999, +STORE, 140175959379968, 140175985139711, +STORE, 140175985258496, 140175996141567, +STORE, 140175957282816, 140175985139711, +STORE, 140175985250304, 140175996141567, +STORE, 140175985242112, 140175996141567, +STORE, 140175985233920, 140175996141567, +STORE, 140175985225728, 140175996141567, +STORE, 140175985217536, 140175996141567, +STORE, 140175957151744, 140175985139711, +STORE, 140175956627456, 140175985139711, +SNULL, 140175980158975, 140175985139711, +STORE, 140175956627456, 140175980158975, +STORE, 140175980158976, 140175985139711, +SNULL, 140175980158976, 140175980421119, +STORE, 140175980421120, 140175985139711, +STORE, 140175980158976, 140175980421119, +ERASE, 140175980158976, 140175980421119, +STORE, 140175954530304, 140175980158975, +STORE, 140175985209344, 140175996141567, +STORE, 53080064, 57094143, +STORE, 140175952433152, 140175980158975, +STORE, 140175985192960, 140175996141567, +STORE, 140175985184768, 140175996141567, +STORE, 140175985176576, 140175996141567, +STORE, 140175985168384, 140175996141567, +STORE, 140175985160192, 140175996141567, +STORE, 140175985152000, 140175996141567, +STORE, 140175985143808, 140175996141567, +STORE, 140175980412928, 140175985139711, +STORE, 140175980404736, 140175985139711, +STORE, 140175980396544, 140175985139711, +STORE, 140175980388352, 140175985139711, +STORE, 140175980380160, 140175985139711, +STORE, 140175980371968, 140175985139711, +STORE, 140175980363776, 140175985139711, +STORE, 140175980355584, 140175985139711, +STORE, 140175980347392, 140175985139711, +STORE, 140175980339200, 140175985139711, +STORE, 53080064, 57356287, +SNULL, 140176018747392, 140176018907135, +STORE, 140176018907136, 140176018964479, +STORE, 140176018747392, 140176018907135, +ERASE, 140176018747392, 140176018907135, +STORE, 140175952146432, 140175980158975, +STORE, 140175950049280, 140175980158975, +SNULL, 140175952146431, 140175980158975, +STORE, 140175950049280, 140175952146431, +STORE, 140175952146432, 140175980158975, +SNULL, 140175952146432, 140175952433151, +STORE, 140175952433152, 140175980158975, +STORE, 140175952146432, 140175952433151, +ERASE, 140175952146432, 140175952433151, +STORE, 140176018898944, 140176018964479, +STORE, 53080064, 57749503, +STORE, 140175949520896, 140175952146431, +STORE, 140175947423744, 140175952146431, +SNULL, 140175993769983, 140175996141567, +STORE, 140175985143808, 140175993769983, +STORE, 140175993769984, 140175996141567, +SNULL, 140175993769984, 140175994044415, +STORE, 140175994044416, 140175996141567, +STORE, 140175993769984, 140175994044415, +ERASE, 140175993769984, 140175994044415, +STORE, 140176018890752, 140176018964479, +STORE, 140176018882560, 140176018964479, +STORE, 140176018874368, 140176018964479, +STORE, 140176018866176, 140176018964479, +STORE, 
140176018849792, 140176018964479, +STORE, 140176018841600, 140176018964479, +STORE, 140176018825216, 140176018964479, +STORE, 140176018817024, 140176018964479, +STORE, 140176018800640, 140176018964479, +STORE, 140176018792448, 140176018964479, +STORE, 140176018759680, 140176018964479, +STORE, 140176018751488, 140176018964479, +STORE, 140175994028032, 140175996141567, +STORE, 140176018743296, 140176018964479, +STORE, 140175994011648, 140175996141567, +STORE, 140175994003456, 140175996141567, +STORE, 140175993987072, 140175996141567, +STORE, 140175993978880, 140175996141567, +STORE, 140175993946112, 140175996141567, +STORE, 140175993937920, 140175996141567, +STORE, 140175993921536, 140175996141567, +STORE, 140175993913344, 140175996141567, +STORE, 140175993896960, 140175996141567, +STORE, 140175993888768, 140175996141567, +STORE, 140175993872384, 140175996141567, +STORE, 140175993864192, 140175996141567, +STORE, 140175993831424, 140175996141567, +STORE, 140175993823232, 140175996141567, +STORE, 140175993806848, 140175996141567, +STORE, 140175993798656, 140175996141567, +STORE, 140175993782272, 140175996141567, +STORE, 140175993774080, 140175996141567, +STORE, 140175980322816, 140175985139711, +STORE, 140175980314624, 140175985139711, +STORE, 140175980281856, 140175985139711, +STORE, 140175980273664, 140175985139711, +STORE, 140175980257280, 140175985139711, +STORE, 140175945326592, 140175952146431, +STORE, 140175980249088, 140175985139711, +STORE, 140175980232704, 140175985139711, +STORE, 140175980224512, 140175985139711, +STORE, 140175980208128, 140175985139711, +STORE, 140175980199936, 140175985139711, +STORE, 140175980167168, 140175985139711, +STORE, 140175952433152, 140175985139711, +STORE, 140175952416768, 140175985139711, +STORE, 140175952408576, 140175985139711, +STORE, 140175952392192, 140175985139711, +STORE, 140175952384000, 140175985139711, +STORE, 140175952367616, 140175985139711, +STORE, 140175943229440, 140175952146431, +STORE, 140175952359424, 140175985139711, +STORE, 140175952326656, 140175985139711, +STORE, 140175952318464, 140175985139711, +STORE, 140175952302080, 140175985139711, +STORE, 140175952293888, 140175985139711, +STORE, 140175952277504, 140175985139711, +STORE, 140175952269312, 140175985139711, +STORE, 140175952252928, 140175985139711, +STORE, 140175952244736, 140175985139711, +STORE, 140175952211968, 140175985139711, +STORE, 140175952203776, 140175985139711, +STORE, 140175952187392, 140175985139711, +STORE, 140175952179200, 140175985139711, +STORE, 140175952162816, 140175985139711, +STORE, 140175952154624, 140175985139711, +STORE, 140175943213056, 140175952146431, +STORE, 140175943213056, 140175985139711, +STORE, 140175943180288, 140175985139711, +STORE, 140175943172096, 140175985139711, +STORE, 140175943155712, 140175985139711, +STORE, 140175943147520, 140175985139711, +STORE, 140175943131136, 140175985139711, +STORE, 140175943122944, 140175985139711, +STORE, 140175943106560, 140175985139711, +STORE, 140175943098368, 140175985139711, +STORE, 140175943065600, 140175985139711, +STORE, 140175943057408, 140175985139711, +STORE, 140175943041024, 140175985139711, +STORE, 140175943032832, 140175985139711, +STORE, 140175943016448, 140175985139711, +STORE, 140175943008256, 140175985139711, +STORE, 140175942991872, 140175985139711, +STORE, 140175942983680, 140175985139711, +STORE, 140175942950912, 140175985139711, +STORE, 140175942942720, 140175985139711, +STORE, 140175942926336, 140175985139711, +STORE, 140175942918144, 140175985139711, +STORE, 140175942901760, 
140175985139711, +STORE, 140175942893568, 140175985139711, +STORE, 140175942877184, 140175985139711, +STORE, 140175942868992, 140175985139711, +STORE, 140175942836224, 140175985139711, +STORE, 140175942828032, 140175985139711, +STORE, 140175942811648, 140175985139711, +STORE, 140175942803456, 140175985139711, +STORE, 140175942787072, 140175985139711, +STORE, 140175942778880, 140175985139711, +STORE, 140175942762496, 140175985139711, +STORE, 140175942754304, 140175985139711, +STORE, 140175942721536, 140175985139711, +STORE, 140175942713344, 140175985139711, +STORE, 140175942696960, 140175985139711, +STORE, 140175942688768, 140175985139711, +STORE, 140175942672384, 140175985139711, +STORE, 140175942664192, 140175985139711, +STORE, 140175942647808, 140175985139711, +STORE, 140175942639616, 140175985139711, +STORE, 140175942606848, 140175985139711, +STORE, 140175942598656, 140175985139711, +STORE, 140175942582272, 140175985139711, +STORE, 140175942574080, 140175985139711, +STORE, 140175942557696, 140175985139711, +STORE, 140175942549504, 140175985139711, +STORE, 140175942533120, 140175985139711, +STORE, 140175942524928, 140175985139711, +STORE, 140175942492160, 140175985139711, +STORE, 140175942483968, 140175985139711, +STORE, 140175942467584, 140175985139711, +STORE, 140175942459392, 140175985139711, +STORE, 140175942443008, 140175985139711, +STORE, 140175942434816, 140175985139711, +STORE, 140175942418432, 140175985139711, +STORE, 140175942410240, 140175985139711, +STORE, 140175942377472, 140175985139711, +STORE, 140175942369280, 140175985139711, +STORE, 140175942352896, 140175985139711, +STORE, 140175942344704, 140175985139711, +STORE, 140175942328320, 140175985139711, +STORE, 140175942320128, 140175985139711, +STORE, 140175942303744, 140175985139711, +STORE, 140175942295552, 140175985139711, +STORE, 140175942262784, 140175985139711, +STORE, 140175942254592, 140175985139711, +STORE, 140175942238208, 140175985139711, +STORE, 140175942230016, 140175985139711, +STORE, 140175942213632, 140175985139711, +STORE, 140175942205440, 140175985139711, +STORE, 140175942189056, 140175985139711, +STORE, 140175942180864, 140175985139711, +STORE, 140175942148096, 140175985139711, +STORE, 140175942139904, 140175985139711, +STORE, 140175942123520, 140175985139711, +STORE, 140175942115328, 140175985139711, +STORE, 140175942098944, 140175985139711, +STORE, 140175942090752, 140175985139711, +STORE, 140175942074368, 140175985139711, +STORE, 140175942066176, 140175985139711, +STORE, 140175942033408, 140175985139711, +STORE, 140175942025216, 140175985139711, +STORE, 140175942008832, 140175985139711, +STORE, 140175942000640, 140175985139711, +STORE, 140175941984256, 140175985139711, +STORE, 140175941976064, 140175985139711, +STORE, 140175941959680, 140175985139711, +STORE, 140175939862528, 140175985139711, +STORE, 140175939854336, 140175985139711, +STORE, 140175939821568, 140175985139711, +STORE, 140175939813376, 140175985139711, +STORE, 140175939796992, 140175985139711, +STORE, 140175939788800, 140175985139711, +STORE, 140175939772416, 140175985139711, +STORE, 140175939764224, 140175985139711, +STORE, 140175939747840, 140175985139711, +STORE, 140175939739648, 140175985139711, +STORE, 140175939706880, 140175985139711, +STORE, 140175939698688, 140175985139711, +STORE, 140175939682304, 140175985139711, +STORE, 140175939674112, 140175985139711, +STORE, 140175939657728, 140175985139711, +STORE, 140175939649536, 140175985139711, +STORE, 140175939633152, 140175985139711, +STORE, 140175939624960, 140175985139711, +STORE, 
140175939592192, 140175985139711, +STORE, 140175939584000, 140175985139711, +STORE, 140175939567616, 140175985139711, +STORE, 140175939559424, 140175985139711, +STORE, 140175939543040, 140175985139711, +STORE, 140175939534848, 140175985139711, +STORE, 140175939518464, 140175985139711, +STORE, 140175939510272, 140175985139711, +STORE, 140175939477504, 140175985139711, +STORE, 140175939469312, 140175985139711, +STORE, 140175939452928, 140175985139711, +STORE, 140175939444736, 140175985139711, +STORE, 140175939428352, 140175985139711, +STORE, 140175939420160, 140175985139711, +STORE, 140175939403776, 140175985139711, +STORE, 140175939395584, 140175985139711, +STORE, 140175939362816, 140175985139711, +STORE, 140175939354624, 140175985139711, +STORE, 140175939338240, 140175985139711, +STORE, 140175939330048, 140175985139711, +STORE, 140175939313664, 140175985139711, +STORE, 140175939305472, 140175985139711, +STORE, 140175939289088, 140175985139711, +STORE, 140175939280896, 140175985139711, +STORE, 140175939248128, 140175985139711, +STORE, 140175939239936, 140175985139711, +STORE, 140175939223552, 140175985139711, +STORE, 140175939215360, 140175985139711, +STORE, 140175939198976, 140175985139711, +STORE, 140175939190784, 140175985139711, +STORE, 140175939174400, 140175985139711, +STORE, 140175939166208, 140175985139711, +STORE, 140175939133440, 140175985139711, +STORE, 140175939125248, 140175985139711, +STORE, 140175939108864, 140175985139711, +STORE, 140175939100672, 140175985139711, +STORE, 140175939084288, 140175985139711, +STORE, 140175939076096, 140175985139711, +STORE, 140175939059712, 140175985139711, +STORE, 140175939051520, 140175985139711, +STORE, 140175939018752, 140175985139711, +STORE, 140175939010560, 140175985139711, +STORE, 140175938994176, 140175985139711, +STORE, 140175938985984, 140175985139711, +STORE, 140175938969600, 140175985139711, +STORE, 140175938961408, 140175985139711, +STORE, 140175938945024, 140175985139711, +STORE, 140175938936832, 140175985139711, +STORE, 140175938904064, 140175985139711, +STORE, 140175938895872, 140175985139711, +STORE, 140175938879488, 140175985139711, +STORE, 140175938871296, 140175985139711, +STORE, 140175938854912, 140175985139711, +STORE, 140175938846720, 140175985139711, +STORE, 140175938830336, 140175985139711, +STORE, 140175938822144, 140175985139711, +STORE, 140175938789376, 140175985139711, +STORE, 140175938781184, 140175985139711, +STORE, 140175938764800, 140175985139711, +STORE, 140175938756608, 140175985139711, +STORE, 140175938740224, 140175985139711, +STORE, 140175938732032, 140175985139711, +STORE, 140175938715648, 140175985139711, +STORE, 140175938707456, 140175985139711, +STORE, 140175938674688, 140175985139711, +STORE, 140175938666496, 140175985139711, +STORE, 140175938650112, 140175985139711, +STORE, 140175938641920, 140175985139711, +STORE, 140175938625536, 140175985139711, +STORE, 140175938617344, 140175985139711, +STORE, 140175938600960, 140175985139711, +STORE, 140175938592768, 140175985139711, +STORE, 140175938560000, 140175985139711, +STORE, 140175938551808, 140175985139711, +STORE, 140175938535424, 140175985139711, +STORE, 140175938527232, 140175985139711, +STORE, 140175938510848, 140175985139711, +STORE, 140175938502656, 140175985139711, +STORE, 140175938486272, 140175985139711, +STORE, 140175938478080, 140175985139711, +STORE, 140175938445312, 140175985139711, +STORE, 140175938437120, 140175985139711, +STORE, 140175938420736, 140175985139711, +STORE, 140175938412544, 140175985139711, +STORE, 140175938396160, 
140175985139711, +STORE, 140175938387968, 140175985139711, +STORE, 140175938371584, 140175985139711, +STORE, 140175938363392, 140175985139711, +STORE, 140175938330624, 140175985139711, +STORE, 140175938322432, 140175985139711, +STORE, 140175938306048, 140175985139711, +STORE, 140175938297856, 140175985139711, +STORE, 140175938281472, 140175985139711, +STORE, 140175938273280, 140175985139711, +STORE, 140175938256896, 140175985139711, +STORE, 140175938248704, 140175985139711, +STORE, 140175938215936, 140175985139711, +STORE, 140175938207744, 140175985139711, +STORE, 140175938191360, 140175985139711, +STORE, 140175938183168, 140175985139711, +STORE, 140175938166784, 140175985139711, +STORE, 140175938158592, 140175985139711, +STORE, 140175938142208, 140175985139711, +STORE, 140175936045056, 140175985139711, +STORE, 140175936036864, 140175985139711, +STORE, 140175936004096, 140175985139711, +STORE, 140175935995904, 140175985139711, +STORE, 140175935979520, 140175985139711, +STORE, 140175935971328, 140175985139711, +STORE, 140175935954944, 140175985139711, +STORE, 140175935946752, 140175985139711, +STORE, 140175935930368, 140175985139711, +STORE, 140175935922176, 140175985139711, +STORE, 140175935889408, 140175985139711, +STORE, 140175935881216, 140175985139711, +STORE, 140175935864832, 140175985139711, +STORE, 140175935856640, 140175985139711, +STORE, 140175935840256, 140175985139711, +STORE, 140175935832064, 140175985139711, +STORE, 140175935815680, 140175985139711, +STORE, 140175935807488, 140175985139711, +STORE, 140175935774720, 140175985139711, +STORE, 140175935766528, 140175985139711, +STORE, 140175935750144, 140175985139711, +STORE, 140175935741952, 140175985139711, +STORE, 140175935725568, 140175985139711, +STORE, 140175935717376, 140175985139711, +STORE, 140175935700992, 140175985139711, +STORE, 140175935692800, 140175985139711, +STORE, 140175935660032, 140175985139711, +STORE, 140175935651840, 140175985139711, +STORE, 140175935635456, 140175985139711, +STORE, 140175935627264, 140175985139711, +STORE, 140175935610880, 140175985139711, +STORE, 140175935602688, 140175985139711, +STORE, 140175935586304, 140175985139711, +STORE, 140175935578112, 140175985139711, +STORE, 140175935545344, 140175985139711, +STORE, 140175935537152, 140175985139711, +STORE, 140175935520768, 140175985139711, +STORE, 140175935512576, 140175985139711, +STORE, 140175935496192, 140175985139711, +STORE, 140175935488000, 140175985139711, +STORE, 140175935471616, 140175985139711, +STORE, 140175935463424, 140175985139711, +STORE, 140175935430656, 140175985139711, +STORE, 140175935422464, 140175985139711, +STORE, 140175935406080, 140175985139711, +STORE, 140175935397888, 140175985139711, +STORE, 140175935381504, 140175985139711, +STORE, 140175935373312, 140175985139711, +STORE, 140175935356928, 140175985139711, +STORE, 140175935348736, 140175985139711, +STORE, 140175935315968, 140175985139711, +STORE, 140175935307776, 140175985139711, +STORE, 140175935291392, 140175985139711, +STORE, 140175935283200, 140175985139711, +STORE, 140175935266816, 140175985139711, +STORE, 140175935258624, 140175985139711, +STORE, 140175935242240, 140175985139711, +STORE, 140175935234048, 140175985139711, +STORE, 140175935201280, 140175985139711, +STORE, 140175935193088, 140175985139711, +STORE, 140175935176704, 140175985139711, +STORE, 140175935168512, 140175985139711, +STORE, 140175935152128, 140175985139711, +STORE, 140175935143936, 140175985139711, +STORE, 140175935127552, 140175985139711, +STORE, 140175935119360, 140175985139711, +STORE, 
140175935086592, 140175985139711, +STORE, 140175935078400, 140175985139711, +STORE, 140175935062016, 140175985139711, +STORE, 140175935053824, 140175985139711, +STORE, 140175935037440, 140175985139711, +STORE, 140175935029248, 140175985139711, +STORE, 140175935012864, 140175985139711, +STORE, 140175935004672, 140175985139711, +STORE, 140175934971904, 140175985139711, +STORE, 140175934963712, 140175985139711, +STORE, 140175934947328, 140175985139711, +STORE, 140175934939136, 140175985139711, +STORE, 140175934922752, 140175985139711, +STORE, 140175934914560, 140175985139711, +STORE, 140175934898176, 140175985139711, +STORE, 140175934889984, 140175985139711, +STORE, 140175934857216, 140175985139711, +STORE, 140175934849024, 140175985139711, +STORE, 140175934832640, 140175985139711, +STORE, 140175934824448, 140175985139711, +STORE, 140175934808064, 140175985139711, +STORE, 140175934799872, 140175985139711, +STORE, 140175934783488, 140175985139711, +STORE, 140175934775296, 140175985139711, +STORE, 140175934742528, 140175985139711, +STORE, 140175934734336, 140175985139711, +STORE, 140175934717952, 140175985139711, +STORE, 140175934709760, 140175985139711, +STORE, 140175934693376, 140175985139711, +STORE, 140175934685184, 140175985139711, +STORE, 140175934668800, 140175985139711, +STORE, 140175934660608, 140175985139711, +STORE, 140175934627840, 140175985139711, +STORE, 140175934619648, 140175985139711, +STORE, 140175934603264, 140175985139711, +STORE, 140175934595072, 140175985139711, +STORE, 140175934578688, 140175985139711, +STORE, 140175934570496, 140175985139711, +STORE, 140175934554112, 140175985139711, +STORE, 140175934545920, 140175985139711, +STORE, 140175934513152, 140175985139711, +STORE, 140175934504960, 140175985139711, +STORE, 140175934488576, 140175985139711, +STORE, 140175934480384, 140175985139711, +STORE, 140175934464000, 140175985139711, +STORE, 140175934455808, 140175985139711, +STORE, 140175934439424, 140175985139711, +STORE, 140175934431232, 140175985139711, +STORE, 140175934398464, 140175985139711, +STORE, 140175934390272, 140175985139711, +STORE, 140175934373888, 140175985139711, +STORE, 140175934365696, 140175985139711, +STORE, 140175934349312, 140175985139711, +STORE, 140175934341120, 140175985139711, +STORE, 140175934324736, 140175985139711, +STORE, 140175932227584, 140175985139711, +STORE, 140175932219392, 140175985139711, +STORE, 140175932186624, 140175985139711, +STORE, 140175932178432, 140175985139711, +STORE, 140175932162048, 140175985139711, +STORE, 140175932153856, 140175985139711, +STORE, 140175932137472, 140175985139711, +STORE, 53080064, 57884671, +STORE, 140175932129280, 140175985139711, +STORE, 140175932112896, 140175985139711, +STORE, 140175932104704, 140175985139711, +STORE, 140175932071936, 140175985139711, +STORE, 140175932063744, 140175985139711, +STORE, 140175932047360, 140175985139711, +STORE, 140175932039168, 140175985139711, +STORE, 140175932022784, 140175985139711, +STORE, 140175932014592, 140175985139711, +STORE, 140175931998208, 140175985139711, +STORE, 140175931990016, 140175985139711, +STORE, 140175931957248, 140175985139711, +STORE, 140175931949056, 140175985139711, +STORE, 140175931932672, 140175985139711, +STORE, 140175931924480, 140175985139711, +STORE, 140175931908096, 140175985139711, +STORE, 140175931899904, 140175985139711, +STORE, 140175931883520, 140175985139711, +STORE, 140175931875328, 140175985139711, +STORE, 140175931842560, 140175985139711, +STORE, 140175931834368, 140175985139711, +STORE, 140175931817984, 140175985139711, 
+STORE, 140175931809792, 140175985139711, +STORE, 140175931793408, 140175985139711, +STORE, 140175931785216, 140175985139711, +STORE, 140175931768832, 140175985139711, +STORE, 140175931760640, 140175985139711, +STORE, 140175931727872, 140175985139711, +STORE, 140175931719680, 140175985139711, +STORE, 140175931703296, 140175985139711, +STORE, 140175931695104, 140175985139711, +STORE, 140175931678720, 140175985139711, +STORE, 140175931670528, 140175985139711, +STORE, 140175931654144, 140175985139711, +STORE, 140175931645952, 140175985139711, +STORE, 140175931613184, 140175985139711, +STORE, 140175931604992, 140175985139711, +STORE, 140175931588608, 140175985139711, +STORE, 140175931580416, 140175985139711, +STORE, 140175931564032, 140175985139711, +STORE, 140175931555840, 140175985139711, +STORE, 140175931539456, 140175985139711, +STORE, 140175931531264, 140175985139711, +STORE, 140175931498496, 140175985139711, +STORE, 140175931490304, 140175985139711, +STORE, 140175931473920, 140175985139711, +STORE, 140175931465728, 140175985139711, +STORE, 140175931449344, 140175985139711, +STORE, 140175931441152, 140175985139711, +STORE, 140175931424768, 140175985139711, +STORE, 140175931416576, 140175985139711, +STORE, 140175931383808, 140175985139711, +STORE, 140175931375616, 140175985139711, +STORE, 140175931359232, 140175985139711, +STORE, 140175931351040, 140175985139711, +STORE, 140175931334656, 140175985139711, +STORE, 140175931326464, 140175985139711, +STORE, 140175931310080, 140175985139711, +STORE, 140175931301888, 140175985139711, +STORE, 140175931269120, 140175985139711, +STORE, 140175931260928, 140175985139711, +STORE, 140175931244544, 140175985139711, +STORE, 140175931236352, 140175985139711, +STORE, 140175931219968, 140175985139711, +STORE, 140175931211776, 140175985139711, +STORE, 140175931195392, 140175985139711, +STORE, 140175931187200, 140175985139711, +STORE, 140175931154432, 140175985139711, +STORE, 140175931146240, 140175985139711, +STORE, 140175931129856, 140175985139711, +STORE, 140175931121664, 140175985139711, +STORE, 140175931105280, 140175985139711, +STORE, 140175931097088, 140175985139711, +STORE, 140175931080704, 140175985139711, +STORE, 140175931072512, 140175985139711, +STORE, 140175931039744, 140175985139711, +STORE, 140175931031552, 140175985139711, +STORE, 140175931015168, 140175985139711, +STORE, 140175931006976, 140175985139711, +STORE, 140175930990592, 140175985139711, +STORE, 140175930982400, 140175985139711, +STORE, 140175930966016, 140175985139711, +STORE, 140175930957824, 140175985139711, +STORE, 140175930925056, 140175985139711, +STORE, 140175930916864, 140175985139711, +STORE, 140175930900480, 140175985139711, +STORE, 140175930892288, 140175985139711, +STORE, 140175930875904, 140175985139711, +STORE, 140175930867712, 140175985139711, +STORE, 140175930851328, 140175985139711, +STORE, 140175930843136, 140175985139711, +STORE, 140175930810368, 140175985139711, +STORE, 140175930802176, 140175985139711, +STORE, 140175930785792, 140175985139711, +STORE, 140175930777600, 140175985139711, +STORE, 140175930761216, 140175985139711, +STORE, 140175930753024, 140175985139711, +STORE, 140175930736640, 140175985139711, +STORE, 140175930728448, 140175985139711, +STORE, 140175930695680, 140175985139711, +STORE, 140175930687488, 140175985139711, +STORE, 140175930671104, 140175985139711, +STORE, 140175930662912, 140175985139711, +STORE, 140175930646528, 140175985139711, +STORE, 140175930638336, 140175985139711, +STORE, 140175930621952, 140175985139711, +STORE, 140175930613760, 
140175985139711, +STORE, 140175930580992, 140175985139711, +STORE, 140175930572800, 140175985139711, +STORE, 140175930556416, 140175985139711, +STORE, 140175930548224, 140175985139711, +STORE, 140175930531840, 140175985139711, +STORE, 140175930523648, 140175985139711, +STORE, 140175930507264, 140175985139711, +STORE, 140175928410112, 140175985139711, +STORE, 140175928401920, 140175985139711, +STORE, 140175928369152, 140175985139711, +STORE, 140175928360960, 140175985139711, +STORE, 140175928344576, 140175985139711, +STORE, 140175928336384, 140175985139711, +STORE, 140175928320000, 140175985139711, +STORE, 140175928311808, 140175985139711, +STORE, 140175928295424, 140175985139711, +STORE, 140175927242752, 140175985139711, +SNULL, 140175956627455, 140175985139711, +STORE, 140175927242752, 140175956627455, +STORE, 140175956627456, 140175985139711, + }; + unsigned long set24[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140735281639424, 140737488351231, +SNULL, 140735281643519, 140737488351231, +STORE, 140735281639424, 140735281643519, +STORE, 140735281508352, 140735281643519, +STORE, 94717834911744, 94717834928127, +SNULL, 94717834915839, 94717834928127, +STORE, 94717834911744, 94717834915839, +STORE, 94717834915840, 94717834928127, +ERASE, 94717834915840, 94717834928127, +STORE, 94717834919936, 94717834928127, +STORE, 140428246065152, 140428248317951, +SNULL, 140428246208511, 140428248317951, +STORE, 140428246065152, 140428246208511, +STORE, 140428246208512, 140428248317951, +ERASE, 140428246208512, 140428248317951, +STORE, 140428248305664, 140428248313855, +STORE, 140428248313856, 140428248317951, +STORE, 140735281811456, 140735281815551, +STORE, 140735281799168, 140735281811455, +STORE, 140428248297472, 140428248305663, +STORE, 140428243841024, 140428246065151, +SNULL, 140428245491711, 140428246065151, +STORE, 140428243841024, 140428245491711, +STORE, 140428245491712, 140428246065151, +SNULL, 140428245491712, 140428246061055, +STORE, 140428246061056, 140428246065151, +STORE, 140428245491712, 140428246061055, +ERASE, 140428245491712, 140428246061055, +STORE, 140428245491712, 140428246061055, +ERASE, 140428246061056, 140428246065151, +STORE, 140428246061056, 140428246065151, +STORE, 140428248268800, 140428248297471, +STORE, 140428241625088, 140428243841023, +SNULL, 140428241625088, 140428241723391, +STORE, 140428241723392, 140428243841023, +STORE, 140428241625088, 140428241723391, +SNULL, 140428243816447, 140428243841023, +STORE, 140428241723392, 140428243816447, +STORE, 140428243816448, 140428243841023, +SNULL, 140428243816448, 140428243824639, +STORE, 140428243824640, 140428243841023, +STORE, 140428243816448, 140428243824639, +ERASE, 140428243816448, 140428243824639, +STORE, 140428243816448, 140428243824639, +ERASE, 140428243824640, 140428243841023, +STORE, 140428243824640, 140428243841023, +STORE, 140428237828096, 140428241625087, +SNULL, 140428237828096, 140428239486975, +STORE, 140428239486976, 140428241625087, +STORE, 140428237828096, 140428239486975, +SNULL, 140428241584127, 140428241625087, +STORE, 140428239486976, 140428241584127, +STORE, 140428241584128, 140428241625087, +SNULL, 140428241584128, 140428241608703, +STORE, 140428241608704, 140428241625087, +STORE, 140428241584128, 140428241608703, +ERASE, 140428241584128, 140428241608703, +STORE, 140428241584128, 140428241608703, +ERASE, 140428241608704, 140428241625087, +STORE, 140428241608704, 140428241625087, +STORE, 140428235567104, 140428237828095, +SNULL, 140428235567104, 140428235718655, +STORE, 140428235718656, 
140428237828095, +STORE, 140428235567104, 140428235718655, +SNULL, 140428237811711, 140428237828095, +STORE, 140428235718656, 140428237811711, +STORE, 140428237811712, 140428237828095, +SNULL, 140428237811712, 140428237819903, +STORE, 140428237819904, 140428237828095, +STORE, 140428237811712, 140428237819903, +ERASE, 140428237811712, 140428237819903, +STORE, 140428237811712, 140428237819903, +ERASE, 140428237819904, 140428237828095, +STORE, 140428237819904, 140428237828095, +STORE, 140428233445376, 140428235567103, +SNULL, 140428233445376, 140428233461759, +STORE, 140428233461760, 140428235567103, +STORE, 140428233445376, 140428233461759, +SNULL, 140428235558911, 140428235567103, +STORE, 140428233461760, 140428235558911, +STORE, 140428235558912, 140428235567103, +ERASE, 140428235558912, 140428235567103, +STORE, 140428235558912, 140428235567103, +STORE, 140428231315456, 140428233445375, +SNULL, 140428231315456, 140428231344127, +STORE, 140428231344128, 140428233445375, +STORE, 140428231315456, 140428231344127, +SNULL, 140428233437183, 140428233445375, +STORE, 140428231344128, 140428233437183, +STORE, 140428233437184, 140428233445375, +ERASE, 140428233437184, 140428233445375, +STORE, 140428233437184, 140428233445375, +STORE, 140428248260608, 140428248268799, +STORE, 140428229062656, 140428231315455, +SNULL, 140428229062656, 140428229214207, +STORE, 140428229214208, 140428231315455, +STORE, 140428229062656, 140428229214207, +SNULL, 140428231307263, 140428231315455, +STORE, 140428229214208, 140428231307263, +STORE, 140428231307264, 140428231315455, +ERASE, 140428231307264, 140428231315455, +STORE, 140428231307264, 140428231315455, +STORE, 140428226891776, 140428229062655, +SNULL, 140428226891776, 140428226961407, +STORE, 140428226961408, 140428229062655, +STORE, 140428226891776, 140428226961407, +SNULL, 140428229054463, 140428229062655, +STORE, 140428226961408, 140428229054463, +STORE, 140428229054464, 140428229062655, +ERASE, 140428229054464, 140428229062655, +STORE, 140428229054464, 140428229062655, +STORE, 140428223680512, 140428226891775, +SNULL, 140428223680512, 140428224757759, +STORE, 140428224757760, 140428226891775, +STORE, 140428223680512, 140428224757759, +SNULL, 140428226854911, 140428226891775, +STORE, 140428224757760, 140428226854911, +STORE, 140428226854912, 140428226891775, +ERASE, 140428226854912, 140428226891775, +STORE, 140428226854912, 140428226891775, +STORE, 140428221546496, 140428223680511, +SNULL, 140428221546496, 140428221575167, +STORE, 140428221575168, 140428223680511, +STORE, 140428221546496, 140428221575167, +SNULL, 140428223672319, 140428223680511, +STORE, 140428221575168, 140428223672319, +STORE, 140428223672320, 140428223680511, +ERASE, 140428223672320, 140428223680511, +STORE, 140428223672320, 140428223680511, +STORE, 140428219236352, 140428221546495, +SNULL, 140428219236352, 140428219441151, +STORE, 140428219441152, 140428221546495, +STORE, 140428219236352, 140428219441151, +SNULL, 140428221538303, 140428221546495, +STORE, 140428219441152, 140428221538303, +STORE, 140428221538304, 140428221546495, +ERASE, 140428221538304, 140428221546495, +STORE, 140428221538304, 140428221546495, +STORE, 140428216852480, 140428219236351, +SNULL, 140428216852480, 140428217044991, +STORE, 140428217044992, 140428219236351, +STORE, 140428216852480, 140428217044991, +SNULL, 140428219138047, 140428219236351, +STORE, 140428217044992, 140428219138047, +STORE, 140428219138048, 140428219236351, +ERASE, 140428219138048, 140428219236351, +STORE, 140428219138048, 140428219236351, +STORE, 
140428248252416, 140428248268799, +STORE, 140428214284288, 140428216852479, +SNULL, 140428214284288, 140428214751231, +STORE, 140428214751232, 140428216852479, +STORE, 140428214284288, 140428214751231, +SNULL, 140428216844287, 140428216852479, +STORE, 140428214751232, 140428216844287, +STORE, 140428216844288, 140428216852479, +ERASE, 140428216844288, 140428216852479, +STORE, 140428216844288, 140428216852479, +STORE, 140428212170752, 140428214284287, +SNULL, 140428212170752, 140428212183039, +STORE, 140428212183040, 140428214284287, +STORE, 140428212170752, 140428212183039, +SNULL, 140428214276095, 140428214284287, +STORE, 140428212183040, 140428214276095, +STORE, 140428214276096, 140428214284287, +ERASE, 140428214276096, 140428214284287, +STORE, 140428214276096, 140428214284287, +STORE, 140428209991680, 140428212170751, +SNULL, 140428209991680, 140428210069503, +STORE, 140428210069504, 140428212170751, +STORE, 140428209991680, 140428210069503, +SNULL, 140428212162559, 140428212170751, +STORE, 140428210069504, 140428212162559, +STORE, 140428212162560, 140428212170751, +ERASE, 140428212162560, 140428212170751, +STORE, 140428212162560, 140428212170751, +STORE, 140428207874048, 140428209991679, +SNULL, 140428207874048, 140428207890431, +STORE, 140428207890432, 140428209991679, +STORE, 140428207874048, 140428207890431, +SNULL, 140428209983487, 140428209991679, +STORE, 140428207890432, 140428209983487, +STORE, 140428209983488, 140428209991679, +ERASE, 140428209983488, 140428209991679, +STORE, 140428209983488, 140428209991679, +STORE, 140428248244224, 140428248268799, +STORE, 140428248231936, 140428248268799, +SNULL, 140428241600511, 140428241608703, +STORE, 140428241584128, 140428241600511, +STORE, 140428241600512, 140428241608703, +SNULL, 140428209987583, 140428209991679, +STORE, 140428209983488, 140428209987583, +STORE, 140428209987584, 140428209991679, +SNULL, 140428212166655, 140428212170751, +STORE, 140428212162560, 140428212166655, +STORE, 140428212166656, 140428212170751, +SNULL, 140428214280191, 140428214284287, +STORE, 140428214276096, 140428214280191, +STORE, 140428214280192, 140428214284287, +SNULL, 140428243820543, 140428243824639, +STORE, 140428243816448, 140428243820543, +STORE, 140428243820544, 140428243824639, +SNULL, 140428216848383, 140428216852479, +STORE, 140428216844288, 140428216848383, +STORE, 140428216848384, 140428216852479, +SNULL, 140428219232255, 140428219236351, +STORE, 140428219138048, 140428219232255, +STORE, 140428219232256, 140428219236351, +SNULL, 140428221542399, 140428221546495, +STORE, 140428221538304, 140428221542399, +STORE, 140428221542400, 140428221546495, +SNULL, 140428223676415, 140428223680511, +STORE, 140428223672320, 140428223676415, +STORE, 140428223676416, 140428223680511, +SNULL, 140428226863103, 140428226891775, +STORE, 140428226854912, 140428226863103, +STORE, 140428226863104, 140428226891775, +SNULL, 140428229058559, 140428229062655, +STORE, 140428229054464, 140428229058559, +STORE, 140428229058560, 140428229062655, +SNULL, 140428231311359, 140428231315455, +STORE, 140428231307264, 140428231311359, +STORE, 140428231311360, 140428231315455, +SNULL, 140428233441279, 140428233445375, +STORE, 140428233437184, 140428233441279, +STORE, 140428233441280, 140428233445375, +SNULL, 140428235563007, 140428235567103, +STORE, 140428235558912, 140428235563007, +STORE, 140428235563008, 140428235567103, +SNULL, 140428237815807, 140428237819903, +STORE, 140428237811712, 140428237815807, +STORE, 140428237815808, 140428237819903, +SNULL, 140428246056959, 
140428246061055, +STORE, 140428245491712, 140428246056959, +STORE, 140428246056960, 140428246061055, +SNULL, 94717834924031, 94717834928127, +STORE, 94717834919936, 94717834924031, +STORE, 94717834924032, 94717834928127, +SNULL, 140428248309759, 140428248313855, +STORE, 140428248305664, 140428248309759, +STORE, 140428248309760, 140428248313855, +ERASE, 140428248268800, 140428248297471, +STORE, 94717843058688, 94717843193855, +STORE, 94749677137920, 94749677559807, +STORE, 94749677563904, 94749677604863, +STORE, 94749677604864, 94749677608959, +STORE, 94749710970880, 94749711241215, +STORE, 140490884894720, 140490884935679, +STORE, 140490884935680, 140490887032831, +STORE, 140490887032832, 140490887036927, +STORE, 140490887036928, 140490887041023, +STORE, 140490887041024, 140490887065599, +STORE, 140490887065600, 140490887110655, +STORE, 140490887110656, 140490889203711, +STORE, 140490889203712, 140490889207807, +STORE, 140490889207808, 140490889211903, +STORE, 140490889211904, 140490889293823, +STORE, 140490889293824, 140490891390975, +STORE, 140490891390976, 140490891395071, +STORE, 140490891395072, 140490891399167, +STORE, 140490891399168, 140490891407359, +STORE, 140490891407360, 140490891436031, +STORE, 140490891436032, 140490893529087, +STORE, 140490893529088, 140490893533183, +STORE, 140490893533184, 140490893537279, +STORE, 140490893537280, 140490901979135, +STORE, 140490901979136, 140490901991423, +STORE, 140490901991424, 140490904084479, +STORE, 140490904084480, 140490904088575, +STORE, 140490904088576, 140490904092671, +STORE, 140490904092672, 140490904559615, +STORE, 140490904559616, 140490906652671, +STORE, 140490906652672, 140490906656767, +STORE, 140490906656768, 140490906660863, +STORE, 140490906660864, 140490906677247, +STORE, 140490906677248, 140490908770303, +STORE, 140490908770304, 140490908774399, +STORE, 140490908774400, 140490908778495, +STORE, 140490908778496, 140490908794879, +STORE, 140490908794880, 140490910887935, +STORE, 140490910887936, 140490910892031, +STORE, 140490910892032, 140490910896127, +STORE, 140490910896128, 140490912555007, +STORE, 140490912555008, 140490914652159, +STORE, 140490914652160, 140490914668543, +STORE, 140490914668544, 140490914676735, +STORE, 140490914676736, 140490914693119, +STORE, 140490914693120, 140490914791423, +STORE, 140490914791424, 140490916884479, +STORE, 140490916884480, 140490916888575, +STORE, 140490916888576, 140490916892671, +STORE, 140490916892672, 140490916909055, +STORE, 140490916909056, 140490916937727, +STORE, 140490916937728, 140490919030783, +STORE, 140490919030784, 140490919034879, +STORE, 140490919034880, 140490919038975, +STORE, 140490919038976, 140490919190527, +STORE, 140490919190528, 140490921283583, +STORE, 140490921283584, 140490921287679, +STORE, 140490921287680, 140490921291775, +STORE, 140490921291776, 140490921299967, +STORE, 140490921299968, 140490921390079, +STORE, 140490921390080, 140490923483135, +STORE, 140490923483136, 140490923487231, +STORE, 140490923487232, 140490923491327, +STORE, 140490923491328, 140490923757567, +STORE, 140490923757568, 140490925850623, +STORE, 140490925850624, 140490925867007, +STORE, 140490925867008, 140490925871103, +STORE, 140490925871104, 140490925875199, +STORE, 140490925875200, 140490925903871, +STORE, 140490925903872, 140490928001023, +STORE, 140490928001024, 140490928005119, +STORE, 140490928005120, 140490928009215, +STORE, 140490928009216, 140490928152575, +STORE, 140490930184192, 140490930221055, +STORE, 140490930221056, 140490930237439, +STORE, 140490930237440, 
140490930241535, +STORE, 140490930241536, 140490930245631, +STORE, 140490930245632, 140490930249727, +STORE, 140490930249728, 140490930253823, +STORE, 140490930253824, 140490930257919, +STORE, 140490930257920, 140490930262015, +STORE, 140724611694592, 140724611829759, +STORE, 140724612427776, 140724612440063, +STORE, 140724612440064, 140724612444159, +STORE, 94103163662336, 94103163772927, +STORE, 94103165865984, 94103165874175, +STORE, 94103165874176, 94103165878271, +STORE, 94103165878272, 94103165886463, +STORE, 94103182548992, 94103182684159, +STORE, 140092694708224, 140092696367103, +STORE, 140092696367104, 140092698464255, +STORE, 140092698464256, 140092698480639, +STORE, 140092698480640, 140092698488831, +STORE, 140092698488832, 140092698505215, +STORE, 140092698505216, 140092698648575, +STORE, 140092700708864, 140092700717055, +STORE, 140092700745728, 140092700749823, +STORE, 140092700749824, 140092700753919, +STORE, 140092700753920, 140092700758015, +STORE, 140736800911360, 140736801046527, +STORE, 140736802308096, 140736802320383, +STORE, 140736802320384, 140736802324479, +STORE, 93948802064384, 93948802174975, +STORE, 93948804268032, 93948804276223, +STORE, 93948804276224, 93948804280319, +STORE, 93948804280320, 93948804288511, +STORE, 93948806266880, 93948806402047, +STORE, 140222999113728, 140223000772607, +STORE, 140223000772608, 140223002869759, +STORE, 140223002869760, 140223002886143, +STORE, 140223002886144, 140223002894335, +STORE, 140223002894336, 140223002910719, +STORE, 140223002910720, 140223003054079, +STORE, 140223005114368, 140223005122559, +STORE, 140223005151232, 140223005155327, +STORE, 140223005155328, 140223005159423, +STORE, 140223005159424, 140223005163519, +STORE, 140720877506560, 140720877641727, +STORE, 140720878231552, 140720878243839, +STORE, 140720878243840, 140720878247935, +STORE, 140737488347136, 140737488351231, +STORE, 140733232087040, 140737488351231, +SNULL, 140733232091135, 140737488351231, +STORE, 140733232087040, 140733232091135, +STORE, 140733231955968, 140733232091135, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 140161681321984, 140161683574783, +SNULL, 140161681465343, 140161683574783, +STORE, 140161681321984, 140161681465343, +STORE, 140161681465344, 140161683574783, +ERASE, 140161681465344, 140161683574783, +STORE, 140161683562496, 140161683570687, +STORE, 140161683570688, 140161683574783, +STORE, 140733232214016, 140733232218111, +STORE, 140733232201728, 140733232214015, +STORE, 140161683533824, 140161683562495, +STORE, 140161683525632, 140161683533823, +STORE, 140161678159872, 140161681321983, +SNULL, 140161678159872, 140161679220735, +STORE, 140161679220736, 140161681321983, +STORE, 140161678159872, 140161679220735, +SNULL, 140161681313791, 140161681321983, +STORE, 140161679220736, 140161681313791, +STORE, 140161681313792, 140161681321983, +ERASE, 140161681313792, 140161681321983, +STORE, 140161681313792, 140161681321983, +STORE, 140161674362880, 140161678159871, +SNULL, 140161674362880, 140161676021759, +STORE, 140161676021760, 140161678159871, +STORE, 140161674362880, 140161676021759, +SNULL, 140161678118911, 140161678159871, +STORE, 140161676021760, 140161678118911, +STORE, 140161678118912, 140161678159871, +SNULL, 140161678118912, 140161678143487, +STORE, 140161678143488, 140161678159871, +STORE, 140161678118912, 140161678143487, +ERASE, 140161678118912, 140161678143487, +STORE, 140161678118912, 140161678143487, +ERASE, 140161678143488, 140161678159871, +STORE, 140161678143488, 
140161678159871, +STORE, 140161683513344, 140161683533823, +SNULL, 140161678135295, 140161678143487, +STORE, 140161678118912, 140161678135295, +STORE, 140161678135296, 140161678143487, +SNULL, 140161681317887, 140161681321983, +STORE, 140161681313792, 140161681317887, +STORE, 140161681317888, 140161681321983, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 140161683566591, 140161683570687, +STORE, 140161683562496, 140161683566591, +STORE, 140161683566592, 140161683570687, +ERASE, 140161683533824, 140161683562495, +STORE, 25477120, 25612287, +STORE, 25477120, 25759743, +STORE, 140161681829888, 140161683513343, +STORE, 25477120, 25915391, +STORE, 25477120, 26054655, +SNULL, 25800703, 26054655, +STORE, 25477120, 25800703, +STORE, 25800704, 26054655, +ERASE, 25800704, 26054655, +STORE, 140737488347136, 140737488351231, +STORE, 140723218452480, 140737488351231, +SNULL, 140723218456575, 140737488351231, +STORE, 140723218452480, 140723218456575, +STORE, 140723218321408, 140723218456575, +STORE, 4194304, 26279935, +STORE, 28372992, 28454911, +STORE, 28454912, 29806591, +STORE, 140398872264704, 140398874517503, +SNULL, 140398872408063, 140398874517503, +STORE, 140398872264704, 140398872408063, +STORE, 140398872408064, 140398874517503, +ERASE, 140398872408064, 140398874517503, +STORE, 140398874505216, 140398874513407, +STORE, 140398874513408, 140398874517503, +STORE, 140723219247104, 140723219251199, +STORE, 140723219234816, 140723219247103, +STORE, 140398874476544, 140398874505215, +STORE, 140398874468352, 140398874476543, +STORE, 140398868430848, 140398872264703, +SNULL, 140398868430848, 140398870138879, +STORE, 140398870138880, 140398872264703, +STORE, 140398868430848, 140398870138879, +SNULL, 140398872231935, 140398872264703, +STORE, 140398870138880, 140398872231935, +STORE, 140398872231936, 140398872264703, +ERASE, 140398872231936, 140398872264703, +STORE, 140398872231936, 140398872264703, +STORE, 140398866235392, 140398868430847, +SNULL, 140398866235392, 140398866329599, +STORE, 140398866329600, 140398868430847, +STORE, 140398866235392, 140398866329599, +SNULL, 140398868422655, 140398868430847, +STORE, 140398866329600, 140398868422655, +STORE, 140398868422656, 140398868430847, +ERASE, 140398868422656, 140398868430847, +STORE, 140398868422656, 140398868430847, +STORE, 140398863716352, 140398866235391, +SNULL, 140398863716352, 140398864130047, +STORE, 140398864130048, 140398866235391, +STORE, 140398863716352, 140398864130047, +SNULL, 140398866223103, 140398866235391, +STORE, 140398864130048, 140398866223103, +STORE, 140398866223104, 140398866235391, +ERASE, 140398866223104, 140398866235391, +STORE, 140398866223104, 140398866235391, +STORE, 140398861082624, 140398863716351, +SNULL, 140398861082624, 140398861611007, +STORE, 140398861611008, 140398863716351, +STORE, 140398861082624, 140398861611007, +SNULL, 140398863708159, 140398863716351, +STORE, 140398861611008, 140398863708159, +STORE, 140398863708160, 140398863716351, +ERASE, 140398863708160, 140398863716351, +STORE, 140398863708160, 140398863716351, +STORE, 140398858969088, 140398861082623, +SNULL, 140398858969088, 140398858981375, +STORE, 140398858981376, 140398861082623, +STORE, 140398858969088, 140398858981375, +SNULL, 140398861074431, 140398861082623, +STORE, 140398858981376, 140398861074431, +STORE, 140398861074432, 140398861082623, +ERASE, 140398861074432, 140398861082623, +STORE, 140398861074432, 140398861082623, +STORE, 140398856765440, 140398858969087, +SNULL, 140398856765440, 140398856867839, 
+STORE, 140398856867840, 140398858969087, +STORE, 140398856765440, 140398856867839, +SNULL, 140398858960895, 140398858969087, +STORE, 140398856867840, 140398858960895, +STORE, 140398858960896, 140398858969087, +ERASE, 140398858960896, 140398858969087, +STORE, 140398858960896, 140398858969087, +STORE, 140398874460160, 140398874476543, +STORE, 140398853603328, 140398856765439, +SNULL, 140398853603328, 140398854664191, +STORE, 140398854664192, 140398856765439, +STORE, 140398853603328, 140398854664191, +SNULL, 140398856757247, 140398856765439, +STORE, 140398854664192, 140398856757247, +STORE, 140398856757248, 140398856765439, +ERASE, 140398856757248, 140398856765439, +STORE, 140398856757248, 140398856765439, +STORE, 140398849806336, 140398853603327, +SNULL, 140398849806336, 140398851465215, +STORE, 140398851465216, 140398853603327, +STORE, 140398849806336, 140398851465215, +SNULL, 140398853562367, 140398853603327, +STORE, 140398851465216, 140398853562367, +STORE, 140398853562368, 140398853603327, +SNULL, 140398853562368, 140398853586943, +STORE, 140398853586944, 140398853603327, +STORE, 140398853562368, 140398853586943, +ERASE, 140398853562368, 140398853586943, +STORE, 140398853562368, 140398853586943, +ERASE, 140398853586944, 140398853603327, +STORE, 140398853586944, 140398853603327, +STORE, 140398874447872, 140398874476543, +SNULL, 140398853578751, 140398853586943, +STORE, 140398853562368, 140398853578751, +STORE, 140398853578752, 140398853586943, +SNULL, 140398856761343, 140398856765439, +STORE, 140398856757248, 140398856761343, +STORE, 140398856761344, 140398856765439, +SNULL, 140398858964991, 140398858969087, +STORE, 140398858960896, 140398858964991, +STORE, 140398858964992, 140398858969087, +SNULL, 140398861078527, 140398861082623, +STORE, 140398861074432, 140398861078527, +STORE, 140398861078528, 140398861082623, +SNULL, 140398863712255, 140398863716351, +STORE, 140398863708160, 140398863712255, +STORE, 140398863712256, 140398863716351, +SNULL, 140398866231295, 140398866235391, +STORE, 140398866223104, 140398866231295, +STORE, 140398866231296, 140398866235391, +SNULL, 140398868426751, 140398868430847, +STORE, 140398868422656, 140398868426751, +STORE, 140398868426752, 140398868430847, +SNULL, 140398872236031, 140398872264703, +STORE, 140398872231936, 140398872236031, +STORE, 140398872236032, 140398872264703, +SNULL, 28405759, 28454911, +STORE, 28372992, 28405759, +STORE, 28405760, 28454911, +SNULL, 140398874509311, 140398874513407, +STORE, 140398874505216, 140398874509311, +STORE, 140398874509312, 140398874513407, +ERASE, 140398874476544, 140398874505215, +STORE, 43278336, 43413503, +STORE, 140398872764416, 140398874447871, +STORE, 140398874501120, 140398874505215, +STORE, 140398872629248, 140398872764415, +STORE, 43278336, 43556863, +STORE, 140398847709184, 140398849806335, +STORE, 140398874492928, 140398874505215, +STORE, 140398874484736, 140398874505215, +STORE, 140398874447872, 140398874484735, +STORE, 140398872612864, 140398872764415, +STORE, 43278336, 43692031, +STORE, 43278336, 43880447, +STORE, 140398872604672, 140398872764415, +STORE, 140398872596480, 140398872764415, +STORE, 43278336, 44044287, +STORE, 140398872580096, 140398872764415, +STORE, 140737488347136, 140737488351231, +STORE, 140734403092480, 140737488351231, +SNULL, 140734403096575, 140737488351231, +STORE, 140734403092480, 140734403096575, +STORE, 140734402961408, 140734403096575, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 140240662380544, 140240664633343, +SNULL, 
140240662523903, 140240664633343, +STORE, 140240662380544, 140240662523903, +STORE, 140240662523904, 140240664633343, +ERASE, 140240662523904, 140240664633343, +STORE, 140240664621056, 140240664629247, +STORE, 140240664629248, 140240664633343, +STORE, 140734403145728, 140734403149823, +STORE, 140734403133440, 140734403145727, +STORE, 140240664592384, 140240664621055, +STORE, 140240664584192, 140240664592383, +STORE, 140240659218432, 140240662380543, +SNULL, 140240659218432, 140240660279295, +STORE, 140240660279296, 140240662380543, +STORE, 140240659218432, 140240660279295, +SNULL, 140240662372351, 140240662380543, +STORE, 140240660279296, 140240662372351, +STORE, 140240662372352, 140240662380543, +ERASE, 140240662372352, 140240662380543, +STORE, 140240662372352, 140240662380543, +STORE, 140240655421440, 140240659218431, +SNULL, 140240655421440, 140240657080319, +STORE, 140240657080320, 140240659218431, +STORE, 140240655421440, 140240657080319, +SNULL, 140240659177471, 140240659218431, +STORE, 140240657080320, 140240659177471, +STORE, 140240659177472, 140240659218431, +SNULL, 140240659177472, 140240659202047, +STORE, 140240659202048, 140240659218431, +STORE, 140240659177472, 140240659202047, +ERASE, 140240659177472, 140240659202047, +STORE, 140240659177472, 140240659202047, +ERASE, 140240659202048, 140240659218431, +STORE, 140240659202048, 140240659218431, +STORE, 140240664571904, 140240664592383, +SNULL, 140240659193855, 140240659202047, +STORE, 140240659177472, 140240659193855, +STORE, 140240659193856, 140240659202047, +SNULL, 140240662376447, 140240662380543, +STORE, 140240662372352, 140240662376447, +STORE, 140240662376448, 140240662380543, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 140240664625151, 140240664629247, +STORE, 140240664621056, 140240664625151, +STORE, 140240664625152, 140240664629247, +ERASE, 140240664592384, 140240664621055, +STORE, 30646272, 30781439, +STORE, 30646272, 30928895, +STORE, 140240662888448, 140240664571903, +STORE, 94256659468288, 94256659578879, +STORE, 94256661671936, 94256661680127, +STORE, 94256661680128, 94256661684223, +STORE, 94256661684224, 94256661692415, +STORE, 94256687980544, 94256688115711, +STORE, 139801712504832, 139801714163711, +STORE, 139801714163712, 139801716260863, +STORE, 139801716260864, 139801716277247, +STORE, 139801716277248, 139801716285439, +STORE, 139801716285440, 139801716301823, +STORE, 139801716301824, 139801716445183, +STORE, 139801718505472, 139801718513663, +STORE, 139801718542336, 139801718546431, +STORE, 139801718546432, 139801718550527, +STORE, 139801718550528, 139801718554623, +STORE, 140721575538688, 140721575673855, +STORE, 140721577013248, 140721577025535, +STORE, 140721577025536, 140721577029631, +STORE, 140737488347136, 140737488351231, +STORE, 140729259393024, 140737488351231, +SNULL, 140729259397119, 140737488351231, +STORE, 140729259393024, 140729259397119, +STORE, 140729259261952, 140729259397119, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 139682376638464, 139682378891263, +SNULL, 139682376781823, 139682378891263, +STORE, 139682376638464, 139682376781823, +STORE, 139682376781824, 139682378891263, +ERASE, 139682376781824, 139682378891263, +STORE, 139682378878976, 139682378887167, +STORE, 139682378887168, 139682378891263, +STORE, 140729260462080, 140729260466175, +STORE, 140729260449792, 140729260462079, +STORE, 139682378850304, 139682378878975, +STORE, 139682378842112, 139682378850303, +STORE, 139682373476352, 139682376638463, 
+SNULL, 139682373476352, 139682374537215, +STORE, 139682374537216, 139682376638463, +STORE, 139682373476352, 139682374537215, +SNULL, 139682376630271, 139682376638463, +STORE, 139682374537216, 139682376630271, +STORE, 139682376630272, 139682376638463, +ERASE, 139682376630272, 139682376638463, +STORE, 139682376630272, 139682376638463, +STORE, 139682369679360, 139682373476351, +SNULL, 139682369679360, 139682371338239, +STORE, 139682371338240, 139682373476351, +STORE, 139682369679360, 139682371338239, +SNULL, 139682373435391, 139682373476351, +STORE, 139682371338240, 139682373435391, +STORE, 139682373435392, 139682373476351, +SNULL, 139682373435392, 139682373459967, +STORE, 139682373459968, 139682373476351, +STORE, 139682373435392, 139682373459967, +ERASE, 139682373435392, 139682373459967, +STORE, 139682373435392, 139682373459967, +ERASE, 139682373459968, 139682373476351, +STORE, 139682373459968, 139682373476351, +STORE, 139682378829824, 139682378850303, +SNULL, 139682373451775, 139682373459967, +STORE, 139682373435392, 139682373451775, +STORE, 139682373451776, 139682373459967, +SNULL, 139682376634367, 139682376638463, +STORE, 139682376630272, 139682376634367, +STORE, 139682376634368, 139682376638463, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 139682378883071, 139682378887167, +STORE, 139682378878976, 139682378883071, +STORE, 139682378883072, 139682378887167, +ERASE, 139682378850304, 139682378878975, +STORE, 10022912, 10158079, +STORE, 10022912, 10305535, +STORE, 139682377146368, 139682378829823, +STORE, 140737488347136, 140737488351231, +STORE, 140731831926784, 140737488351231, +SNULL, 140731831930879, 140737488351231, +STORE, 140731831926784, 140731831930879, +STORE, 140731831795712, 140731831930879, +STORE, 94615305261056, 94615307485183, +SNULL, 94615305371647, 94615307485183, +STORE, 94615305261056, 94615305371647, +STORE, 94615305371648, 94615307485183, +ERASE, 94615305371648, 94615307485183, +STORE, 94615307464704, 94615307476991, +STORE, 94615307476992, 94615307485183, +STORE, 140163912994816, 140163915247615, +SNULL, 140163913138175, 140163915247615, +STORE, 140163912994816, 140163913138175, +STORE, 140163913138176, 140163915247615, +ERASE, 140163913138176, 140163915247615, +STORE, 140163915235328, 140163915243519, +STORE, 140163915243520, 140163915247615, +STORE, 140731832217600, 140731832221695, +STORE, 140731832205312, 140731832217599, +STORE, 140163915206656, 140163915235327, +STORE, 140163915198464, 140163915206655, +STORE, 140163909197824, 140163912994815, +SNULL, 140163909197824, 140163910856703, +STORE, 140163910856704, 140163912994815, +STORE, 140163909197824, 140163910856703, +SNULL, 140163912953855, 140163912994815, +STORE, 140163910856704, 140163912953855, +STORE, 140163912953856, 140163912994815, +SNULL, 140163912953856, 140163912978431, +STORE, 140163912978432, 140163912994815, +STORE, 140163912953856, 140163912978431, +ERASE, 140163912953856, 140163912978431, +STORE, 140163912953856, 140163912978431, +ERASE, 140163912978432, 140163912994815, +STORE, 140163912978432, 140163912994815, +SNULL, 140163912970239, 140163912978431, +STORE, 140163912953856, 140163912970239, +STORE, 140163912970240, 140163912978431, +SNULL, 94615307472895, 94615307476991, +STORE, 94615307464704, 94615307472895, +STORE, 94615307472896, 94615307476991, +SNULL, 140163915239423, 140163915243519, +STORE, 140163915235328, 140163915239423, +STORE, 140163915239424, 140163915243519, +ERASE, 140163915206656, 140163915235327, +STORE, 94615330672640, 
94615330807807, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140725254479872, 140737488351231, +SNULL, 140725254488063, 140737488351231, +STORE, 140725254479872, 140725254488063, +STORE, 140725254348800, 140725254488063, +STORE, 94572781277184, 94572785741823, +SNULL, 94572783312895, 94572785741823, +STORE, 94572781277184, 94572783312895, +STORE, 94572783312896, 94572785741823, +ERASE, 94572783312896, 94572785741823, +STORE, 94572785405952, 94572785455103, +STORE, 94572785455104, 94572785741823, +STORE, 139636001341440, 139636003594239, +SNULL, 139636001484799, 139636003594239, +STORE, 139636001341440, 139636001484799, +STORE, 139636001484800, 139636003594239, +ERASE, 139636001484800, 139636003594239, +STORE, 139636003581952, 139636003590143, +STORE, 139636003590144, 139636003594239, +STORE, 140725255557120, 140725255561215, +STORE, 140725255544832, 140725255557119, +STORE, 139636003553280, 139636003581951, +STORE, 139636003545088, 139636003553279, +STORE, 139635998773248, 139636001341439, +SNULL, 139635998773248, 139635999240191, +STORE, 139635999240192, 139636001341439, +STORE, 139635998773248, 139635999240191, +SNULL, 139636001333247, 139636001341439, +STORE, 139635999240192, 139636001333247, +STORE, 139636001333248, 139636001341439, +ERASE, 139636001333248, 139636001341439, +STORE, 139636001333248, 139636001341439, +STORE, 139635996569600, 139635998773247, +SNULL, 139635996569600, 139635996671999, +STORE, 139635996672000, 139635998773247, +STORE, 139635996569600, 139635996671999, +SNULL, 139635998765055, 139635998773247, +STORE, 139635996672000, 139635998765055, +STORE, 139635998765056, 139635998773247, +ERASE, 139635998765056, 139635998773247, +STORE, 139635998765056, 139635998773247, +STORE, 139635994353664, 139635996569599, +SNULL, 139635994353664, 139635994451967, +STORE, 139635994451968, 139635996569599, +STORE, 139635994353664, 139635994451967, +SNULL, 139635996545023, 139635996569599, +STORE, 139635994451968, 139635996545023, +STORE, 139635996545024, 139635996569599, +SNULL, 139635996545024, 139635996553215, +STORE, 139635996553216, 139635996569599, +STORE, 139635996545024, 139635996553215, +ERASE, 139635996545024, 139635996553215, +STORE, 139635996545024, 139635996553215, +ERASE, 139635996553216, 139635996569599, +STORE, 139635996553216, 139635996569599, +STORE, 139635992223744, 139635994353663, +SNULL, 139635992223744, 139635992252415, +STORE, 139635992252416, 139635994353663, +STORE, 139635992223744, 139635992252415, +SNULL, 139635994345471, 139635994353663, +STORE, 139635992252416, 139635994345471, +STORE, 139635994345472, 139635994353663, +ERASE, 139635994345472, 139635994353663, +STORE, 139635994345472, 139635994353663, +STORE, 139635988426752, 139635992223743, +SNULL, 139635988426752, 139635990085631, +STORE, 139635990085632, 139635992223743, +STORE, 139635988426752, 139635990085631, +SNULL, 139635992182783, 139635992223743, +STORE, 139635990085632, 139635992182783, +STORE, 139635992182784, 139635992223743, +SNULL, 139635992182784, 139635992207359, +STORE, 139635992207360, 139635992223743, +STORE, 139635992182784, 139635992207359, +ERASE, 139635992182784, 139635992207359, +STORE, 139635992182784, 139635992207359, +ERASE, 139635992207360, 139635992223743, +STORE, 139635992207360, 139635992223743, +STORE, 139636003536896, 139636003553279, +SNULL, 139635992199167, 139635992207359, +STORE, 139635992182784, 139635992199167, +STORE, 139635992199168, 139635992207359, +SNULL, 139635996549119, 139635996553215, +STORE, 139635996545024, 
139635996549119, +STORE, 139635996549120, 139635996553215, +SNULL, 139635994349567, 139635994353663, +STORE, 139635994345472, 139635994349567, +STORE, 139635994349568, 139635994353663, +SNULL, 139635998769151, 139635998773247, +STORE, 139635998765056, 139635998769151, +STORE, 139635998769152, 139635998773247, +SNULL, 139636001337343, 139636001341439, +STORE, 139636001333248, 139636001337343, +STORE, 139636001337344, 139636001341439, +SNULL, 94572785418239, 94572785455103, +STORE, 94572785405952, 94572785418239, +STORE, 94572785418240, 94572785455103, +SNULL, 139636003586047, 139636003590143, +STORE, 139636003581952, 139636003586047, +STORE, 139636003586048, 139636003590143, +ERASE, 139636003553280, 139636003581951, +STORE, 94572798435328, 94572798570495, +STORE, 139636001853440, 139636003536895, +STORE, 139635981426688, 139635988426751, +STORE, 139635980615680, 139635981426687, +STORE, 94572798435328, 94572798705663, +STORE, 94572798435328, 94572798840831, +STORE, 94572798435328, 94572798975999, +STORE, 94572798435328, 94572799111167, +STORE, 94572798435328, 94572799246335, +STORE, 94572798435328, 94572799381503, +STORE, 94572798435328, 94572799516671, +STORE, 94572798435328, 94572799651839, +STORE, 94572798435328, 94572799787007, +STORE, 94572798435328, 94572799922175, +STORE, 94572798435328, 94572800057343, +STORE, 94572798435328, 94572800192511, +STORE, 94572798435328, 94572800327679, +STORE, 94572798435328, 94572800462847, +STORE, 94572798435328, 94572800598015, +STORE, 94572798435328, 94572800733183, +STORE, 94572798435328, 94572800868351, +STORE, 94572798435328, 94572801003519, +STORE, 94572798435328, 94572801138687, +STORE, 94572798435328, 94572801273855, +STORE, 94572798435328, 94572801409023, +STORE, 94572798435328, 94572801544191, +STORE, 94572798435328, 94572801679359, +STORE, 94572798435328, 94572801814527, +STORE, 94572798435328, 94572801949695, +STORE, 94572798435328, 94572802084863, +STORE, 94572798435328, 94572802220031, +STORE, 94572798435328, 94572802355199, +STORE, 94572798435328, 94572802490367, +STORE, 94572798435328, 94572802625535, +STORE, 94572798435328, 94572802760703, +STORE, 94572798435328, 94572802895871, +STORE, 94572798435328, 94572803031039, +STORE, 94572798435328, 94572803166207, +STORE, 94572798435328, 94572803301375, +STORE, 94572798435328, 94572803436543, +STORE, 94572798435328, 94572803571711, +STORE, 94572798435328, 94572803706879, +STORE, 94572798435328, 94572803842047, +STORE, 94572798435328, 94572803977215, +STORE, 94572798435328, 94572804112383, +STORE, 94572798435328, 94572804247551, +STORE, 94572798435328, 94572804382719, +STORE, 94572798435328, 94572804517887, +STORE, 94572798435328, 94572804653055, +STORE, 94572798435328, 94572804788223, +STORE, 94572798435328, 94572804923391, +STORE, 94572798435328, 94572805058559, +STORE, 94572798435328, 94572805193727, +STORE, 94572798435328, 94572805328895, +STORE, 94572798435328, 94572805464063, +STORE, 94572798435328, 94572805599231, +STORE, 94572798435328, 94572805734399, +STORE, 94572798435328, 94572805869567, +STORE, 94572798435328, 94572806004735, +STORE, 94572798435328, 94572806139903, +STORE, 94572798435328, 94572806275071, +STORE, 94572798435328, 94572806410239, +STORE, 94572798435328, 94572806545407, +STORE, 94572798435328, 94572806680575, +STORE, 94572798435328, 94572806815743, +STORE, 94572798435328, 94572806950911, +STORE, 94572798435328, 94572807086079, +STORE, 94572798435328, 94572807221247, +STORE, 94572798435328, 94572807356415, +STORE, 94572798435328, 94572807491583, +STORE, 94572798435328, 
94572807626751, +STORE, 94572798435328, 94572807761919, +STORE, 94572798435328, 94572807897087, +STORE, 94572798435328, 94572808032255, +STORE, 94572798435328, 94572808167423, +STORE, 94572798435328, 94572808302591, +STORE, 94572798435328, 94572808437759, +STORE, 94572798435328, 94572808572927, +ERASE, 139635981426688, 139635988426751, +STORE, 139635985088512, 139635988426751, +STORE, 139635778273280, 139635980615679, +STORE, 139635567632384, 139635778273279, +STORE, 94572798435328, 94572808716287, +STORE, 139635984564224, 139635985088511, +STORE, 139635559239680, 139635567632383, +SNULL, 139635559243775, 139635567632383, +STORE, 139635559239680, 139635559243775, +STORE, 139635559243776, 139635567632383, +STORE, 139635550846976, 139635559239679, +SNULL, 139635550851071, 139635559239679, +STORE, 139635550846976, 139635550851071, +STORE, 139635550851072, 139635559239679, +STORE, 139635542454272, 139635550846975, +STORE, 139635408236544, 139635542454271, +SNULL, 139635408236544, 139635426590719, +STORE, 139635426590720, 139635542454271, +STORE, 139635408236544, 139635426590719, +ERASE, 139635408236544, 139635426590719, +STORE, 139635292372992, 139635542454271, +SNULL, 139635359481855, 139635542454271, +STORE, 139635292372992, 139635359481855, +STORE, 139635359481856, 139635542454271, +SNULL, 139635359481856, 139635426590719, +STORE, 139635426590720, 139635542454271, +STORE, 139635359481856, 139635426590719, +ERASE, 139635359481856, 139635426590719, +SNULL, 139635542458367, 139635550846975, +STORE, 139635542454272, 139635542458367, +STORE, 139635542458368, 139635550846975, +STORE, 139635418198016, 139635426590719, +SNULL, 139635493699583, 139635542454271, +STORE, 139635426590720, 139635493699583, +STORE, 139635493699584, 139635542454271, +ERASE, 139635493699584, 139635542454271, +SNULL, 139635426725887, 139635493699583, +STORE, 139635426590720, 139635426725887, +STORE, 139635426725888, 139635493699583, +SNULL, 139635292508159, 139635359481855, +STORE, 139635292372992, 139635292508159, +STORE, 139635292508160, 139635359481855, +SNULL, 139635418202111, 139635426590719, +STORE, 139635418198016, 139635418202111, +STORE, 139635418202112, 139635426590719, +STORE, 139635225264128, 139635292372991, +STORE, 139635534061568, 139635542454271, +SNULL, 139635534065663, 139635542454271, +STORE, 139635534061568, 139635534065663, +STORE, 139635534065664, 139635542454271, +STORE, 139635525668864, 139635534061567, +SNULL, 139635525672959, 139635534061567, +STORE, 139635525668864, 139635525672959, +STORE, 139635525672960, 139635534061567, +SNULL, 139635225399295, 139635292372991, +STORE, 139635225264128, 139635225399295, +STORE, 139635225399296, 139635292372991, +STORE, 139635091046400, 139635225264127, +SNULL, 139635158155263, 139635225264127, +STORE, 139635091046400, 139635158155263, +STORE, 139635158155264, 139635225264127, +ERASE, 139635158155264, 139635225264127, +STORE, 139634956828672, 139635158155263, +STORE, 139635517276160, 139635525668863, +SNULL, 139635517280255, 139635525668863, +STORE, 139635517276160, 139635517280255, +STORE, 139635517280256, 139635525668863, +SNULL, 139634956828672, 139635091046399, +STORE, 139635091046400, 139635158155263, +STORE, 139634956828672, 139635091046399, +SNULL, 139635091181567, 139635158155263, +STORE, 139635091046400, 139635091181567, +STORE, 139635091181568, 139635158155263, +SNULL, 139635023937535, 139635091046399, +STORE, 139634956828672, 139635023937535, +STORE, 139635023937536, 139635091046399, +ERASE, 139635023937536, 139635091046399, +STORE, 139634956828672, 
139635091046399, +SNULL, 139634956828672, 139635023937535, +STORE, 139635023937536, 139635091046399, +STORE, 139634956828672, 139635023937535, +SNULL, 139635024072703, 139635091046399, +STORE, 139635023937536, 139635024072703, +STORE, 139635024072704, 139635091046399, +STORE, 139635508883456, 139635517276159, +SNULL, 139635508887551, 139635517276159, +STORE, 139635508883456, 139635508887551, +STORE, 139635508887552, 139635517276159, +STORE, 139634822610944, 139635023937535, +SNULL, 139634822610944, 139634956828671, +STORE, 139634956828672, 139635023937535, +STORE, 139634822610944, 139634956828671, +SNULL, 139634956963839, 139635023937535, +STORE, 139634956828672, 139634956963839, +STORE, 139634956963840, 139635023937535, +STORE, 139635500490752, 139635508883455, +SNULL, 139634889719807, 139634956828671, +STORE, 139634822610944, 139634889719807, +STORE, 139634889719808, 139634956828671, +ERASE, 139634889719808, 139634956828671, +SNULL, 139635500494847, 139635508883455, +STORE, 139635500490752, 139635500494847, +STORE, 139635500494848, 139635508883455, +SNULL, 139634822746111, 139634889719807, +STORE, 139634822610944, 139634822746111, +STORE, 139634822746112, 139634889719807, +STORE, 139635409805312, 139635418198015, +STORE, 139634822746112, 139634956828671, +SNULL, 139634822746112, 139634889719807, +STORE, 139634889719808, 139634956828671, +STORE, 139634822746112, 139634889719807, +SNULL, 139634889854975, 139634956828671, +STORE, 139634889719808, 139634889854975, +STORE, 139634889854976, 139634956828671, +SNULL, 139635409809407, 139635418198015, +STORE, 139635409805312, 139635409809407, +STORE, 139635409809408, 139635418198015, +STORE, 139635401412608, 139635409805311, +STORE, 139634688393216, 139634822610943, +SNULL, 139634755502079, 139634822610943, +STORE, 139634688393216, 139634755502079, +STORE, 139634755502080, 139634822610943, +ERASE, 139634755502080, 139634822610943, +SNULL, 139635401416703, 139635409805311, +STORE, 139635401412608, 139635401416703, +STORE, 139635401416704, 139635409805311, +STORE, 139634554175488, 139634755502079, +SNULL, 139634554175488, 139634688393215, +STORE, 139634688393216, 139634755502079, +STORE, 139634554175488, 139634688393215, +SNULL, 139634688528383, 139634755502079, +STORE, 139634688393216, 139634688528383, +STORE, 139634688528384, 139634755502079, +STORE, 139635393019904, 139635401412607, +SNULL, 139634621284351, 139634688393215, +STORE, 139634554175488, 139634621284351, +STORE, 139634621284352, 139634688393215, +ERASE, 139634621284352, 139634688393215, +SNULL, 139634554310655, 139634621284351, +STORE, 139634554175488, 139634554310655, +STORE, 139634554310656, 139634621284351, +STORE, 139634554310656, 139634688393215, +SNULL, 139635393023999, 139635401412607, +STORE, 139635393019904, 139635393023999, +STORE, 139635393024000, 139635401412607, +SNULL, 139634554310656, 139634621284351, +STORE, 139634621284352, 139634688393215, +STORE, 139634554310656, 139634621284351, +SNULL, 139634621419519, 139634688393215, +STORE, 139634621284352, 139634621419519, +STORE, 139634621419520, 139634688393215, +STORE, 139635384627200, 139635393019903, +SNULL, 139635384631295, 139635393019903, +STORE, 139635384627200, 139635384631295, +STORE, 139635384631296, 139635393019903, +STORE, 139635376234496, 139635384627199, +SNULL, 139635376238591, 139635384627199, +STORE, 139635376234496, 139635376238591, +STORE, 139635376238592, 139635384627199, +STORE, 139635367841792, 139635376234495, +SNULL, 139635367845887, 139635376234495, +STORE, 139635367841792, 139635367845887, +STORE, 
139635367845888, 139635376234495, +STORE, 139634419957760, 139634554175487, +SNULL, 139634487066623, 139634554175487, +STORE, 139634419957760, 139634487066623, +STORE, 139634487066624, 139634554175487, +ERASE, 139634487066624, 139634554175487, +STORE, 139635216871424, 139635225264127, +SNULL, 139635216875519, 139635225264127, +STORE, 139635216871424, 139635216875519, +STORE, 139635216875520, 139635225264127, +SNULL, 139634420092927, 139634487066623, +STORE, 139634419957760, 139634420092927, +STORE, 139634420092928, 139634487066623, +STORE, 139635208478720, 139635216871423, +SNULL, 139635208482815, 139635216871423, +STORE, 139635208478720, 139635208482815, +STORE, 139635208482816, 139635216871423, +STORE, 139635200086016, 139635208478719, +SNULL, 139635200090111, 139635208478719, +STORE, 139635200086016, 139635200090111, +STORE, 139635200090112, 139635208478719, +STORE, 139635191693312, 139635200086015, +SNULL, 139635191697407, 139635200086015, +STORE, 139635191693312, 139635191697407, +STORE, 139635191697408, 139635200086015, +STORE, 139635183300608, 139635191693311, +SNULL, 139635183304703, 139635191693311, +STORE, 139635183300608, 139635183304703, +STORE, 139635183304704, 139635191693311, +STORE, 139634420092928, 139634554175487, +SNULL, 139634420092928, 139634487066623, +STORE, 139634487066624, 139634554175487, +STORE, 139634420092928, 139634487066623, +SNULL, 139634487201791, 139634554175487, +STORE, 139634487066624, 139634487201791, +STORE, 139634487201792, 139634554175487, +ERASE, 139635559239680, 139635559243775, +ERASE, 139635559243776, 139635567632383, +ERASE, 139635550846976, 139635550851071, +ERASE, 139635550851072, 139635559239679, +ERASE, 139635542454272, 139635542458367, +ERASE, 139635542458368, 139635550846975, +ERASE, 139635418198016, 139635418202111, +ERASE, 139635418202112, 139635426590719, +ERASE, 139635534061568, 139635534065663, +ERASE, 139635534065664, 139635542454271, +ERASE, 139635525668864, 139635525672959, +ERASE, 139635525672960, 139635534061567, +ERASE, 139635517276160, 139635517280255, +ERASE, 139635517280256, 139635525668863, +ERASE, 139635508883456, 139635508887551, +ERASE, 139635508887552, 139635517276159, +ERASE, 139635500490752, 139635500494847, +ERASE, 139635500494848, 139635508883455, +ERASE, 139635409805312, 139635409809407, +ERASE, 139635409809408, 139635418198015, +ERASE, 139635401412608, 139635401416703, +ERASE, 139635401416704, 139635409805311, +ERASE, 139635393019904, 139635393023999, +ERASE, 139635393024000, 139635401412607, +ERASE, 139635384627200, 139635384631295, +ERASE, 139635384631296, 139635393019903, + }; + unsigned long set25[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722547441664, 140737488351231, +SNULL, 140722547449855, 140737488351231, +STORE, 140722547441664, 140722547449855, +STORE, 140722547310592, 140722547449855, +STORE, 94827521732608, 94827523956735, +SNULL, 94827521843199, 94827523956735, +STORE, 94827521732608, 94827521843199, +STORE, 94827521843200, 94827523956735, +ERASE, 94827521843200, 94827523956735, +STORE, 94827523936256, 94827523948543, +STORE, 94827523948544, 94827523956735, +STORE, 139816136847360, 139816139100159, +SNULL, 139816136990719, 139816139100159, +STORE, 139816136847360, 139816136990719, +STORE, 139816136990720, 139816139100159, +ERASE, 139816136990720, 139816139100159, +STORE, 139816139087872, 139816139096063, +STORE, 139816139096064, 139816139100159, +STORE, 140722548142080, 140722548146175, +STORE, 140722548129792, 140722548142079, +STORE, 
139816139059200, 139816139087871, +STORE, 139816139051008, 139816139059199, +STORE, 139816133050368, 139816136847359, +SNULL, 139816133050368, 139816134709247, +STORE, 139816134709248, 139816136847359, +STORE, 139816133050368, 139816134709247, +SNULL, 139816136806399, 139816136847359, +STORE, 139816134709248, 139816136806399, +STORE, 139816136806400, 139816136847359, +SNULL, 139816136806400, 139816136830975, +STORE, 139816136830976, 139816136847359, +STORE, 139816136806400, 139816136830975, +ERASE, 139816136806400, 139816136830975, +STORE, 139816136806400, 139816136830975, +ERASE, 139816136830976, 139816136847359, +STORE, 139816136830976, 139816136847359, +SNULL, 139816136822783, 139816136830975, +STORE, 139816136806400, 139816136822783, +STORE, 139816136822784, 139816136830975, +SNULL, 94827523944447, 94827523948543, +STORE, 94827523936256, 94827523944447, +STORE, 94827523944448, 94827523948543, +SNULL, 139816139091967, 139816139096063, +STORE, 139816139087872, 139816139091967, +STORE, 139816139091968, 139816139096063, +ERASE, 139816139059200, 139816139087871, +STORE, 94827534970880, 94827535106047, +STORE, 94114394132480, 94114394345471, +STORE, 94114396442624, 94114396446719, +STORE, 94114396446720, 94114396454911, +STORE, 94114396454912, 94114396467199, +STORE, 94114421575680, 94114427715583, +STORE, 139934313955328, 139934315614207, +STORE, 139934315614208, 139934317711359, +STORE, 139934317711360, 139934317727743, +STORE, 139934317727744, 139934317735935, +STORE, 139934317735936, 139934317752319, +STORE, 139934317752320, 139934317764607, +STORE, 139934317764608, 139934319857663, +STORE, 139934319857664, 139934319861759, +STORE, 139934319861760, 139934319865855, +STORE, 139934319865856, 139934320009215, +STORE, 139934320377856, 139934322061311, +STORE, 139934322061312, 139934322077695, +STORE, 139934322106368, 139934322110463, +STORE, 139934322110464, 139934322114559, +STORE, 139934322114560, 139934322118655, +STORE, 140731200376832, 140731200516095, +STORE, 140731200929792, 140731200942079, +STORE, 140731200942080, 140731200946175, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140734133174272, 140737488351231, +SNULL, 140734133182463, 140737488351231, +STORE, 140734133174272, 140734133182463, +STORE, 140734133043200, 140734133182463, +STORE, 94412675600384, 94412677824511, +SNULL, 94412675710975, 94412677824511, +STORE, 94412675600384, 94412675710975, +STORE, 94412675710976, 94412677824511, +ERASE, 94412675710976, 94412677824511, +STORE, 94412677804032, 94412677816319, +STORE, 94412677816320, 94412677824511, +STORE, 140320087945216, 140320090198015, +SNULL, 140320088088575, 140320090198015, +STORE, 140320087945216, 140320088088575, +STORE, 140320088088576, 140320090198015, +ERASE, 140320088088576, 140320090198015, +STORE, 140320090185728, 140320090193919, +STORE, 140320090193920, 140320090198015, +STORE, 140734134591488, 140734134595583, +STORE, 140734134579200, 140734134591487, +STORE, 140320090157056, 140320090185727, +STORE, 140320090148864, 140320090157055, +STORE, 140320084148224, 140320087945215, +SNULL, 140320084148224, 140320085807103, +STORE, 140320085807104, 140320087945215, +STORE, 140320084148224, 140320085807103, +SNULL, 140320087904255, 140320087945215, +STORE, 140320085807104, 140320087904255, +STORE, 140320087904256, 140320087945215, +SNULL, 140320087904256, 140320087928831, +STORE, 140320087928832, 140320087945215, +STORE, 140320087904256, 140320087928831, +ERASE, 140320087904256, 140320087928831, +STORE, 140320087904256, 
140320087928831, +ERASE, 140320087928832, 140320087945215, +STORE, 140320087928832, 140320087945215, +SNULL, 140320087920639, 140320087928831, +STORE, 140320087904256, 140320087920639, +STORE, 140320087920640, 140320087928831, +SNULL, 94412677812223, 94412677816319, +STORE, 94412677804032, 94412677812223, +STORE, 94412677812224, 94412677816319, +SNULL, 140320090189823, 140320090193919, +STORE, 140320090185728, 140320090189823, +STORE, 140320090189824, 140320090193919, +ERASE, 140320090157056, 140320090185727, +STORE, 94412684546048, 94412684681215, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723005485056, 140737488351231, +SNULL, 140723005493247, 140737488351231, +STORE, 140723005485056, 140723005493247, +STORE, 140723005353984, 140723005493247, +STORE, 94387431936000, 94387434160127, +SNULL, 94387432046591, 94387434160127, +STORE, 94387431936000, 94387432046591, +STORE, 94387432046592, 94387434160127, +ERASE, 94387432046592, 94387434160127, +STORE, 94387434139648, 94387434151935, +STORE, 94387434151936, 94387434160127, +STORE, 140151675392000, 140151677644799, +SNULL, 140151675535359, 140151677644799, +STORE, 140151675392000, 140151675535359, +STORE, 140151675535360, 140151677644799, +ERASE, 140151675535360, 140151677644799, +STORE, 140151677632512, 140151677640703, +STORE, 140151677640704, 140151677644799, +STORE, 140723005784064, 140723005788159, +STORE, 140723005771776, 140723005784063, +STORE, 140151677603840, 140151677632511, +STORE, 140151677595648, 140151677603839, +STORE, 140151671595008, 140151675391999, +SNULL, 140151671595008, 140151673253887, +STORE, 140151673253888, 140151675391999, +STORE, 140151671595008, 140151673253887, +SNULL, 140151675351039, 140151675391999, +STORE, 140151673253888, 140151675351039, +STORE, 140151675351040, 140151675391999, +SNULL, 140151675351040, 140151675375615, +STORE, 140151675375616, 140151675391999, +STORE, 140151675351040, 140151675375615, +ERASE, 140151675351040, 140151675375615, +STORE, 140151675351040, 140151675375615, +ERASE, 140151675375616, 140151675391999, +STORE, 140151675375616, 140151675391999, +SNULL, 140151675367423, 140151675375615, +STORE, 140151675351040, 140151675367423, +STORE, 140151675367424, 140151675375615, +SNULL, 94387434147839, 94387434151935, +STORE, 94387434139648, 94387434147839, +STORE, 94387434147840, 94387434151935, +SNULL, 140151677636607, 140151677640703, +STORE, 140151677632512, 140151677636607, +STORE, 140151677636608, 140151677640703, +ERASE, 140151677603840, 140151677632511, +STORE, 94387458818048, 94387458953215, +STORE, 94909010997248, 94909011210239, +STORE, 94909013307392, 94909013311487, +STORE, 94909013311488, 94909013319679, +STORE, 94909013319680, 94909013331967, +STORE, 94909014827008, 94909023371263, +STORE, 140712411975680, 140712413634559, +STORE, 140712413634560, 140712415731711, +STORE, 140712415731712, 140712415748095, +STORE, 140712415748096, 140712415756287, +STORE, 140712415756288, 140712415772671, +STORE, 140712415772672, 140712415784959, +STORE, 140712415784960, 140712417878015, +STORE, 140712417878016, 140712417882111, +STORE, 140712417882112, 140712417886207, +STORE, 140712417886208, 140712418029567, +STORE, 140712418398208, 140712420081663, +STORE, 140712420081664, 140712420098047, +STORE, 140712420126720, 140712420130815, +STORE, 140712420130816, 140712420134911, +STORE, 140712420134912, 140712420139007, +STORE, 140729293111296, 140729293250559, +STORE, 140729293307904, 140729293320191, +STORE, 140729293320192, 140729293324287, +STORE, 
140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140720541691904, 140737488351231, +SNULL, 140720541700095, 140737488351231, +STORE, 140720541691904, 140720541700095, +STORE, 140720541560832, 140720541700095, +STORE, 94203603419136, 94203605643263, +SNULL, 94203603529727, 94203605643263, +STORE, 94203603419136, 94203603529727, +STORE, 94203603529728, 94203605643263, +ERASE, 94203603529728, 94203605643263, +STORE, 94203605622784, 94203605635071, +STORE, 94203605635072, 94203605643263, +STORE, 139847623081984, 139847625334783, +SNULL, 139847623225343, 139847625334783, +STORE, 139847623081984, 139847623225343, +STORE, 139847623225344, 139847625334783, +ERASE, 139847623225344, 139847625334783, +STORE, 139847625322496, 139847625330687, +STORE, 139847625330688, 139847625334783, +STORE, 140720542547968, 140720542552063, +STORE, 140720542535680, 140720542547967, +STORE, 139847625293824, 139847625322495, +STORE, 139847625285632, 139847625293823, +STORE, 139847619284992, 139847623081983, +SNULL, 139847619284992, 139847620943871, +STORE, 139847620943872, 139847623081983, +STORE, 139847619284992, 139847620943871, +SNULL, 139847623041023, 139847623081983, +STORE, 139847620943872, 139847623041023, +STORE, 139847623041024, 139847623081983, +SNULL, 139847623041024, 139847623065599, +STORE, 139847623065600, 139847623081983, +STORE, 139847623041024, 139847623065599, +ERASE, 139847623041024, 139847623065599, +STORE, 139847623041024, 139847623065599, +ERASE, 139847623065600, 139847623081983, +STORE, 139847623065600, 139847623081983, +SNULL, 139847623057407, 139847623065599, +STORE, 139847623041024, 139847623057407, +STORE, 139847623057408, 139847623065599, +SNULL, 94203605630975, 94203605635071, +STORE, 94203605622784, 94203605630975, +STORE, 94203605630976, 94203605635071, +SNULL, 139847625326591, 139847625330687, +STORE, 139847625322496, 139847625326591, +STORE, 139847625326592, 139847625330687, +ERASE, 139847625293824, 139847625322495, +STORE, 94203634880512, 94203635015679, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140721428738048, 140737488351231, +SNULL, 140721428746239, 140737488351231, +STORE, 140721428738048, 140721428746239, +STORE, 140721428606976, 140721428746239, +STORE, 93968808378368, 93968810602495, +SNULL, 93968808488959, 93968810602495, +STORE, 93968808378368, 93968808488959, +STORE, 93968808488960, 93968810602495, +ERASE, 93968808488960, 93968810602495, +STORE, 93968810582016, 93968810594303, +STORE, 93968810594304, 93968810602495, +STORE, 140397757026304, 140397759279103, +SNULL, 140397757169663, 140397759279103, +STORE, 140397757026304, 140397757169663, +STORE, 140397757169664, 140397759279103, +ERASE, 140397757169664, 140397759279103, +STORE, 140397759266816, 140397759275007, +STORE, 140397759275008, 140397759279103, +STORE, 140721430368256, 140721430372351, +STORE, 140721430355968, 140721430368255, +STORE, 140397759238144, 140397759266815, +STORE, 140397759229952, 140397759238143, +STORE, 140397753229312, 140397757026303, +SNULL, 140397753229312, 140397754888191, +STORE, 140397754888192, 140397757026303, +STORE, 140397753229312, 140397754888191, +SNULL, 140397756985343, 140397757026303, +STORE, 140397754888192, 140397756985343, +STORE, 140397756985344, 140397757026303, +SNULL, 140397756985344, 140397757009919, +STORE, 140397757009920, 140397757026303, +STORE, 140397756985344, 140397757009919, +ERASE, 140397756985344, 140397757009919, +STORE, 140397756985344, 140397757009919, +ERASE, 140397757009920, 
140397757026303, +STORE, 140397757009920, 140397757026303, +SNULL, 140397757001727, 140397757009919, +STORE, 140397756985344, 140397757001727, +STORE, 140397757001728, 140397757009919, +SNULL, 93968810590207, 93968810594303, +STORE, 93968810582016, 93968810590207, +STORE, 93968810590208, 93968810594303, +SNULL, 140397759270911, 140397759275007, +STORE, 140397759266816, 140397759270911, +STORE, 140397759270912, 140397759275007, +ERASE, 140397759238144, 140397759266815, +STORE, 93968837025792, 93968837160959, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140721751044096, 140737488351231, +SNULL, 140721751052287, 140737488351231, +STORE, 140721751044096, 140721751052287, +STORE, 140721750913024, 140721751052287, +STORE, 94426051657728, 94426053881855, +SNULL, 94426051768319, 94426053881855, +STORE, 94426051657728, 94426051768319, +STORE, 94426051768320, 94426053881855, +ERASE, 94426051768320, 94426053881855, +STORE, 94426053861376, 94426053873663, +STORE, 94426053873664, 94426053881855, +STORE, 140228456181760, 140228458434559, +SNULL, 140228456325119, 140228458434559, +STORE, 140228456181760, 140228456325119, +STORE, 140228456325120, 140228458434559, +ERASE, 140228456325120, 140228458434559, +STORE, 140228458422272, 140228458430463, +STORE, 140228458430464, 140228458434559, +STORE, 140721751117824, 140721751121919, +STORE, 140721751105536, 140721751117823, +STORE, 140228458393600, 140228458422271, +STORE, 140228458385408, 140228458393599, +STORE, 140228452384768, 140228456181759, +SNULL, 140228452384768, 140228454043647, +STORE, 140228454043648, 140228456181759, +STORE, 140228452384768, 140228454043647, +SNULL, 140228456140799, 140228456181759, +STORE, 140228454043648, 140228456140799, +STORE, 140228456140800, 140228456181759, +SNULL, 140228456140800, 140228456165375, +STORE, 140228456165376, 140228456181759, +STORE, 140228456140800, 140228456165375, +ERASE, 140228456140800, 140228456165375, +STORE, 140228456140800, 140228456165375, +ERASE, 140228456165376, 140228456181759, +STORE, 140228456165376, 140228456181759, +SNULL, 140228456157183, 140228456165375, +STORE, 140228456140800, 140228456157183, +STORE, 140228456157184, 140228456165375, +SNULL, 94426053869567, 94426053873663, +STORE, 94426053861376, 94426053869567, +STORE, 94426053869568, 94426053873663, +SNULL, 140228458426367, 140228458430463, +STORE, 140228458422272, 140228458426367, +STORE, 140228458426368, 140228458430463, +ERASE, 140228458393600, 140228458422271, +STORE, 94426073681920, 94426073817087, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140732727623680, 140737488351231, +SNULL, 140732727631871, 140737488351231, +STORE, 140732727623680, 140732727631871, +STORE, 140732727492608, 140732727631871, +STORE, 94537485996032, 94537488220159, +SNULL, 94537486106623, 94537488220159, +STORE, 94537485996032, 94537486106623, +STORE, 94537486106624, 94537488220159, +ERASE, 94537486106624, 94537488220159, +STORE, 94537488199680, 94537488211967, +STORE, 94537488211968, 94537488220159, +STORE, 140446578036736, 140446580289535, +SNULL, 140446578180095, 140446580289535, +STORE, 140446578036736, 140446578180095, +STORE, 140446578180096, 140446580289535, +ERASE, 140446578180096, 140446580289535, +STORE, 140446580277248, 140446580285439, +STORE, 140446580285440, 140446580289535, +STORE, 140732727758848, 140732727762943, +STORE, 140732727746560, 140732727758847, +STORE, 140446580248576, 140446580277247, +STORE, 140446580240384, 140446580248575, +STORE, 
140446574239744, 140446578036735, +SNULL, 140446574239744, 140446575898623, +STORE, 140446575898624, 140446578036735, +STORE, 140446574239744, 140446575898623, +SNULL, 140446577995775, 140446578036735, +STORE, 140446575898624, 140446577995775, +STORE, 140446577995776, 140446578036735, +SNULL, 140446577995776, 140446578020351, +STORE, 140446578020352, 140446578036735, +STORE, 140446577995776, 140446578020351, +ERASE, 140446577995776, 140446578020351, +STORE, 140446577995776, 140446578020351, +ERASE, 140446578020352, 140446578036735, +STORE, 140446578020352, 140446578036735, +SNULL, 140446578012159, 140446578020351, +STORE, 140446577995776, 140446578012159, +STORE, 140446578012160, 140446578020351, +SNULL, 94537488207871, 94537488211967, +STORE, 94537488199680, 94537488207871, +STORE, 94537488207872, 94537488211967, +SNULL, 140446580281343, 140446580285439, +STORE, 140446580277248, 140446580281343, +STORE, 140446580281344, 140446580285439, +ERASE, 140446580248576, 140446580277247, +STORE, 94537489014784, 94537489149951, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728766808064, 140737488351231, +SNULL, 140728766816255, 140737488351231, +STORE, 140728766808064, 140728766816255, +STORE, 140728766676992, 140728766816255, +STORE, 94418513866752, 94418516090879, +SNULL, 94418513977343, 94418516090879, +STORE, 94418513866752, 94418513977343, +STORE, 94418513977344, 94418516090879, +ERASE, 94418513977344, 94418516090879, +STORE, 94418516070400, 94418516082687, +STORE, 94418516082688, 94418516090879, +STORE, 140556479520768, 140556481773567, +SNULL, 140556479664127, 140556481773567, +STORE, 140556479520768, 140556479664127, +STORE, 140556479664128, 140556481773567, +ERASE, 140556479664128, 140556481773567, +STORE, 140556481761280, 140556481769471, +STORE, 140556481769472, 140556481773567, +STORE, 140728767148032, 140728767152127, +STORE, 140728767135744, 140728767148031, +STORE, 140556481732608, 140556481761279, +STORE, 140556481724416, 140556481732607, +STORE, 140556475723776, 140556479520767, +SNULL, 140556475723776, 140556477382655, +STORE, 140556477382656, 140556479520767, +STORE, 140556475723776, 140556477382655, +SNULL, 140556479479807, 140556479520767, +STORE, 140556477382656, 140556479479807, +STORE, 140556479479808, 140556479520767, +SNULL, 140556479479808, 140556479504383, +STORE, 140556479504384, 140556479520767, +STORE, 140556479479808, 140556479504383, +ERASE, 140556479479808, 140556479504383, +STORE, 140556479479808, 140556479504383, +ERASE, 140556479504384, 140556479520767, +STORE, 140556479504384, 140556479520767, +SNULL, 140556479496191, 140556479504383, +STORE, 140556479479808, 140556479496191, +STORE, 140556479496192, 140556479504383, +SNULL, 94418516078591, 94418516082687, +STORE, 94418516070400, 94418516078591, +STORE, 94418516078592, 94418516082687, +SNULL, 140556481765375, 140556481769471, +STORE, 140556481761280, 140556481765375, +STORE, 140556481765376, 140556481769471, +ERASE, 140556481732608, 140556481761279, +STORE, 94418541113344, 94418541248511, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723945873408, 140737488351231, +SNULL, 140723945881599, 140737488351231, +STORE, 140723945873408, 140723945881599, +STORE, 140723945742336, 140723945881599, +STORE, 94543169773568, 94543171997695, +SNULL, 94543169884159, 94543171997695, +STORE, 94543169773568, 94543169884159, +STORE, 94543169884160, 94543171997695, +ERASE, 94543169884160, 94543171997695, +STORE, 94543171977216, 
94543171989503, +STORE, 94543171989504, 94543171997695, +STORE, 139890420883456, 139890423136255, +SNULL, 139890421026815, 139890423136255, +STORE, 139890420883456, 139890421026815, +STORE, 139890421026816, 139890423136255, +ERASE, 139890421026816, 139890423136255, +STORE, 139890423123968, 139890423132159, +STORE, 139890423132160, 139890423136255, +STORE, 140723946102784, 140723946106879, +STORE, 140723946090496, 140723946102783, +STORE, 139890423095296, 139890423123967, +STORE, 139890423087104, 139890423095295, +STORE, 139890417086464, 139890420883455, +SNULL, 139890417086464, 139890418745343, +STORE, 139890418745344, 139890420883455, +STORE, 139890417086464, 139890418745343, +SNULL, 139890420842495, 139890420883455, +STORE, 139890418745344, 139890420842495, +STORE, 139890420842496, 139890420883455, +SNULL, 139890420842496, 139890420867071, +STORE, 139890420867072, 139890420883455, +STORE, 139890420842496, 139890420867071, +ERASE, 139890420842496, 139890420867071, +STORE, 139890420842496, 139890420867071, +ERASE, 139890420867072, 139890420883455, +STORE, 139890420867072, 139890420883455, +SNULL, 139890420858879, 139890420867071, +STORE, 139890420842496, 139890420858879, +STORE, 139890420858880, 139890420867071, +SNULL, 94543171985407, 94543171989503, +STORE, 94543171977216, 94543171985407, +STORE, 94543171985408, 94543171989503, +SNULL, 139890423128063, 139890423132159, +STORE, 139890423123968, 139890423128063, +STORE, 139890423128064, 139890423132159, +ERASE, 139890423095296, 139890423123967, +STORE, 94543197097984, 94543197233151, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140736205979648, 140737488351231, +SNULL, 140736205987839, 140737488351231, +STORE, 140736205979648, 140736205987839, +STORE, 140736205848576, 140736205987839, +STORE, 94913209913344, 94913212137471, +SNULL, 94913210023935, 94913212137471, +STORE, 94913209913344, 94913210023935, +STORE, 94913210023936, 94913212137471, +ERASE, 94913210023936, 94913212137471, +STORE, 94913212116992, 94913212129279, +STORE, 94913212129280, 94913212137471, +STORE, 140006323052544, 140006325305343, +SNULL, 140006323195903, 140006325305343, +STORE, 140006323052544, 140006323195903, +STORE, 140006323195904, 140006325305343, +ERASE, 140006323195904, 140006325305343, +STORE, 140006325293056, 140006325301247, +STORE, 140006325301248, 140006325305343, +STORE, 140736206716928, 140736206721023, +STORE, 140736206704640, 140736206716927, +STORE, 140006325264384, 140006325293055, +STORE, 140006325256192, 140006325264383, +STORE, 140006319255552, 140006323052543, +SNULL, 140006319255552, 140006320914431, +STORE, 140006320914432, 140006323052543, +STORE, 140006319255552, 140006320914431, +SNULL, 140006323011583, 140006323052543, +STORE, 140006320914432, 140006323011583, +STORE, 140006323011584, 140006323052543, +SNULL, 140006323011584, 140006323036159, +STORE, 140006323036160, 140006323052543, +STORE, 140006323011584, 140006323036159, +ERASE, 140006323011584, 140006323036159, +STORE, 140006323011584, 140006323036159, +ERASE, 140006323036160, 140006323052543, +STORE, 140006323036160, 140006323052543, +SNULL, 140006323027967, 140006323036159, +STORE, 140006323011584, 140006323027967, +STORE, 140006323027968, 140006323036159, +SNULL, 94913212125183, 94913212129279, +STORE, 94913212116992, 94913212125183, +STORE, 94913212125184, 94913212129279, +SNULL, 140006325297151, 140006325301247, +STORE, 140006325293056, 140006325297151, +STORE, 140006325297152, 140006325301247, +ERASE, 140006325264384, 
140006325293055, +STORE, 94913239932928, 94913240068095, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140726926897152, 140737488351231, +SNULL, 140726926905343, 140737488351231, +STORE, 140726926897152, 140726926905343, +STORE, 140726926766080, 140726926905343, +STORE, 94213246820352, 94213249044479, +SNULL, 94213246930943, 94213249044479, +STORE, 94213246820352, 94213246930943, +STORE, 94213246930944, 94213249044479, +ERASE, 94213246930944, 94213249044479, +STORE, 94213249024000, 94213249036287, +STORE, 94213249036288, 94213249044479, +STORE, 140368830242816, 140368832495615, +SNULL, 140368830386175, 140368832495615, +STORE, 140368830242816, 140368830386175, +STORE, 140368830386176, 140368832495615, +ERASE, 140368830386176, 140368832495615, +STORE, 140368832483328, 140368832491519, +STORE, 140368832491520, 140368832495615, +STORE, 140726926999552, 140726927003647, +STORE, 140726926987264, 140726926999551, +STORE, 140368832454656, 140368832483327, +STORE, 140368832446464, 140368832454655, +STORE, 140368826445824, 140368830242815, +SNULL, 140368826445824, 140368828104703, +STORE, 140368828104704, 140368830242815, +STORE, 140368826445824, 140368828104703, +SNULL, 140368830201855, 140368830242815, +STORE, 140368828104704, 140368830201855, +STORE, 140368830201856, 140368830242815, +SNULL, 140368830201856, 140368830226431, +STORE, 140368830226432, 140368830242815, +STORE, 140368830201856, 140368830226431, +ERASE, 140368830201856, 140368830226431, +STORE, 140368830201856, 140368830226431, +ERASE, 140368830226432, 140368830242815, +STORE, 140368830226432, 140368830242815, +SNULL, 140368830218239, 140368830226431, +STORE, 140368830201856, 140368830218239, +STORE, 140368830218240, 140368830226431, +SNULL, 94213249032191, 94213249036287, +STORE, 94213249024000, 94213249032191, +STORE, 94213249032192, 94213249036287, +SNULL, 140368832487423, 140368832491519, +STORE, 140368832483328, 140368832487423, +STORE, 140368832487424, 140368832491519, +ERASE, 140368832454656, 140368832483327, +STORE, 94213267435520, 94213267570687, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728954130432, 140737488351231, +SNULL, 140728954138623, 140737488351231, +STORE, 140728954130432, 140728954138623, +STORE, 140728953999360, 140728954138623, +STORE, 94672570966016, 94672573190143, +SNULL, 94672571076607, 94672573190143, +STORE, 94672570966016, 94672571076607, +STORE, 94672571076608, 94672573190143, +ERASE, 94672571076608, 94672573190143, +STORE, 94672573169664, 94672573181951, +STORE, 94672573181952, 94672573190143, +STORE, 140201696735232, 140201698988031, +SNULL, 140201696878591, 140201698988031, +STORE, 140201696735232, 140201696878591, +STORE, 140201696878592, 140201698988031, +ERASE, 140201696878592, 140201698988031, +STORE, 140201698975744, 140201698983935, +STORE, 140201698983936, 140201698988031, +STORE, 140728954163200, 140728954167295, +STORE, 140728954150912, 140728954163199, +STORE, 140201698947072, 140201698975743, +STORE, 140201698938880, 140201698947071, +STORE, 140201692938240, 140201696735231, +SNULL, 140201692938240, 140201694597119, +STORE, 140201694597120, 140201696735231, +STORE, 140201692938240, 140201694597119, +SNULL, 140201696694271, 140201696735231, +STORE, 140201694597120, 140201696694271, +STORE, 140201696694272, 140201696735231, +SNULL, 140201696694272, 140201696718847, +STORE, 140201696718848, 140201696735231, +STORE, 140201696694272, 140201696718847, +ERASE, 140201696694272, 140201696718847, 
+STORE, 140201696694272, 140201696718847, +ERASE, 140201696718848, 140201696735231, +STORE, 140201696718848, 140201696735231, +SNULL, 140201696710655, 140201696718847, +STORE, 140201696694272, 140201696710655, +STORE, 140201696710656, 140201696718847, +SNULL, 94672573177855, 94672573181951, +STORE, 94672573169664, 94672573177855, +STORE, 94672573177856, 94672573181951, +SNULL, 140201698979839, 140201698983935, +STORE, 140201698975744, 140201698979839, +STORE, 140201698979840, 140201698983935, +ERASE, 140201698947072, 140201698975743, +STORE, 94672595689472, 94672595824639, +STORE, 94114394132480, 94114394345471, +STORE, 94114396442624, 94114396446719, +STORE, 94114396446720, 94114396454911, +STORE, 94114396454912, 94114396467199, +STORE, 94114421575680, 94114428256255, +STORE, 139934313955328, 139934315614207, +STORE, 139934315614208, 139934317711359, +STORE, 139934317711360, 139934317727743, +STORE, 139934317727744, 139934317735935, +STORE, 139934317735936, 139934317752319, +STORE, 139934317752320, 139934317764607, +STORE, 139934317764608, 139934319857663, +STORE, 139934319857664, 139934319861759, +STORE, 139934319861760, 139934319865855, +STORE, 139934319865856, 139934320009215, +STORE, 139934320377856, 139934322061311, +STORE, 139934322061312, 139934322077695, +STORE, 139934322106368, 139934322110463, +STORE, 139934322110464, 139934322114559, +STORE, 139934322114560, 139934322118655, +STORE, 140731200376832, 140731200516095, +STORE, 140731200929792, 140731200942079, +STORE, 140731200942080, 140731200946175, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140721532362752, 140737488351231, +SNULL, 140721532370943, 140737488351231, +STORE, 140721532362752, 140721532370943, +STORE, 140721532231680, 140721532370943, +STORE, 94467222597632, 94467224821759, +SNULL, 94467222708223, 94467224821759, +STORE, 94467222597632, 94467222708223, +STORE, 94467222708224, 94467224821759, +ERASE, 94467222708224, 94467224821759, +STORE, 94467224801280, 94467224813567, +STORE, 94467224813568, 94467224821759, +STORE, 140191433543680, 140191435796479, +SNULL, 140191433687039, 140191435796479, +STORE, 140191433543680, 140191433687039, +STORE, 140191433687040, 140191435796479, +ERASE, 140191433687040, 140191435796479, +STORE, 140191435784192, 140191435792383, +STORE, 140191435792384, 140191435796479, +STORE, 140721533034496, 140721533038591, +STORE, 140721533022208, 140721533034495, +STORE, 140191435755520, 140191435784191, +STORE, 140191435747328, 140191435755519, +STORE, 140191429746688, 140191433543679, +SNULL, 140191429746688, 140191431405567, +STORE, 140191431405568, 140191433543679, +STORE, 140191429746688, 140191431405567, +SNULL, 140191433502719, 140191433543679, +STORE, 140191431405568, 140191433502719, +STORE, 140191433502720, 140191433543679, +SNULL, 140191433502720, 140191433527295, +STORE, 140191433527296, 140191433543679, +STORE, 140191433502720, 140191433527295, +ERASE, 140191433502720, 140191433527295, +STORE, 140191433502720, 140191433527295, +ERASE, 140191433527296, 140191433543679, +STORE, 140191433527296, 140191433543679, +SNULL, 140191433519103, 140191433527295, +STORE, 140191433502720, 140191433519103, +STORE, 140191433519104, 140191433527295, +SNULL, 94467224809471, 94467224813567, +STORE, 94467224801280, 94467224809471, +STORE, 94467224809472, 94467224813567, +SNULL, 140191435788287, 140191435792383, +STORE, 140191435784192, 140191435788287, +STORE, 140191435788288, 140191435792383, +ERASE, 140191435755520, 140191435784191, +STORE, 
94467251847168, 94467251982335, +STORE, 94367895400448, 94367895613439, +STORE, 94367897710592, 94367897714687, +STORE, 94367897714688, 94367897722879, +STORE, 94367897722880, 94367897735167, +STORE, 94367925264384, 94367926861823, +STORE, 139801317548032, 139801319206911, +STORE, 139801319206912, 139801321304063, +STORE, 139801321304064, 139801321320447, +STORE, 139801321320448, 139801321328639, +STORE, 139801321328640, 139801321345023, +STORE, 139801321345024, 139801321357311, +STORE, 139801321357312, 139801323450367, +STORE, 139801323450368, 139801323454463, +STORE, 139801323454464, 139801323458559, +STORE, 139801323458560, 139801323601919, +STORE, 139801323970560, 139801325654015, +STORE, 139801325654016, 139801325670399, +STORE, 139801325699072, 139801325703167, +STORE, 139801325703168, 139801325707263, +STORE, 139801325707264, 139801325711359, +STORE, 140724442861568, 140724443000831, +STORE, 140724443611136, 140724443623423, +STORE, 140724443623424, 140724443627519, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140731353149440, 140737488351231, +SNULL, 140731353157631, 140737488351231, +STORE, 140731353149440, 140731353157631, +STORE, 140731353018368, 140731353157631, +STORE, 94310379503616, 94310381838335, +SNULL, 94310379716607, 94310381838335, +STORE, 94310379503616, 94310379716607, +STORE, 94310379716608, 94310381838335, +ERASE, 94310379716608, 94310381838335, +STORE, 94310381813760, 94310381826047, +STORE, 94310381826048, 94310381838335, +STORE, 140515434659840, 140515436912639, +SNULL, 140515434803199, 140515436912639, +STORE, 140515434659840, 140515434803199, +STORE, 140515434803200, 140515436912639, +ERASE, 140515434803200, 140515436912639, +STORE, 140515436900352, 140515436908543, +STORE, 140515436908544, 140515436912639, +STORE, 140731353886720, 140731353890815, +STORE, 140731353874432, 140731353886719, +STORE, 140515436871680, 140515436900351, +STORE, 140515436863488, 140515436871679, +STORE, 140515432546304, 140515434659839, +SNULL, 140515432546304, 140515432558591, +STORE, 140515432558592, 140515434659839, +STORE, 140515432546304, 140515432558591, +SNULL, 140515434651647, 140515434659839, +STORE, 140515432558592, 140515434651647, +STORE, 140515434651648, 140515434659839, +ERASE, 140515434651648, 140515434659839, +STORE, 140515434651648, 140515434659839, +STORE, 140515428749312, 140515432546303, +SNULL, 140515428749312, 140515430408191, +STORE, 140515430408192, 140515432546303, +STORE, 140515428749312, 140515430408191, +SNULL, 140515432505343, 140515432546303, +STORE, 140515430408192, 140515432505343, +STORE, 140515432505344, 140515432546303, +SNULL, 140515432505344, 140515432529919, +STORE, 140515432529920, 140515432546303, +STORE, 140515432505344, 140515432529919, +ERASE, 140515432505344, 140515432529919, +STORE, 140515432505344, 140515432529919, +ERASE, 140515432529920, 140515432546303, +STORE, 140515432529920, 140515432546303, +STORE, 140515436855296, 140515436871679, +SNULL, 140515432521727, 140515432529919, +STORE, 140515432505344, 140515432521727, +STORE, 140515432521728, 140515432529919, +SNULL, 140515434655743, 140515434659839, +STORE, 140515434651648, 140515434655743, +STORE, 140515434655744, 140515434659839, +SNULL, 94310381817855, 94310381826047, +STORE, 94310381813760, 94310381817855, +STORE, 94310381817856, 94310381826047, +SNULL, 140515436904447, 140515436908543, +STORE, 140515436900352, 140515436904447, +STORE, 140515436904448, 140515436908543, +ERASE, 140515436871680, 140515436900351, +STORE, 94310395457536, 
94310395592703, +STORE, 140515435171840, 140515436855295, +STORE, 94310395457536, 94310395727871, +STORE, 94310395457536, 94310395863039, +STORE, 94310395457536, 94310396047359, +SNULL, 94310396022783, 94310396047359, +STORE, 94310395457536, 94310396022783, +STORE, 94310396022784, 94310396047359, +ERASE, 94310396022784, 94310396047359, +STORE, 94310395457536, 94310396157951, +STORE, 94310395457536, 94310396293119, +SNULL, 94310396276735, 94310396293119, +STORE, 94310395457536, 94310396276735, +STORE, 94310396276736, 94310396293119, +ERASE, 94310396276736, 94310396293119, +STORE, 94310395457536, 94310396411903, +SNULL, 94310396383231, 94310396411903, +STORE, 94310395457536, 94310396383231, +STORE, 94310396383232, 94310396411903, +ERASE, 94310396383232, 94310396411903, +STORE, 94310395457536, 94310396522495, +STORE, 94310395457536, 94310396674047, +SNULL, 94310396657663, 94310396674047, +STORE, 94310395457536, 94310396657663, +STORE, 94310396657664, 94310396674047, +ERASE, 94310396657664, 94310396674047, +SNULL, 94310396624895, 94310396657663, +STORE, 94310395457536, 94310396624895, +STORE, 94310396624896, 94310396657663, +ERASE, 94310396624896, 94310396657663, +STORE, 94310395457536, 94310396776447, +SNULL, 94310396764159, 94310396776447, +STORE, 94310395457536, 94310396764159, +STORE, 94310396764160, 94310396776447, +ERASE, 94310396764160, 94310396776447, +SNULL, 94310396739583, 94310396764159, +STORE, 94310395457536, 94310396739583, +STORE, 94310396739584, 94310396764159, +ERASE, 94310396739584, 94310396764159, +STORE, 94310395457536, 94310396882943, +STORE, 94310395457536, 94310397018111, +STORE, 94310395457536, 94310397161471, +STORE, 94310395457536, 94310397300735, +SNULL, 94310397292543, 94310397300735, +STORE, 94310395457536, 94310397292543, +STORE, 94310397292544, 94310397300735, +ERASE, 94310397292544, 94310397300735, +STORE, 94359222210560, 94359222423551, +STORE, 94359224520704, 94359224524799, +STORE, 94359224524800, 94359224532991, +STORE, 94359224532992, 94359224545279, +STORE, 94359238348800, 94359239385087, +STORE, 140675699838976, 140675701497855, +STORE, 140675701497856, 140675703595007, +STORE, 140675703595008, 140675703611391, +STORE, 140675703611392, 140675703619583, +STORE, 140675703619584, 140675703635967, +STORE, 140675703635968, 140675703648255, +STORE, 140675703648256, 140675705741311, +STORE, 140675705741312, 140675705745407, +STORE, 140675705745408, 140675705749503, +STORE, 140675705749504, 140675705892863, +STORE, 140675706261504, 140675707944959, +STORE, 140675707944960, 140675707961343, +STORE, 140675707990016, 140675707994111, +STORE, 140675707994112, 140675707998207, +STORE, 140675707998208, 140675708002303, +STORE, 140721324634112, 140721324773375, +STORE, 140721324810240, 140721324822527, +STORE, 140721324822528, 140721324826623, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724099678208, 140737488351231, +SNULL, 140724099686399, 140737488351231, +STORE, 140724099678208, 140724099686399, +STORE, 140724099547136, 140724099686399, +STORE, 94586638516224, 94586640850943, +SNULL, 94586638729215, 94586640850943, +STORE, 94586638516224, 94586638729215, +STORE, 94586638729216, 94586640850943, +ERASE, 94586638729216, 94586640850943, +STORE, 94586640826368, 94586640838655, +STORE, 94586640838656, 94586640850943, +STORE, 140371033796608, 140371036049407, +SNULL, 140371033939967, 140371036049407, +STORE, 140371033796608, 140371033939967, +STORE, 140371033939968, 140371036049407, +ERASE, 140371033939968, 
140371036049407, +STORE, 140371036037120, 140371036045311, +STORE, 140371036045312, 140371036049407, +STORE, 140724100001792, 140724100005887, +STORE, 140724099989504, 140724100001791, +STORE, 140371036008448, 140371036037119, +STORE, 140371036000256, 140371036008447, +STORE, 140371031683072, 140371033796607, +SNULL, 140371031683072, 140371031695359, +STORE, 140371031695360, 140371033796607, +STORE, 140371031683072, 140371031695359, +SNULL, 140371033788415, 140371033796607, +STORE, 140371031695360, 140371033788415, +STORE, 140371033788416, 140371033796607, +ERASE, 140371033788416, 140371033796607, +STORE, 140371033788416, 140371033796607, +STORE, 140371027886080, 140371031683071, +SNULL, 140371027886080, 140371029544959, +STORE, 140371029544960, 140371031683071, +STORE, 140371027886080, 140371029544959, +SNULL, 140371031642111, 140371031683071, +STORE, 140371029544960, 140371031642111, +STORE, 140371031642112, 140371031683071, +SNULL, 140371031642112, 140371031666687, +STORE, 140371031666688, 140371031683071, +STORE, 140371031642112, 140371031666687, +ERASE, 140371031642112, 140371031666687, +STORE, 140371031642112, 140371031666687, +ERASE, 140371031666688, 140371031683071, +STORE, 140371031666688, 140371031683071, +STORE, 140371035992064, 140371036008447, +SNULL, 140371031658495, 140371031666687, +STORE, 140371031642112, 140371031658495, +STORE, 140371031658496, 140371031666687, +SNULL, 140371033792511, 140371033796607, +STORE, 140371033788416, 140371033792511, +STORE, 140371033792512, 140371033796607, +SNULL, 94586640830463, 94586640838655, +STORE, 94586640826368, 94586640830463, +STORE, 94586640830464, 94586640838655, +SNULL, 140371036041215, 140371036045311, +STORE, 140371036037120, 140371036041215, +STORE, 140371036041216, 140371036045311, +ERASE, 140371036008448, 140371036037119, +STORE, 94586663849984, 94586663985151, +STORE, 140371034308608, 140371035992063, +STORE, 94586663849984, 94586664120319, +STORE, 94586663849984, 94586664255487, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140727532937216, 140737488351231, +SNULL, 140727532945407, 140737488351231, +STORE, 140727532937216, 140727532945407, +STORE, 140727532806144, 140727532945407, +STORE, 94849780191232, 94849782525951, +SNULL, 94849780404223, 94849782525951, +STORE, 94849780191232, 94849780404223, +STORE, 94849780404224, 94849782525951, +ERASE, 94849780404224, 94849782525951, +STORE, 94849782501376, 94849782513663, +STORE, 94849782513664, 94849782525951, +STORE, 140382070218752, 140382072471551, +SNULL, 140382070362111, 140382072471551, +STORE, 140382070218752, 140382070362111, +STORE, 140382070362112, 140382072471551, +ERASE, 140382070362112, 140382072471551, +STORE, 140382072459264, 140382072467455, +STORE, 140382072467456, 140382072471551, +STORE, 140727533092864, 140727533096959, +STORE, 140727533080576, 140727533092863, +STORE, 140382072430592, 140382072459263, +STORE, 140382072422400, 140382072430591, +STORE, 140382068105216, 140382070218751, +SNULL, 140382068105216, 140382068117503, +STORE, 140382068117504, 140382070218751, +STORE, 140382068105216, 140382068117503, +SNULL, 140382070210559, 140382070218751, +STORE, 140382068117504, 140382070210559, +STORE, 140382070210560, 140382070218751, +ERASE, 140382070210560, 140382070218751, +STORE, 140382070210560, 140382070218751, +STORE, 140382064308224, 140382068105215, +SNULL, 140382064308224, 140382065967103, +STORE, 140382065967104, 140382068105215, +STORE, 140382064308224, 140382065967103, +SNULL, 140382068064255, 
140382068105215, +STORE, 140382065967104, 140382068064255, +STORE, 140382068064256, 140382068105215, +SNULL, 140382068064256, 140382068088831, +STORE, 140382068088832, 140382068105215, +STORE, 140382068064256, 140382068088831, +ERASE, 140382068064256, 140382068088831, +STORE, 140382068064256, 140382068088831, +ERASE, 140382068088832, 140382068105215, +STORE, 140382068088832, 140382068105215, +STORE, 140382072414208, 140382072430591, +SNULL, 140382068080639, 140382068088831, +STORE, 140382068064256, 140382068080639, +STORE, 140382068080640, 140382068088831, +SNULL, 140382070214655, 140382070218751, +STORE, 140382070210560, 140382070214655, +STORE, 140382070214656, 140382070218751, +SNULL, 94849782505471, 94849782513663, +STORE, 94849782501376, 94849782505471, +STORE, 94849782505472, 94849782513663, +SNULL, 140382072463359, 140382072467455, +STORE, 140382072459264, 140382072463359, +STORE, 140382072463360, 140382072467455, +ERASE, 140382072430592, 140382072459263, +STORE, 94849782845440, 94849782980607, +STORE, 140382070730752, 140382072414207, +STORE, 94849782845440, 94849783115775, +STORE, 94849782845440, 94849783250943, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722594377728, 140737488351231, +SNULL, 140722594385919, 140737488351231, +STORE, 140722594377728, 140722594385919, +STORE, 140722594246656, 140722594385919, +STORE, 94421466353664, 94421468577791, +SNULL, 94421466464255, 94421468577791, +STORE, 94421466353664, 94421466464255, +STORE, 94421466464256, 94421468577791, +ERASE, 94421466464256, 94421468577791, +STORE, 94421468557312, 94421468569599, +STORE, 94421468569600, 94421468577791, +STORE, 140345458057216, 140345460310015, +SNULL, 140345458200575, 140345460310015, +STORE, 140345458057216, 140345458200575, +STORE, 140345458200576, 140345460310015, +ERASE, 140345458200576, 140345460310015, +STORE, 140345460297728, 140345460305919, +STORE, 140345460305920, 140345460310015, +STORE, 140722595557376, 140722595561471, +STORE, 140722595545088, 140722595557375, +STORE, 140345460269056, 140345460297727, +STORE, 140345460260864, 140345460269055, +STORE, 140345454260224, 140345458057215, +SNULL, 140345454260224, 140345455919103, +STORE, 140345455919104, 140345458057215, +STORE, 140345454260224, 140345455919103, +SNULL, 140345458016255, 140345458057215, +STORE, 140345455919104, 140345458016255, +STORE, 140345458016256, 140345458057215, +SNULL, 140345458016256, 140345458040831, +STORE, 140345458040832, 140345458057215, +STORE, 140345458016256, 140345458040831, +ERASE, 140345458016256, 140345458040831, +STORE, 140345458016256, 140345458040831, +ERASE, 140345458040832, 140345458057215, +STORE, 140345458040832, 140345458057215, +SNULL, 140345458032639, 140345458040831, +STORE, 140345458016256, 140345458032639, +STORE, 140345458032640, 140345458040831, +SNULL, 94421468565503, 94421468569599, +STORE, 94421468557312, 94421468565503, +STORE, 94421468565504, 94421468569599, +SNULL, 140345460301823, 140345460305919, +STORE, 140345460297728, 140345460301823, +STORE, 140345460301824, 140345460305919, +ERASE, 140345460269056, 140345460297727, +STORE, 94421496004608, 94421496139775, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140726096302080, 140737488351231, +SNULL, 140726096310271, 140737488351231, +STORE, 140726096302080, 140726096310271, +STORE, 140726096171008, 140726096310271, +STORE, 94101992124416, 94101994459135, +SNULL, 94101992337407, 94101994459135, +STORE, 94101992124416, 94101992337407, +STORE, 
94101992337408, 94101994459135, +ERASE, 94101992337408, 94101994459135, +STORE, 94101994434560, 94101994446847, +STORE, 94101994446848, 94101994459135, +STORE, 140192085594112, 140192087846911, +SNULL, 140192085737471, 140192087846911, +STORE, 140192085594112, 140192085737471, +STORE, 140192085737472, 140192087846911, +ERASE, 140192085737472, 140192087846911, +STORE, 140192087834624, 140192087842815, +STORE, 140192087842816, 140192087846911, +STORE, 140726096375808, 140726096379903, +STORE, 140726096363520, 140726096375807, +STORE, 140192087805952, 140192087834623, +STORE, 140192087797760, 140192087805951, +STORE, 140192083480576, 140192085594111, +SNULL, 140192083480576, 140192083492863, +STORE, 140192083492864, 140192085594111, +STORE, 140192083480576, 140192083492863, +SNULL, 140192085585919, 140192085594111, +STORE, 140192083492864, 140192085585919, +STORE, 140192085585920, 140192085594111, +ERASE, 140192085585920, 140192085594111, +STORE, 140192085585920, 140192085594111, +STORE, 140192079683584, 140192083480575, +SNULL, 140192079683584, 140192081342463, +STORE, 140192081342464, 140192083480575, +STORE, 140192079683584, 140192081342463, +SNULL, 140192083439615, 140192083480575, +STORE, 140192081342464, 140192083439615, +STORE, 140192083439616, 140192083480575, +SNULL, 140192083439616, 140192083464191, +STORE, 140192083464192, 140192083480575, +STORE, 140192083439616, 140192083464191, +ERASE, 140192083439616, 140192083464191, +STORE, 140192083439616, 140192083464191, +ERASE, 140192083464192, 140192083480575, +STORE, 140192083464192, 140192083480575, +STORE, 140192087789568, 140192087805951, +SNULL, 140192083455999, 140192083464191, +STORE, 140192083439616, 140192083455999, +STORE, 140192083456000, 140192083464191, +SNULL, 140192085590015, 140192085594111, +STORE, 140192085585920, 140192085590015, +STORE, 140192085590016, 140192085594111, +SNULL, 94101994438655, 94101994446847, +STORE, 94101994434560, 94101994438655, +STORE, 94101994438656, 94101994446847, +SNULL, 140192087838719, 140192087842815, +STORE, 140192087834624, 140192087838719, +STORE, 140192087838720, 140192087842815, +ERASE, 140192087805952, 140192087834623, +STORE, 94102011887616, 94102012022783, +STORE, 140192086106112, 140192087789567, +STORE, 94102011887616, 94102012157951, +STORE, 94102011887616, 94102012293119, +STORE, 94102011887616, 94102012440575, +SNULL, 94102012428287, 94102012440575, +STORE, 94102011887616, 94102012428287, +STORE, 94102012428288, 94102012440575, +ERASE, 94102012428288, 94102012440575, +STORE, 94102011887616, 94102012579839, +STORE, 94102011887616, 94102012715007, +SNULL, 94102012694527, 94102012715007, +STORE, 94102011887616, 94102012694527, +STORE, 94102012694528, 94102012715007, +ERASE, 94102012694528, 94102012715007, +STORE, 94102011887616, 94102012833791, +STORE, 94102011887616, 94102012968959, +SNULL, 94102012927999, 94102012968959, +STORE, 94102011887616, 94102012927999, +STORE, 94102012928000, 94102012968959, +ERASE, 94102012928000, 94102012968959, +STORE, 94102011887616, 94102013091839, +SNULL, 94102013075455, 94102013091839, +STORE, 94102011887616, 94102013075455, +STORE, 94102013075456, 94102013091839, +ERASE, 94102013075456, 94102013091839, +STORE, 94102011887616, 94102013210623, +STORE, 94102011887616, 94102013345791, +STORE, 93968727965696, 93968728178687, +STORE, 93968730275840, 93968730279935, +STORE, 93968730279936, 93968730288127, +STORE, 93968730288128, 93968730300415, +STORE, 93968731140096, 93968732704767, +STORE, 140588443168768, 140588444827647, +STORE, 140588444827648, 
140588446924799, +STORE, 140588446924800, 140588446941183, +STORE, 140588446941184, 140588446949375, +STORE, 140588446949376, 140588446965759, +STORE, 140588446965760, 140588446978047, +STORE, 140588446978048, 140588449071103, +STORE, 140588449071104, 140588449075199, +STORE, 140588449075200, 140588449079295, +STORE, 140588449079296, 140588449222655, +STORE, 140588449591296, 140588451274751, +STORE, 140588451274752, 140588451291135, +STORE, 140588451319808, 140588451323903, +STORE, 140588451323904, 140588451327999, +STORE, 140588451328000, 140588451332095, +STORE, 140733877239808, 140733877379071, +STORE, 140733878702080, 140733878714367, +STORE, 140733878714368, 140733878718463, +STORE, 93968727965696, 93968728178687, +STORE, 93968730275840, 93968730279935, +STORE, 93968730279936, 93968730288127, +STORE, 93968730288128, 93968730300415, +STORE, 93968731140096, 93968732991487, +STORE, 140588443168768, 140588444827647, +STORE, 140588444827648, 140588446924799, +STORE, 140588446924800, 140588446941183, +STORE, 140588446941184, 140588446949375, +STORE, 140588446949376, 140588446965759, +STORE, 140588446965760, 140588446978047, +STORE, 140588446978048, 140588449071103, +STORE, 140588449071104, 140588449075199, +STORE, 140588449075200, 140588449079295, +STORE, 140588449079296, 140588449222655, +STORE, 140588449591296, 140588451274751, +STORE, 140588451274752, 140588451291135, +STORE, 140588451319808, 140588451323903, +STORE, 140588451323904, 140588451327999, +STORE, 140588451328000, 140588451332095, +STORE, 140733877239808, 140733877379071, +STORE, 140733878702080, 140733878714367, +STORE, 140733878714368, 140733878718463, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733054472192, 140737488351231, +SNULL, 140733054480383, 140737488351231, +STORE, 140733054472192, 140733054480383, +STORE, 140733054341120, 140733054480383, +STORE, 93992873623552, 93992875847679, +SNULL, 93992873734143, 93992875847679, +STORE, 93992873623552, 93992873734143, +STORE, 93992873734144, 93992875847679, +ERASE, 93992873734144, 93992875847679, +STORE, 93992875827200, 93992875839487, +STORE, 93992875839488, 93992875847679, +STORE, 139790881488896, 139790883741695, +SNULL, 139790881632255, 139790883741695, +STORE, 139790881488896, 139790881632255, +STORE, 139790881632256, 139790883741695, +ERASE, 139790881632256, 139790883741695, +STORE, 139790883729408, 139790883737599, +STORE, 139790883737600, 139790883741695, +STORE, 140733054754816, 140733054758911, +STORE, 140733054742528, 140733054754815, +STORE, 139790883700736, 139790883729407, +STORE, 139790883692544, 139790883700735, +STORE, 139790877691904, 139790881488895, +SNULL, 139790877691904, 139790879350783, +STORE, 139790879350784, 139790881488895, +STORE, 139790877691904, 139790879350783, +SNULL, 139790881447935, 139790881488895, +STORE, 139790879350784, 139790881447935, +STORE, 139790881447936, 139790881488895, +SNULL, 139790881447936, 139790881472511, +STORE, 139790881472512, 139790881488895, +STORE, 139790881447936, 139790881472511, +ERASE, 139790881447936, 139790881472511, +STORE, 139790881447936, 139790881472511, +ERASE, 139790881472512, 139790881488895, +STORE, 139790881472512, 139790881488895, +SNULL, 139790881464319, 139790881472511, +STORE, 139790881447936, 139790881464319, +STORE, 139790881464320, 139790881472511, +SNULL, 93992875835391, 93992875839487, +STORE, 93992875827200, 93992875835391, +STORE, 93992875835392, 93992875839487, +SNULL, 139790883733503, 139790883737599, +STORE, 139790883729408, 
139790883733503, +STORE, 139790883733504, 139790883737599, +ERASE, 139790883700736, 139790883729407, +STORE, 93992877031424, 93992877166591, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728550887424, 140737488351231, +SNULL, 140728550895615, 140737488351231, +STORE, 140728550887424, 140728550895615, +STORE, 140728550756352, 140728550895615, +STORE, 94707634077696, 94707636301823, +SNULL, 94707634188287, 94707636301823, +STORE, 94707634077696, 94707634188287, +STORE, 94707634188288, 94707636301823, +ERASE, 94707634188288, 94707636301823, +STORE, 94707636281344, 94707636293631, +STORE, 94707636293632, 94707636301823, +STORE, 140553545666560, 140553547919359, +SNULL, 140553545809919, 140553547919359, +STORE, 140553545666560, 140553545809919, +STORE, 140553545809920, 140553547919359, +ERASE, 140553545809920, 140553547919359, +STORE, 140553547907072, 140553547915263, +STORE, 140553547915264, 140553547919359, +STORE, 140728552374272, 140728552378367, +STORE, 140728552361984, 140728552374271, +STORE, 140553547878400, 140553547907071, +STORE, 140553547870208, 140553547878399, +STORE, 140553541869568, 140553545666559, +SNULL, 140553541869568, 140553543528447, +STORE, 140553543528448, 140553545666559, +STORE, 140553541869568, 140553543528447, +SNULL, 140553545625599, 140553545666559, +STORE, 140553543528448, 140553545625599, +STORE, 140553545625600, 140553545666559, +SNULL, 140553545625600, 140553545650175, +STORE, 140553545650176, 140553545666559, +STORE, 140553545625600, 140553545650175, +ERASE, 140553545625600, 140553545650175, +STORE, 140553545625600, 140553545650175, +ERASE, 140553545650176, 140553545666559, +STORE, 140553545650176, 140553545666559, +SNULL, 140553545641983, 140553545650175, +STORE, 140553545625600, 140553545641983, +STORE, 140553545641984, 140553545650175, +SNULL, 94707636289535, 94707636293631, +STORE, 94707636281344, 94707636289535, +STORE, 94707636289536, 94707636293631, +SNULL, 140553547911167, 140553547915263, +STORE, 140553547907072, 140553547911167, +STORE, 140553547911168, 140553547915263, +ERASE, 140553547878400, 140553547907071, +STORE, 94707651411968, 94707651547135, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140732168695808, 140737488351231, +SNULL, 140732168703999, 140737488351231, +STORE, 140732168695808, 140732168703999, +STORE, 140732168564736, 140732168703999, +STORE, 94454287859712, 94454290083839, +SNULL, 94454287970303, 94454290083839, +STORE, 94454287859712, 94454287970303, +STORE, 94454287970304, 94454290083839, +ERASE, 94454287970304, 94454290083839, +STORE, 94454290063360, 94454290075647, +STORE, 94454290075648, 94454290083839, +STORE, 140564947107840, 140564949360639, +SNULL, 140564947251199, 140564949360639, +STORE, 140564947107840, 140564947251199, +STORE, 140564947251200, 140564949360639, +ERASE, 140564947251200, 140564949360639, +STORE, 140564949348352, 140564949356543, +STORE, 140564949356544, 140564949360639, +STORE, 140732168843264, 140732168847359, +STORE, 140732168830976, 140732168843263, +STORE, 140564949319680, 140564949348351, +STORE, 140564949311488, 140564949319679, +STORE, 140564943310848, 140564947107839, +SNULL, 140564943310848, 140564944969727, +STORE, 140564944969728, 140564947107839, +STORE, 140564943310848, 140564944969727, +SNULL, 140564947066879, 140564947107839, +STORE, 140564944969728, 140564947066879, +STORE, 140564947066880, 140564947107839, +SNULL, 140564947066880, 140564947091455, +STORE, 140564947091456, 140564947107839, 
+STORE, 140564947066880, 140564947091455, +ERASE, 140564947066880, 140564947091455, +STORE, 140564947066880, 140564947091455, +ERASE, 140564947091456, 140564947107839, +STORE, 140564947091456, 140564947107839, +SNULL, 140564947083263, 140564947091455, +STORE, 140564947066880, 140564947083263, +STORE, 140564947083264, 140564947091455, +SNULL, 94454290071551, 94454290075647, +STORE, 94454290063360, 94454290071551, +STORE, 94454290071552, 94454290075647, +SNULL, 140564949352447, 140564949356543, +STORE, 140564949348352, 140564949352447, +STORE, 140564949352448, 140564949356543, +ERASE, 140564949319680, 140564949348351, +STORE, 94454316236800, 94454316371967, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735155617792, 140737488351231, +SNULL, 140735155625983, 140737488351231, +STORE, 140735155617792, 140735155625983, +STORE, 140735155486720, 140735155625983, +STORE, 93915969556480, 93915971780607, +SNULL, 93915969667071, 93915971780607, +STORE, 93915969556480, 93915969667071, +STORE, 93915969667072, 93915971780607, +ERASE, 93915969667072, 93915971780607, +STORE, 93915971760128, 93915971772415, +STORE, 93915971772416, 93915971780607, +STORE, 140141164605440, 140141166858239, +SNULL, 140141164748799, 140141166858239, +STORE, 140141164605440, 140141164748799, +STORE, 140141164748800, 140141166858239, +ERASE, 140141164748800, 140141166858239, +STORE, 140141166845952, 140141166854143, +STORE, 140141166854144, 140141166858239, +STORE, 140735155691520, 140735155695615, +STORE, 140735155679232, 140735155691519, +STORE, 140141166817280, 140141166845951, +STORE, 140141166809088, 140141166817279, +STORE, 140141160808448, 140141164605439, +SNULL, 140141160808448, 140141162467327, +STORE, 140141162467328, 140141164605439, +STORE, 140141160808448, 140141162467327, +SNULL, 140141164564479, 140141164605439, +STORE, 140141162467328, 140141164564479, +STORE, 140141164564480, 140141164605439, +SNULL, 140141164564480, 140141164589055, +STORE, 140141164589056, 140141164605439, +STORE, 140141164564480, 140141164589055, +ERASE, 140141164564480, 140141164589055, +STORE, 140141164564480, 140141164589055, +ERASE, 140141164589056, 140141164605439, +STORE, 140141164589056, 140141164605439, +SNULL, 140141164580863, 140141164589055, +STORE, 140141164564480, 140141164580863, +STORE, 140141164580864, 140141164589055, +SNULL, 93915971768319, 93915971772415, +STORE, 93915971760128, 93915971768319, +STORE, 93915971768320, 93915971772415, +SNULL, 140141166850047, 140141166854143, +STORE, 140141166845952, 140141166850047, +STORE, 140141166850048, 140141166854143, +ERASE, 140141166817280, 140141166845951, +STORE, 93916002775040, 93916002910207, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728988409856, 140737488351231, +SNULL, 140728988418047, 140737488351231, +STORE, 140728988409856, 140728988418047, +STORE, 140728988278784, 140728988418047, +STORE, 94021634813952, 94021637038079, +SNULL, 94021634924543, 94021637038079, +STORE, 94021634813952, 94021634924543, +STORE, 94021634924544, 94021637038079, +ERASE, 94021634924544, 94021637038079, +STORE, 94021637017600, 94021637029887, +STORE, 94021637029888, 94021637038079, +STORE, 140638014038016, 140638016290815, +SNULL, 140638014181375, 140638016290815, +STORE, 140638014038016, 140638014181375, +STORE, 140638014181376, 140638016290815, +ERASE, 140638014181376, 140638016290815, +STORE, 140638016278528, 140638016286719, +STORE, 140638016286720, 140638016290815, +STORE, 140728988536832, 
140728988540927, +STORE, 140728988524544, 140728988536831, +STORE, 140638016249856, 140638016278527, +STORE, 140638016241664, 140638016249855, +STORE, 140638010241024, 140638014038015, +SNULL, 140638010241024, 140638011899903, +STORE, 140638011899904, 140638014038015, +STORE, 140638010241024, 140638011899903, +SNULL, 140638013997055, 140638014038015, +STORE, 140638011899904, 140638013997055, +STORE, 140638013997056, 140638014038015, +SNULL, 140638013997056, 140638014021631, +STORE, 140638014021632, 140638014038015, +STORE, 140638013997056, 140638014021631, +ERASE, 140638013997056, 140638014021631, +STORE, 140638013997056, 140638014021631, +ERASE, 140638014021632, 140638014038015, +STORE, 140638014021632, 140638014038015, +SNULL, 140638014013439, 140638014021631, +STORE, 140638013997056, 140638014013439, +STORE, 140638014013440, 140638014021631, +SNULL, 94021637025791, 94021637029887, +STORE, 94021637017600, 94021637025791, +STORE, 94021637025792, 94021637029887, +SNULL, 140638016282623, 140638016286719, +STORE, 140638016278528, 140638016282623, +STORE, 140638016282624, 140638016286719, +ERASE, 140638016249856, 140638016278527, +STORE, 94021643124736, 94021643259903, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140731219275776, 140737488351231, +SNULL, 140731219283967, 140737488351231, +STORE, 140731219275776, 140731219283967, +STORE, 140731219144704, 140731219283967, +STORE, 93888803647488, 93888805871615, +SNULL, 93888803758079, 93888805871615, +STORE, 93888803647488, 93888803758079, +STORE, 93888803758080, 93888805871615, +ERASE, 93888803758080, 93888805871615, +STORE, 93888805851136, 93888805863423, +STORE, 93888805863424, 93888805871615, +STORE, 139630576934912, 139630579187711, +SNULL, 139630577078271, 139630579187711, +STORE, 139630576934912, 139630577078271, +STORE, 139630577078272, 139630579187711, +ERASE, 139630577078272, 139630579187711, +STORE, 139630579175424, 139630579183615, +STORE, 139630579183616, 139630579187711, +STORE, 140731219718144, 140731219722239, +STORE, 140731219705856, 140731219718143, +STORE, 139630579146752, 139630579175423, +STORE, 139630579138560, 139630579146751, +STORE, 139630573137920, 139630576934911, +SNULL, 139630573137920, 139630574796799, +STORE, 139630574796800, 139630576934911, +STORE, 139630573137920, 139630574796799, +SNULL, 139630576893951, 139630576934911, +STORE, 139630574796800, 139630576893951, +STORE, 139630576893952, 139630576934911, +SNULL, 139630576893952, 139630576918527, +STORE, 139630576918528, 139630576934911, +STORE, 139630576893952, 139630576918527, +ERASE, 139630576893952, 139630576918527, +STORE, 139630576893952, 139630576918527, +ERASE, 139630576918528, 139630576934911, +STORE, 139630576918528, 139630576934911, +SNULL, 139630576910335, 139630576918527, +STORE, 139630576893952, 139630576910335, +STORE, 139630576910336, 139630576918527, +SNULL, 93888805859327, 93888805863423, +STORE, 93888805851136, 93888805859327, +STORE, 93888805859328, 93888805863423, +SNULL, 139630579179519, 139630579183615, +STORE, 139630579175424, 139630579179519, +STORE, 139630579179520, 139630579183615, +ERASE, 139630579146752, 139630579175423, +STORE, 93888822235136, 93888822370303, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733391151104, 140737488351231, +SNULL, 140733391159295, 140737488351231, +STORE, 140733391151104, 140733391159295, +STORE, 140733391020032, 140733391159295, +STORE, 94393875324928, 94393877549055, +SNULL, 94393875435519, 94393877549055, 
+STORE, 94393875324928, 94393875435519, +STORE, 94393875435520, 94393877549055, +ERASE, 94393875435520, 94393877549055, +STORE, 94393877528576, 94393877540863, +STORE, 94393877540864, 94393877549055, +STORE, 140292111740928, 140292113993727, +SNULL, 140292111884287, 140292113993727, +STORE, 140292111740928, 140292111884287, +STORE, 140292111884288, 140292113993727, +ERASE, 140292111884288, 140292113993727, +STORE, 140292113981440, 140292113989631, +STORE, 140292113989632, 140292113993727, +STORE, 140733391532032, 140733391536127, +STORE, 140733391519744, 140733391532031, +STORE, 140292113952768, 140292113981439, +STORE, 140292113944576, 140292113952767, +STORE, 140292107943936, 140292111740927, +SNULL, 140292107943936, 140292109602815, +STORE, 140292109602816, 140292111740927, +STORE, 140292107943936, 140292109602815, +SNULL, 140292111699967, 140292111740927, +STORE, 140292109602816, 140292111699967, +STORE, 140292111699968, 140292111740927, +SNULL, 140292111699968, 140292111724543, +STORE, 140292111724544, 140292111740927, +STORE, 140292111699968, 140292111724543, +ERASE, 140292111699968, 140292111724543, +STORE, 140292111699968, 140292111724543, +ERASE, 140292111724544, 140292111740927, +STORE, 140292111724544, 140292111740927, +SNULL, 140292111716351, 140292111724543, +STORE, 140292111699968, 140292111716351, +STORE, 140292111716352, 140292111724543, +SNULL, 94393877536767, 94393877540863, +STORE, 94393877528576, 94393877536767, +STORE, 94393877536768, 94393877540863, +SNULL, 140292113985535, 140292113989631, +STORE, 140292113981440, 140292113985535, +STORE, 140292113985536, 140292113989631, +ERASE, 140292113952768, 140292113981439, +STORE, 94393909342208, 94393909477375, +STORE, 94458367512576, 94458367725567, +STORE, 94458369822720, 94458369826815, +STORE, 94458369826816, 94458369835007, +STORE, 94458369835008, 94458369847295, +STORE, 94458393292800, 94458399666175, +STORE, 140619773841408, 140619775500287, +STORE, 140619775500288, 140619777597439, +STORE, 140619777597440, 140619777613823, +STORE, 140619777613824, 140619777622015, +STORE, 140619777622016, 140619777638399, +STORE, 140619777638400, 140619777650687, +STORE, 140619777650688, 140619779743743, +STORE, 140619779743744, 140619779747839, +STORE, 140619779747840, 140619779751935, +STORE, 140619779751936, 140619779895295, +STORE, 140619780263936, 140619781947391, +STORE, 140619781947392, 140619781963775, +STORE, 140619781992448, 140619781996543, +STORE, 140619781996544, 140619782000639, +STORE, 140619782000640, 140619782004735, +STORE, 140725811675136, 140725811814399, +STORE, 140725812813824, 140725812826111, +STORE, 140725812826112, 140725812830207, +STORE, 94458367512576, 94458367725567, +STORE, 94458369822720, 94458369826815, +STORE, 94458369826816, 94458369835007, +STORE, 94458369835008, 94458369847295, +STORE, 94458393292800, 94458400366591, +STORE, 140619773841408, 140619775500287, +STORE, 140619775500288, 140619777597439, +STORE, 140619777597440, 140619777613823, +STORE, 140619777613824, 140619777622015, +STORE, 140619777622016, 140619777638399, +STORE, 140619777638400, 140619777650687, +STORE, 140619777650688, 140619779743743, +STORE, 140619779743744, 140619779747839, +STORE, 140619779747840, 140619779751935, +STORE, 140619779751936, 140619779895295, +STORE, 140619780263936, 140619781947391, +STORE, 140619781947392, 140619781963775, +STORE, 140619781992448, 140619781996543, +STORE, 140619781996544, 140619782000639, +STORE, 140619782000640, 140619782004735, +STORE, 140725811675136, 140725811814399, +STORE, 
140725812813824, 140725812826111, +STORE, 140725812826112, 140725812830207, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728740679680, 140737488351231, +SNULL, 140728740687871, 140737488351231, +STORE, 140728740679680, 140728740687871, +STORE, 140728740548608, 140728740687871, +STORE, 94764075249664, 94764077473791, +SNULL, 94764075360255, 94764077473791, +STORE, 94764075249664, 94764075360255, +STORE, 94764075360256, 94764077473791, +ERASE, 94764075360256, 94764077473791, +STORE, 94764077453312, 94764077465599, +STORE, 94764077465600, 94764077473791, +STORE, 139766406791168, 139766409043967, +SNULL, 139766406934527, 139766409043967, +STORE, 139766406791168, 139766406934527, +STORE, 139766406934528, 139766409043967, +ERASE, 139766406934528, 139766409043967, +STORE, 139766409031680, 139766409039871, +STORE, 139766409039872, 139766409043967, +STORE, 140728740913152, 140728740917247, +STORE, 140728740900864, 140728740913151, +STORE, 139766409003008, 139766409031679, +STORE, 139766408994816, 139766409003007, +STORE, 139766402994176, 139766406791167, +SNULL, 139766402994176, 139766404653055, +STORE, 139766404653056, 139766406791167, +STORE, 139766402994176, 139766404653055, +SNULL, 139766406750207, 139766406791167, +STORE, 139766404653056, 139766406750207, +STORE, 139766406750208, 139766406791167, +SNULL, 139766406750208, 139766406774783, +STORE, 139766406774784, 139766406791167, +STORE, 139766406750208, 139766406774783, +ERASE, 139766406750208, 139766406774783, +STORE, 139766406750208, 139766406774783, +ERASE, 139766406774784, 139766406791167, +STORE, 139766406774784, 139766406791167, +SNULL, 139766406766591, 139766406774783, +STORE, 139766406750208, 139766406766591, +STORE, 139766406766592, 139766406774783, +SNULL, 94764077461503, 94764077465599, +STORE, 94764077453312, 94764077461503, +STORE, 94764077461504, 94764077465599, +SNULL, 139766409035775, 139766409039871, +STORE, 139766409031680, 139766409035775, +STORE, 139766409035776, 139766409039871, +ERASE, 139766409003008, 139766409031679, +STORE, 94764090458112, 94764090593279, +STORE, 94758057480192, 94758057590783, +STORE, 94758059683840, 94758059692031, +STORE, 94758059692032, 94758059696127, +STORE, 94758059696128, 94758059704319, +STORE, 94758083215360, 94758083350527, +STORE, 139951456772096, 139951458430975, +STORE, 139951458430976, 139951460528127, +STORE, 139951460528128, 139951460544511, +STORE, 139951460544512, 139951460552703, +STORE, 139951460552704, 139951460569087, +STORE, 139951460569088, 139951460712447, +STORE, 139951462772736, 139951462780927, +STORE, 139951462809600, 139951462813695, +STORE, 139951462813696, 139951462817791, +STORE, 139951462817792, 139951462821887, +STORE, 140734098313216, 140734098452479, +STORE, 140734098911232, 140734098923519, +STORE, 140734098923520, 140734098927615, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724904095744, 140737488351231, +SNULL, 140724904103935, 140737488351231, +STORE, 140724904095744, 140724904103935, +STORE, 140724903964672, 140724904103935, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 140408497864704, 140408500117503, +SNULL, 140408498008063, 140408500117503, +STORE, 140408497864704, 140408498008063, +STORE, 140408498008064, 140408500117503, +ERASE, 140408498008064, 140408500117503, +STORE, 140408500105216, 140408500113407, +STORE, 140408500113408, 140408500117503, +STORE, 140724905369600, 140724905373695, +STORE, 140724905357312, 
140724905369599, +STORE, 140408500076544, 140408500105215, +STORE, 140408500068352, 140408500076543, +STORE, 140408494702592, 140408497864703, +SNULL, 140408494702592, 140408495763455, +STORE, 140408495763456, 140408497864703, +STORE, 140408494702592, 140408495763455, +SNULL, 140408497856511, 140408497864703, +STORE, 140408495763456, 140408497856511, +STORE, 140408497856512, 140408497864703, +ERASE, 140408497856512, 140408497864703, +STORE, 140408497856512, 140408497864703, +STORE, 140408490905600, 140408494702591, +SNULL, 140408490905600, 140408492564479, +STORE, 140408492564480, 140408494702591, +STORE, 140408490905600, 140408492564479, +SNULL, 140408494661631, 140408494702591, +STORE, 140408492564480, 140408494661631, +STORE, 140408494661632, 140408494702591, +SNULL, 140408494661632, 140408494686207, +STORE, 140408494686208, 140408494702591, +STORE, 140408494661632, 140408494686207, +ERASE, 140408494661632, 140408494686207, +STORE, 140408494661632, 140408494686207, +ERASE, 140408494686208, 140408494702591, +STORE, 140408494686208, 140408494702591, +STORE, 140408500056064, 140408500076543, +SNULL, 140408494678015, 140408494686207, +STORE, 140408494661632, 140408494678015, +STORE, 140408494678016, 140408494686207, +SNULL, 140408497860607, 140408497864703, +STORE, 140408497856512, 140408497860607, +STORE, 140408497860608, 140408497864703, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 140408500109311, 140408500113407, +STORE, 140408500105216, 140408500109311, +STORE, 140408500109312, 140408500113407, +ERASE, 140408500076544, 140408500105215, +STORE, 25235456, 25370623, +STORE, 25235456, 25518079, +STORE, 140408498372608, 140408500056063, +STORE, 94543937388544, 94543937499135, +STORE, 94543939592192, 94543939600383, +STORE, 94543939600384, 94543939604479, +STORE, 94543939604480, 94543939612671, +STORE, 94543941447680, 94543941582847, +STORE, 140282621947904, 140282623606783, +STORE, 140282623606784, 140282625703935, +STORE, 140282625703936, 140282625720319, +STORE, 140282625720320, 140282625728511, +STORE, 140282625728512, 140282625744895, +STORE, 140282625744896, 140282625888255, +STORE, 140282627948544, 140282627956735, +STORE, 140282627985408, 140282627989503, +STORE, 140282627989504, 140282627993599, +STORE, 140282627993600, 140282627997695, +STORE, 140728295723008, 140728295862271, +STORE, 140728296476672, 140728296488959, +STORE, 140728296488960, 140728296493055, +STORE, 94431504838656, 94431505051647, +STORE, 94431507148800, 94431507152895, +STORE, 94431507152896, 94431507161087, +STORE, 94431507161088, 94431507173375, +STORE, 94431510286336, 94431510691839, +STORE, 139818797948928, 139818799607807, +STORE, 139818799607808, 139818801704959, +STORE, 139818801704960, 139818801721343, +STORE, 139818801721344, 139818801729535, +STORE, 139818801729536, 139818801745919, +STORE, 139818801745920, 139818801758207, +STORE, 139818801758208, 139818803851263, +STORE, 139818803851264, 139818803855359, +STORE, 139818803855360, 139818803859455, +STORE, 139818803859456, 139818804002815, +STORE, 139818804371456, 139818806054911, +STORE, 139818806054912, 139818806071295, +STORE, 139818806099968, 139818806104063, +STORE, 139818806104064, 139818806108159, +STORE, 139818806108160, 139818806112255, +STORE, 140731430457344, 140731430596607, +STORE, 140731431227392, 140731431239679, +STORE, 140731431239680, 140731431243775, +STORE, 94431504838656, 94431505051647, +STORE, 94431507148800, 94431507152895, +STORE, 94431507152896, 94431507161087, +STORE, 94431507161088, 
94431507173375, +STORE, 94431510286336, 94431510691839, +STORE, 139818797948928, 139818799607807, +STORE, 139818799607808, 139818801704959, +STORE, 139818801704960, 139818801721343, +STORE, 139818801721344, 139818801729535, +STORE, 139818801729536, 139818801745919, +STORE, 139818801745920, 139818801758207, +STORE, 139818801758208, 139818803851263, +STORE, 139818803851264, 139818803855359, +STORE, 139818803855360, 139818803859455, +STORE, 139818803859456, 139818804002815, +STORE, 139818804371456, 139818806054911, +STORE, 139818806054912, 139818806071295, +STORE, 139818806099968, 139818806104063, +STORE, 139818806104064, 139818806108159, +STORE, 139818806108160, 139818806112255, +STORE, 140731430457344, 140731430596607, +STORE, 140731431227392, 140731431239679, +STORE, 140731431239680, 140731431243775, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140737488338944, 140737488351231, +STORE, 140736944451584, 140737488351231, +SNULL, 140736944463871, 140737488351231, +STORE, 140736944451584, 140736944463871, +STORE, 140736944320512, 140736944463871, +STORE, 4194304, 26279935, +STORE, 28372992, 28454911, +STORE, 28454912, 29806591, +STORE, 139693609893888, 139693612146687, +SNULL, 139693610037247, 139693612146687, +STORE, 139693609893888, 139693610037247, +STORE, 139693610037248, 139693612146687, +ERASE, 139693610037248, 139693612146687, +STORE, 139693612134400, 139693612142591, +STORE, 139693612142592, 139693612146687, +STORE, 140736945152000, 140736945156095, +STORE, 140736945139712, 140736945151999, +STORE, 139693612105728, 139693612134399, +STORE, 139693612097536, 139693612105727, +STORE, 139693606060032, 139693609893887, +SNULL, 139693606060032, 139693607768063, +STORE, 139693607768064, 139693609893887, +STORE, 139693606060032, 139693607768063, +SNULL, 139693609861119, 139693609893887, +STORE, 139693607768064, 139693609861119, +STORE, 139693609861120, 139693609893887, +ERASE, 139693609861120, 139693609893887, +STORE, 139693609861120, 139693609893887, +STORE, 139693603864576, 139693606060031, +SNULL, 139693603864576, 139693603958783, +STORE, 139693603958784, 139693606060031, +STORE, 139693603864576, 139693603958783, +SNULL, 139693606051839, 139693606060031, +STORE, 139693603958784, 139693606051839, +STORE, 139693606051840, 139693606060031, +ERASE, 139693606051840, 139693606060031, +STORE, 139693606051840, 139693606060031, +STORE, 139693601345536, 139693603864575, +SNULL, 139693601345536, 139693601759231, +STORE, 139693601759232, 139693603864575, +STORE, 139693601345536, 139693601759231, +SNULL, 139693603852287, 139693603864575, +STORE, 139693601759232, 139693603852287, +STORE, 139693603852288, 139693603864575, +ERASE, 139693603852288, 139693603864575, +STORE, 139693603852288, 139693603864575, +STORE, 139693598711808, 139693601345535, +SNULL, 139693598711808, 139693599240191, +STORE, 139693599240192, 139693601345535, +STORE, 139693598711808, 139693599240191, +SNULL, 139693601337343, 139693601345535, +STORE, 139693599240192, 139693601337343, +STORE, 139693601337344, 139693601345535, +ERASE, 139693601337344, 139693601345535, +STORE, 139693601337344, 139693601345535, +STORE, 139693596598272, 139693598711807, +SNULL, 139693596598272, 139693596610559, +STORE, 139693596610560, 139693598711807, +STORE, 139693596598272, 139693596610559, +SNULL, 139693598703615, 139693598711807, +STORE, 139693596610560, 139693598703615, +STORE, 139693598703616, 139693598711807, +ERASE, 139693598703616, 139693598711807, +STORE, 139693598703616, 139693598711807, +STORE, 
139693594394624, 139693596598271, +SNULL, 139693594394624, 139693594497023, +STORE, 139693594497024, 139693596598271, +STORE, 139693594394624, 139693594497023, +SNULL, 139693596590079, 139693596598271, +STORE, 139693594497024, 139693596590079, +STORE, 139693596590080, 139693596598271, +ERASE, 139693596590080, 139693596598271, +STORE, 139693596590080, 139693596598271, +STORE, 139693612089344, 139693612105727, +STORE, 139693591232512, 139693594394623, +SNULL, 139693591232512, 139693592293375, +STORE, 139693592293376, 139693594394623, +STORE, 139693591232512, 139693592293375, +SNULL, 139693594386431, 139693594394623, +STORE, 139693592293376, 139693594386431, +STORE, 139693594386432, 139693594394623, +ERASE, 139693594386432, 139693594394623, +STORE, 139693594386432, 139693594394623, +STORE, 139693587435520, 139693591232511, +SNULL, 139693587435520, 139693589094399, +STORE, 139693589094400, 139693591232511, +STORE, 139693587435520, 139693589094399, +SNULL, 139693591191551, 139693591232511, +STORE, 139693589094400, 139693591191551, +STORE, 139693591191552, 139693591232511, +SNULL, 139693591191552, 139693591216127, +STORE, 139693591216128, 139693591232511, +STORE, 139693591191552, 139693591216127, +ERASE, 139693591191552, 139693591216127, +STORE, 139693591191552, 139693591216127, +ERASE, 139693591216128, 139693591232511, +STORE, 139693591216128, 139693591232511, +STORE, 139693612077056, 139693612105727, +SNULL, 139693591207935, 139693591216127, +STORE, 139693591191552, 139693591207935, +STORE, 139693591207936, 139693591216127, +SNULL, 139693594390527, 139693594394623, +STORE, 139693594386432, 139693594390527, +STORE, 139693594390528, 139693594394623, +SNULL, 139693596594175, 139693596598271, +STORE, 139693596590080, 139693596594175, +STORE, 139693596594176, 139693596598271, +SNULL, 139693598707711, 139693598711807, +STORE, 139693598703616, 139693598707711, +STORE, 139693598707712, 139693598711807, +SNULL, 139693601341439, 139693601345535, +STORE, 139693601337344, 139693601341439, +STORE, 139693601341440, 139693601345535, +SNULL, 139693603860479, 139693603864575, +STORE, 139693603852288, 139693603860479, +STORE, 139693603860480, 139693603864575, +SNULL, 139693606055935, 139693606060031, +STORE, 139693606051840, 139693606055935, +STORE, 139693606055936, 139693606060031, +SNULL, 139693609865215, 139693609893887, +STORE, 139693609861120, 139693609865215, +STORE, 139693609865216, 139693609893887, +SNULL, 28405759, 28454911, +STORE, 28372992, 28405759, +STORE, 28405760, 28454911, +SNULL, 139693612138495, 139693612142591, +STORE, 139693612134400, 139693612138495, +STORE, 139693612138496, 139693612142591, +ERASE, 139693612105728, 139693612134399, +STORE, 39976960, 40112127, +STORE, 139693610393600, 139693612077055, +STORE, 139693612130304, 139693612134399, +STORE, 139693610258432, 139693610393599, +STORE, 39976960, 40255487, +STORE, 139693585338368, 139693587435519, +STORE, 139693612122112, 139693612134399, +STORE, 139693612113920, 139693612134399, +STORE, 139693612077056, 139693612113919, +STORE, 139693610242048, 139693610393599, +STORE, 39976960, 40390655, +STORE, 39976960, 40546303, +STORE, 139693610233856, 139693610393599, +STORE, 139693610225664, 139693610393599, +STORE, 39976960, 40714239, +STORE, 139693610209280, 139693610393599, +STORE, 39976960, 40861695, +STORE, 94431504838656, 94431505051647, +STORE, 94431507148800, 94431507152895, +STORE, 94431507152896, 94431507161087, +STORE, 94431507161088, 94431507173375, +STORE, 94431510286336, 94431528759295, +STORE, 139818797948928, 139818799607807, 
+STORE, 139818799607808, 139818801704959, +STORE, 139818801704960, 139818801721343, +STORE, 139818801721344, 139818801729535, +STORE, 139818801729536, 139818801745919, +STORE, 139818801745920, 139818801758207, +STORE, 139818801758208, 139818803851263, +STORE, 139818803851264, 139818803855359, +STORE, 139818803855360, 139818803859455, +STORE, 139818803859456, 139818804002815, +STORE, 139818804371456, 139818806054911, +STORE, 139818806054912, 139818806071295, +STORE, 139818806099968, 139818806104063, +STORE, 139818806104064, 139818806108159, +STORE, 139818806108160, 139818806112255, +STORE, 140731430457344, 140731430596607, +STORE, 140731431227392, 140731431239679, +STORE, 140731431239680, 140731431243775, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140729993904128, 140737488351231, +SNULL, 140729993912319, 140737488351231, +STORE, 140729993904128, 140729993912319, +STORE, 140729993773056, 140729993912319, +STORE, 93926271991808, 93926274215935, +SNULL, 93926272102399, 93926274215935, +STORE, 93926271991808, 93926272102399, +STORE, 93926272102400, 93926274215935, +ERASE, 93926272102400, 93926274215935, +STORE, 93926274195456, 93926274207743, +STORE, 93926274207744, 93926274215935, +STORE, 139962167296000, 139962169548799, +SNULL, 139962167439359, 139962169548799, +STORE, 139962167296000, 139962167439359, +STORE, 139962167439360, 139962169548799, +ERASE, 139962167439360, 139962169548799, +STORE, 139962169536512, 139962169544703, +STORE, 139962169544704, 139962169548799, +STORE, 140729995096064, 140729995100159, +STORE, 140729995083776, 140729995096063, +STORE, 139962169507840, 139962169536511, +STORE, 139962169499648, 139962169507839, +STORE, 139962163499008, 139962167295999, +SNULL, 139962163499008, 139962165157887, +STORE, 139962165157888, 139962167295999, +STORE, 139962163499008, 139962165157887, +SNULL, 139962167255039, 139962167295999, +STORE, 139962165157888, 139962167255039, +STORE, 139962167255040, 139962167295999, +SNULL, 139962167255040, 139962167279615, +STORE, 139962167279616, 139962167295999, +STORE, 139962167255040, 139962167279615, +ERASE, 139962167255040, 139962167279615, +STORE, 139962167255040, 139962167279615, +ERASE, 139962167279616, 139962167295999, +STORE, 139962167279616, 139962167295999, +SNULL, 139962167271423, 139962167279615, +STORE, 139962167255040, 139962167271423, +STORE, 139962167271424, 139962167279615, +SNULL, 93926274203647, 93926274207743, +STORE, 93926274195456, 93926274203647, +STORE, 93926274203648, 93926274207743, +SNULL, 139962169540607, 139962169544703, +STORE, 139962169536512, 139962169540607, +STORE, 139962169540608, 139962169544703, +ERASE, 139962169507840, 139962169536511, +STORE, 93926291120128, 93926291255295, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724960579584, 140737488351231, +SNULL, 140724960587775, 140737488351231, +STORE, 140724960579584, 140724960587775, +STORE, 140724960448512, 140724960587775, +STORE, 94246489489408, 94246491713535, +SNULL, 94246489599999, 94246491713535, +STORE, 94246489489408, 94246489599999, +STORE, 94246489600000, 94246491713535, +ERASE, 94246489600000, 94246491713535, +STORE, 94246491693056, 94246491705343, +STORE, 94246491705344, 94246491713535, +STORE, 140098174926848, 140098177179647, +SNULL, 140098175070207, 140098177179647, +STORE, 140098174926848, 140098175070207, +STORE, 140098175070208, 140098177179647, +ERASE, 140098175070208, 140098177179647, +STORE, 140098177167360, 140098177175551, +STORE, 
140098177175552, 140098177179647, +STORE, 140724961439744, 140724961443839, +STORE, 140724961427456, 140724961439743, +STORE, 140098177138688, 140098177167359, +STORE, 140098177130496, 140098177138687, +STORE, 140098171129856, 140098174926847, +SNULL, 140098171129856, 140098172788735, +STORE, 140098172788736, 140098174926847, +STORE, 140098171129856, 140098172788735, +SNULL, 140098174885887, 140098174926847, +STORE, 140098172788736, 140098174885887, +STORE, 140098174885888, 140098174926847, +SNULL, 140098174885888, 140098174910463, +STORE, 140098174910464, 140098174926847, +STORE, 140098174885888, 140098174910463, +ERASE, 140098174885888, 140098174910463, +STORE, 140098174885888, 140098174910463, +ERASE, 140098174910464, 140098174926847, +STORE, 140098174910464, 140098174926847, +SNULL, 140098174902271, 140098174910463, +STORE, 140098174885888, 140098174902271, +STORE, 140098174902272, 140098174910463, +SNULL, 94246491701247, 94246491705343, +STORE, 94246491693056, 94246491701247, +STORE, 94246491701248, 94246491705343, +SNULL, 140098177171455, 140098177175551, +STORE, 140098177167360, 140098177171455, +STORE, 140098177171456, 140098177175551, +ERASE, 140098177138688, 140098177167359, +STORE, 94246516998144, 94246517133311, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140730522918912, 140737488351231, +SNULL, 140730522927103, 140737488351231, +STORE, 140730522918912, 140730522927103, +STORE, 140730522787840, 140730522927103, +STORE, 94196043120640, 94196045344767, +SNULL, 94196043231231, 94196045344767, +STORE, 94196043120640, 94196043231231, +STORE, 94196043231232, 94196045344767, +ERASE, 94196043231232, 94196045344767, +STORE, 94196045324288, 94196045336575, +STORE, 94196045336576, 94196045344767, +STORE, 139815918940160, 139815921192959, +SNULL, 139815919083519, 139815921192959, +STORE, 139815918940160, 139815919083519, +STORE, 139815919083520, 139815921192959, +ERASE, 139815919083520, 139815921192959, +STORE, 139815921180672, 139815921188863, +STORE, 139815921188864, 139815921192959, +STORE, 140730523344896, 140730523348991, +STORE, 140730523332608, 140730523344895, +STORE, 139815921152000, 139815921180671, +STORE, 139815921143808, 139815921151999, +STORE, 139815915143168, 139815918940159, +SNULL, 139815915143168, 139815916802047, +STORE, 139815916802048, 139815918940159, +STORE, 139815915143168, 139815916802047, +SNULL, 139815918899199, 139815918940159, +STORE, 139815916802048, 139815918899199, +STORE, 139815918899200, 139815918940159, +SNULL, 139815918899200, 139815918923775, +STORE, 139815918923776, 139815918940159, +STORE, 139815918899200, 139815918923775, +ERASE, 139815918899200, 139815918923775, +STORE, 139815918899200, 139815918923775, +ERASE, 139815918923776, 139815918940159, +STORE, 139815918923776, 139815918940159, +SNULL, 139815918915583, 139815918923775, +STORE, 139815918899200, 139815918915583, +STORE, 139815918915584, 139815918923775, +SNULL, 94196045332479, 94196045336575, +STORE, 94196045324288, 94196045332479, +STORE, 94196045332480, 94196045336575, +SNULL, 139815921184767, 139815921188863, +STORE, 139815921180672, 139815921184767, +STORE, 139815921184768, 139815921188863, +ERASE, 139815921152000, 139815921180671, +STORE, 94196076183552, 94196076318719, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722460393472, 140737488351231, +SNULL, 140722460401663, 140737488351231, +STORE, 140722460393472, 140722460401663, +STORE, 140722460262400, 140722460401663, +STORE, 
94569810399232, 94569812623359, +SNULL, 94569810509823, 94569812623359, +STORE, 94569810399232, 94569810509823, +STORE, 94569810509824, 94569812623359, +ERASE, 94569810509824, 94569812623359, +STORE, 94569812602880, 94569812615167, +STORE, 94569812615168, 94569812623359, +STORE, 139681565450240, 139681567703039, +SNULL, 139681565593599, 139681567703039, +STORE, 139681565450240, 139681565593599, +STORE, 139681565593600, 139681567703039, +ERASE, 139681565593600, 139681567703039, +STORE, 139681567690752, 139681567698943, +STORE, 139681567698944, 139681567703039, +STORE, 140722460569600, 140722460573695, +STORE, 140722460557312, 140722460569599, +STORE, 139681567662080, 139681567690751, +STORE, 139681567653888, 139681567662079, +STORE, 139681561653248, 139681565450239, +SNULL, 139681561653248, 139681563312127, +STORE, 139681563312128, 139681565450239, +STORE, 139681561653248, 139681563312127, +SNULL, 139681565409279, 139681565450239, +STORE, 139681563312128, 139681565409279, +STORE, 139681565409280, 139681565450239, +SNULL, 139681565409280, 139681565433855, +STORE, 139681565433856, 139681565450239, +STORE, 139681565409280, 139681565433855, +ERASE, 139681565409280, 139681565433855, +STORE, 139681565409280, 139681565433855, +ERASE, 139681565433856, 139681565450239, +STORE, 139681565433856, 139681565450239, +SNULL, 139681565425663, 139681565433855, +STORE, 139681565409280, 139681565425663, +STORE, 139681565425664, 139681565433855, +SNULL, 94569812611071, 94569812615167, +STORE, 94569812602880, 94569812611071, +STORE, 94569812611072, 94569812615167, +SNULL, 139681567694847, 139681567698943, +STORE, 139681567690752, 139681567694847, +STORE, 139681567694848, 139681567698943, +ERASE, 139681567662080, 139681567690751, +STORE, 94569818066944, 94569818202111, +STORE, 94431504838656, 94431505051647, +STORE, 94431507148800, 94431507152895, +STORE, 94431507152896, 94431507161087, +STORE, 94431507161088, 94431507173375, +STORE, 94431510286336, 94431534280703, +STORE, 139818797948928, 139818799607807, +STORE, 139818799607808, 139818801704959, +STORE, 139818801704960, 139818801721343, +STORE, 139818801721344, 139818801729535, +STORE, 139818801729536, 139818801745919, +STORE, 139818801745920, 139818801758207, +STORE, 139818801758208, 139818803851263, +STORE, 139818803851264, 139818803855359, +STORE, 139818803855360, 139818803859455, +STORE, 139818803859456, 139818804002815, +STORE, 139818804371456, 139818806054911, +STORE, 139818806054912, 139818806071295, +STORE, 139818806099968, 139818806104063, +STORE, 139818806104064, 139818806108159, +STORE, 139818806108160, 139818806112255, +STORE, 140731430457344, 140731430596607, +STORE, 140731431227392, 140731431239679, +STORE, 140731431239680, 140731431243775, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140725452365824, 140737488351231, +SNULL, 140725452374015, 140737488351231, +STORE, 140725452365824, 140725452374015, +STORE, 140725452234752, 140725452374015, +STORE, 94395067465728, 94395069689855, +SNULL, 94395067576319, 94395069689855, +STORE, 94395067465728, 94395067576319, +STORE, 94395067576320, 94395069689855, +ERASE, 94395067576320, 94395069689855, +STORE, 94395069669376, 94395069681663, +STORE, 94395069681664, 94395069689855, +STORE, 140269941211136, 140269943463935, +SNULL, 140269941354495, 140269943463935, +STORE, 140269941211136, 140269941354495, +STORE, 140269941354496, 140269943463935, +ERASE, 140269941354496, 140269943463935, +STORE, 140269943451648, 140269943459839, +STORE, 140269943459840, 
140269943463935, +STORE, 140725452558336, 140725452562431, +STORE, 140725452546048, 140725452558335, +STORE, 140269943422976, 140269943451647, +STORE, 140269943414784, 140269943422975, +STORE, 140269937414144, 140269941211135, +SNULL, 140269937414144, 140269939073023, +STORE, 140269939073024, 140269941211135, +STORE, 140269937414144, 140269939073023, +SNULL, 140269941170175, 140269941211135, +STORE, 140269939073024, 140269941170175, +STORE, 140269941170176, 140269941211135, +SNULL, 140269941170176, 140269941194751, +STORE, 140269941194752, 140269941211135, +STORE, 140269941170176, 140269941194751, +ERASE, 140269941170176, 140269941194751, +STORE, 140269941170176, 140269941194751, +ERASE, 140269941194752, 140269941211135, +STORE, 140269941194752, 140269941211135, +SNULL, 140269941186559, 140269941194751, +STORE, 140269941170176, 140269941186559, +STORE, 140269941186560, 140269941194751, +SNULL, 94395069677567, 94395069681663, +STORE, 94395069669376, 94395069677567, +STORE, 94395069677568, 94395069681663, +SNULL, 140269943455743, 140269943459839, +STORE, 140269943451648, 140269943455743, +STORE, 140269943455744, 140269943459839, +ERASE, 140269943422976, 140269943451647, +STORE, 94395101691904, 94395101827071, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733860118528, 140737488351231, +SNULL, 140733860126719, 140737488351231, +STORE, 140733860118528, 140733860126719, +STORE, 140733859987456, 140733860126719, +STORE, 94484752990208, 94484755214335, +SNULL, 94484753100799, 94484755214335, +STORE, 94484752990208, 94484753100799, +STORE, 94484753100800, 94484755214335, +ERASE, 94484753100800, 94484755214335, +STORE, 94484755193856, 94484755206143, +STORE, 94484755206144, 94484755214335, +STORE, 139958922309632, 139958924562431, +SNULL, 139958922452991, 139958924562431, +STORE, 139958922309632, 139958922452991, +STORE, 139958922452992, 139958924562431, +ERASE, 139958922452992, 139958924562431, +STORE, 139958924550144, 139958924558335, +STORE, 139958924558336, 139958924562431, +STORE, 140733860253696, 140733860257791, +STORE, 140733860241408, 140733860253695, +STORE, 139958924521472, 139958924550143, +STORE, 139958924513280, 139958924521471, +STORE, 139958918512640, 139958922309631, +SNULL, 139958918512640, 139958920171519, +STORE, 139958920171520, 139958922309631, +STORE, 139958918512640, 139958920171519, +SNULL, 139958922268671, 139958922309631, +STORE, 139958920171520, 139958922268671, +STORE, 139958922268672, 139958922309631, +SNULL, 139958922268672, 139958922293247, +STORE, 139958922293248, 139958922309631, +STORE, 139958922268672, 139958922293247, +ERASE, 139958922268672, 139958922293247, +STORE, 139958922268672, 139958922293247, +ERASE, 139958922293248, 139958922309631, +STORE, 139958922293248, 139958922309631, +SNULL, 139958922285055, 139958922293247, +STORE, 139958922268672, 139958922285055, +STORE, 139958922285056, 139958922293247, +SNULL, 94484755202047, 94484755206143, +STORE, 94484755193856, 94484755202047, +STORE, 94484755202048, 94484755206143, +SNULL, 139958924554239, 139958924558335, +STORE, 139958924550144, 139958924554239, +STORE, 139958924554240, 139958924558335, +ERASE, 139958924521472, 139958924550143, +STORE, 94484777615360, 94484777750527, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140731051036672, 140737488351231, +SNULL, 140731051044863, 140737488351231, +STORE, 140731051036672, 140731051044863, +STORE, 140731050905600, 140731051044863, +STORE, 93945822998528, 93945825222655, 
+SNULL, 93945823109119, 93945825222655, +STORE, 93945822998528, 93945823109119, +STORE, 93945823109120, 93945825222655, +ERASE, 93945823109120, 93945825222655, +STORE, 93945825202176, 93945825214463, +STORE, 93945825214464, 93945825222655, +STORE, 140153503997952, 140153506250751, +SNULL, 140153504141311, 140153506250751, +STORE, 140153503997952, 140153504141311, +STORE, 140153504141312, 140153506250751, +ERASE, 140153504141312, 140153506250751, +STORE, 140153506238464, 140153506246655, +STORE, 140153506246656, 140153506250751, +STORE, 140731051331584, 140731051335679, +STORE, 140731051319296, 140731051331583, +STORE, 140153506209792, 140153506238463, +STORE, 140153506201600, 140153506209791, +STORE, 140153500200960, 140153503997951, +SNULL, 140153500200960, 140153501859839, +STORE, 140153501859840, 140153503997951, +STORE, 140153500200960, 140153501859839, +SNULL, 140153503956991, 140153503997951, +STORE, 140153501859840, 140153503956991, +STORE, 140153503956992, 140153503997951, +SNULL, 140153503956992, 140153503981567, +STORE, 140153503981568, 140153503997951, +STORE, 140153503956992, 140153503981567, +ERASE, 140153503956992, 140153503981567, +STORE, 140153503956992, 140153503981567, +ERASE, 140153503981568, 140153503997951, +STORE, 140153503981568, 140153503997951, +SNULL, 140153503973375, 140153503981567, +STORE, 140153503956992, 140153503973375, +STORE, 140153503973376, 140153503981567, +SNULL, 93945825210367, 93945825214463, +STORE, 93945825202176, 93945825210367, +STORE, 93945825210368, 93945825214463, +SNULL, 140153506242559, 140153506246655, +STORE, 140153506238464, 140153506242559, +STORE, 140153506242560, 140153506246655, +ERASE, 140153506209792, 140153506238463, +STORE, 93945854537728, 93945854672895, +STORE, 94431504838656, 94431505051647, +STORE, 94431507148800, 94431507152895, +STORE, 94431507152896, 94431507161087, +STORE, 94431507161088, 94431507173375, +STORE, 94431510286336, 94431537885183, +STORE, 139818797948928, 139818799607807, +STORE, 139818799607808, 139818801704959, +STORE, 139818801704960, 139818801721343, +STORE, 139818801721344, 139818801729535, +STORE, 139818801729536, 139818801745919, +STORE, 139818801745920, 139818801758207, +STORE, 139818801758208, 139818803851263, +STORE, 139818803851264, 139818803855359, +STORE, 139818803855360, 139818803859455, +STORE, 139818803859456, 139818804002815, +STORE, 139818804371456, 139818806054911, +STORE, 139818806054912, 139818806071295, +STORE, 139818806099968, 139818806104063, +STORE, 139818806104064, 139818806108159, +STORE, 139818806108160, 139818806112255, +STORE, 140731430457344, 140731430596607, +STORE, 140731431227392, 140731431239679, +STORE, 140731431239680, 140731431243775, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140736025325568, 140737488351231, +SNULL, 140736025333759, 140737488351231, +STORE, 140736025325568, 140736025333759, +STORE, 140736025194496, 140736025333759, +STORE, 94809095172096, 94809097396223, +SNULL, 94809095282687, 94809097396223, +STORE, 94809095172096, 94809095282687, +STORE, 94809095282688, 94809097396223, +ERASE, 94809095282688, 94809097396223, +STORE, 94809097375744, 94809097388031, +STORE, 94809097388032, 94809097396223, +STORE, 140194992517120, 140194994769919, +SNULL, 140194992660479, 140194994769919, +STORE, 140194992517120, 140194992660479, +STORE, 140194992660480, 140194994769919, +ERASE, 140194992660480, 140194994769919, +STORE, 140194994757632, 140194994765823, +STORE, 140194994765824, 140194994769919, +STORE, 140736026173440, 
140736026177535, +STORE, 140736026161152, 140736026173439, +STORE, 140194994728960, 140194994757631, +STORE, 140194994720768, 140194994728959, +STORE, 140194988720128, 140194992517119, +SNULL, 140194988720128, 140194990379007, +STORE, 140194990379008, 140194992517119, +STORE, 140194988720128, 140194990379007, +SNULL, 140194992476159, 140194992517119, +STORE, 140194990379008, 140194992476159, +STORE, 140194992476160, 140194992517119, +SNULL, 140194992476160, 140194992500735, +STORE, 140194992500736, 140194992517119, +STORE, 140194992476160, 140194992500735, +ERASE, 140194992476160, 140194992500735, +STORE, 140194992476160, 140194992500735, +ERASE, 140194992500736, 140194992517119, +STORE, 140194992500736, 140194992517119, +SNULL, 140194992492543, 140194992500735, +STORE, 140194992476160, 140194992492543, +STORE, 140194992492544, 140194992500735, +SNULL, 94809097383935, 94809097388031, +STORE, 94809097375744, 94809097383935, +STORE, 94809097383936, 94809097388031, +SNULL, 140194994761727, 140194994765823, +STORE, 140194994757632, 140194994761727, +STORE, 140194994761728, 140194994765823, +ERASE, 140194994728960, 140194994757631, +STORE, 94809124286464, 94809124421631, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140726342660096, 140737488351231, +SNULL, 140726342668287, 140737488351231, +STORE, 140726342660096, 140726342668287, +STORE, 140726342529024, 140726342668287, +STORE, 94140331462656, 94140333686783, +SNULL, 94140331573247, 94140333686783, +STORE, 94140331462656, 94140331573247, +STORE, 94140331573248, 94140333686783, +ERASE, 94140331573248, 94140333686783, +STORE, 94140333666304, 94140333678591, +STORE, 94140333678592, 94140333686783, +STORE, 140714077208576, 140714079461375, +SNULL, 140714077351935, 140714079461375, +STORE, 140714077208576, 140714077351935, +STORE, 140714077351936, 140714079461375, +ERASE, 140714077351936, 140714079461375, +STORE, 140714079449088, 140714079457279, +STORE, 140714079457280, 140714079461375, +STORE, 140726343933952, 140726343938047, +STORE, 140726343921664, 140726343933951, +STORE, 140714079420416, 140714079449087, +STORE, 140714079412224, 140714079420415, +STORE, 140714073411584, 140714077208575, +SNULL, 140714073411584, 140714075070463, +STORE, 140714075070464, 140714077208575, +STORE, 140714073411584, 140714075070463, +SNULL, 140714077167615, 140714077208575, +STORE, 140714075070464, 140714077167615, +STORE, 140714077167616, 140714077208575, +SNULL, 140714077167616, 140714077192191, +STORE, 140714077192192, 140714077208575, +STORE, 140714077167616, 140714077192191, +ERASE, 140714077167616, 140714077192191, +STORE, 140714077167616, 140714077192191, +ERASE, 140714077192192, 140714077208575, +STORE, 140714077192192, 140714077208575, +SNULL, 140714077183999, 140714077192191, +STORE, 140714077167616, 140714077183999, +STORE, 140714077184000, 140714077192191, +SNULL, 94140333674495, 94140333678591, +STORE, 94140333666304, 94140333674495, +STORE, 94140333674496, 94140333678591, +SNULL, 140714079453183, 140714079457279, +STORE, 140714079449088, 140714079453183, +STORE, 140714079453184, 140714079457279, +ERASE, 140714079420416, 140714079449087, +STORE, 94140341432320, 94140341567487, +STORE, 94431504838656, 94431505051647, +STORE, 94431507148800, 94431507152895, +STORE, 94431507152896, 94431507161087, +STORE, 94431507161088, 94431507173375, +STORE, 94431510286336, 94431539601407, +STORE, 139818797948928, 139818799607807, +STORE, 139818799607808, 139818801704959, +STORE, 139818801704960, 139818801721343, +STORE, 
139818801721344, 139818801729535, +STORE, 139818801729536, 139818801745919, +STORE, 139818801745920, 139818801758207, +STORE, 139818801758208, 139818803851263, +STORE, 139818803851264, 139818803855359, +STORE, 139818803855360, 139818803859455, +STORE, 139818803859456, 139818804002815, +STORE, 139818804371456, 139818806054911, +STORE, 139818806054912, 139818806071295, +STORE, 139818806099968, 139818806104063, +STORE, 139818806104064, 139818806108159, +STORE, 139818806108160, 139818806112255, +STORE, 140731430457344, 140731430596607, +STORE, 140731431227392, 140731431239679, +STORE, 140731431239680, 140731431243775, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140725843607552, 140737488351231, +SNULL, 140725843615743, 140737488351231, +STORE, 140725843607552, 140725843615743, +STORE, 140725843476480, 140725843615743, +STORE, 94889043505152, 94889045839871, +SNULL, 94889043718143, 94889045839871, +STORE, 94889043505152, 94889043718143, +STORE, 94889043718144, 94889045839871, +ERASE, 94889043718144, 94889045839871, +STORE, 94889045815296, 94889045827583, +STORE, 94889045827584, 94889045839871, +STORE, 140250965946368, 140250968199167, +SNULL, 140250966089727, 140250968199167, +STORE, 140250965946368, 140250966089727, +STORE, 140250966089728, 140250968199167, +ERASE, 140250966089728, 140250968199167, +STORE, 140250968186880, 140250968195071, +STORE, 140250968195072, 140250968199167, +STORE, 140725844500480, 140725844504575, +STORE, 140725844488192, 140725844500479, +STORE, 140250968158208, 140250968186879, +STORE, 140250968150016, 140250968158207, +STORE, 140250963832832, 140250965946367, +SNULL, 140250963832832, 140250963845119, +STORE, 140250963845120, 140250965946367, +STORE, 140250963832832, 140250963845119, +SNULL, 140250965938175, 140250965946367, +STORE, 140250963845120, 140250965938175, +STORE, 140250965938176, 140250965946367, +ERASE, 140250965938176, 140250965946367, +STORE, 140250965938176, 140250965946367, +STORE, 140250960035840, 140250963832831, +SNULL, 140250960035840, 140250961694719, +STORE, 140250961694720, 140250963832831, +STORE, 140250960035840, 140250961694719, +SNULL, 140250963791871, 140250963832831, +STORE, 140250961694720, 140250963791871, +STORE, 140250963791872, 140250963832831, +SNULL, 140250963791872, 140250963816447, +STORE, 140250963816448, 140250963832831, +STORE, 140250963791872, 140250963816447, +ERASE, 140250963791872, 140250963816447, +STORE, 140250963791872, 140250963816447, +ERASE, 140250963816448, 140250963832831, +STORE, 140250963816448, 140250963832831, +STORE, 140250968141824, 140250968158207, +SNULL, 140250963808255, 140250963816447, +STORE, 140250963791872, 140250963808255, +STORE, 140250963808256, 140250963816447, +SNULL, 140250965942271, 140250965946367, +STORE, 140250965938176, 140250965942271, +STORE, 140250965942272, 140250965946367, +SNULL, 94889045819391, 94889045827583, +STORE, 94889045815296, 94889045819391, +STORE, 94889045819392, 94889045827583, +SNULL, 140250968190975, 140250968195071, +STORE, 140250968186880, 140250968190975, +STORE, 140250968190976, 140250968195071, +ERASE, 140250968158208, 140250968186879, +STORE, 94889052213248, 94889052348415, +STORE, 140250966458368, 140250968141823, +STORE, 94889052213248, 94889052483583, +STORE, 94889052213248, 94889052618751, +STORE, 94170851819520, 94170852032511, +STORE, 94170854129664, 94170854133759, +STORE, 94170854133760, 94170854141951, +STORE, 94170854141952, 94170854154239, +STORE, 94170866515968, 94170867740671, +STORE, 140062030422016, 
140062032080895, +STORE, 140062032080896, 140062034178047, +STORE, 140062034178048, 140062034194431, +STORE, 140062034194432, 140062034202623, +STORE, 140062034202624, 140062034219007, +STORE, 140062034219008, 140062034231295, +STORE, 140062034231296, 140062036324351, +STORE, 140062036324352, 140062036328447, +STORE, 140062036328448, 140062036332543, +STORE, 140062036332544, 140062036475903, +STORE, 140062036844544, 140062038527999, +STORE, 140062038528000, 140062038544383, +STORE, 140062038573056, 140062038577151, +STORE, 140062038577152, 140062038581247, +STORE, 140062038581248, 140062038585343, +STORE, 140736210550784, 140736210690047, +STORE, 140736210759680, 140736210771967, +STORE, 140736210771968, 140736210776063, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724272365568, 140737488351231, +SNULL, 140724272373759, 140737488351231, +STORE, 140724272365568, 140724272373759, +STORE, 140724272234496, 140724272373759, +STORE, 94607711965184, 94607714189311, +SNULL, 94607712075775, 94607714189311, +STORE, 94607711965184, 94607712075775, +STORE, 94607712075776, 94607714189311, +ERASE, 94607712075776, 94607714189311, +STORE, 94607714168832, 94607714181119, +STORE, 94607714181120, 94607714189311, +STORE, 140054949253120, 140054951505919, +SNULL, 140054949396479, 140054951505919, +STORE, 140054949253120, 140054949396479, +STORE, 140054949396480, 140054951505919, +ERASE, 140054949396480, 140054951505919, +STORE, 140054951493632, 140054951501823, +STORE, 140054951501824, 140054951505919, +STORE, 140724272992256, 140724272996351, +STORE, 140724272979968, 140724272992255, +STORE, 140054951464960, 140054951493631, +STORE, 140054951456768, 140054951464959, +STORE, 140054945456128, 140054949253119, +SNULL, 140054945456128, 140054947115007, +STORE, 140054947115008, 140054949253119, +STORE, 140054945456128, 140054947115007, +SNULL, 140054949212159, 140054949253119, +STORE, 140054947115008, 140054949212159, +STORE, 140054949212160, 140054949253119, +SNULL, 140054949212160, 140054949236735, +STORE, 140054949236736, 140054949253119, +STORE, 140054949212160, 140054949236735, +ERASE, 140054949212160, 140054949236735, +STORE, 140054949212160, 140054949236735, +ERASE, 140054949236736, 140054949253119, +STORE, 140054949236736, 140054949253119, +SNULL, 140054949228543, 140054949236735, +STORE, 140054949212160, 140054949228543, +STORE, 140054949228544, 140054949236735, +SNULL, 94607714177023, 94607714181119, +STORE, 94607714168832, 94607714177023, +STORE, 94607714177024, 94607714181119, +SNULL, 140054951497727, 140054951501823, +STORE, 140054951493632, 140054951497727, +STORE, 140054951497728, 140054951501823, +ERASE, 140054951464960, 140054951493631, +STORE, 94607733374976, 94607733510143, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733586923520, 140737488351231, +SNULL, 140733586931711, 140737488351231, +STORE, 140733586923520, 140733586931711, +STORE, 140733586792448, 140733586931711, +STORE, 93901634904064, 93901637128191, +SNULL, 93901635014655, 93901637128191, +STORE, 93901634904064, 93901635014655, +STORE, 93901635014656, 93901637128191, +ERASE, 93901635014656, 93901637128191, +STORE, 93901637107712, 93901637119999, +STORE, 93901637120000, 93901637128191, +STORE, 140086104784896, 140086107037695, +SNULL, 140086104928255, 140086107037695, +STORE, 140086104784896, 140086104928255, +STORE, 140086104928256, 140086107037695, +ERASE, 140086104928256, 140086107037695, +STORE, 140086107025408, 140086107033599, 
+STORE, 140086107033600, 140086107037695, +STORE, 140733587263488, 140733587267583, +STORE, 140733587251200, 140733587263487, +STORE, 140086106996736, 140086107025407, +STORE, 140086106988544, 140086106996735, +STORE, 140086100987904, 140086104784895, +SNULL, 140086100987904, 140086102646783, +STORE, 140086102646784, 140086104784895, +STORE, 140086100987904, 140086102646783, +SNULL, 140086104743935, 140086104784895, +STORE, 140086102646784, 140086104743935, +STORE, 140086104743936, 140086104784895, +SNULL, 140086104743936, 140086104768511, +STORE, 140086104768512, 140086104784895, +STORE, 140086104743936, 140086104768511, +ERASE, 140086104743936, 140086104768511, +STORE, 140086104743936, 140086104768511, +ERASE, 140086104768512, 140086104784895, +STORE, 140086104768512, 140086104784895, +SNULL, 140086104760319, 140086104768511, +STORE, 140086104743936, 140086104760319, +STORE, 140086104760320, 140086104768511, +SNULL, 93901637115903, 93901637119999, +STORE, 93901637107712, 93901637115903, +STORE, 93901637115904, 93901637119999, +SNULL, 140086107029503, 140086107033599, +STORE, 140086107025408, 140086107029503, +STORE, 140086107029504, 140086107033599, +ERASE, 140086106996736, 140086107025407, +STORE, 93901662715904, 93901662851071, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723365613568, 140737488351231, +SNULL, 140723365621759, 140737488351231, +STORE, 140723365613568, 140723365621759, +STORE, 140723365482496, 140723365621759, +STORE, 94759193546752, 94759195770879, +SNULL, 94759193657343, 94759195770879, +STORE, 94759193546752, 94759193657343, +STORE, 94759193657344, 94759195770879, +ERASE, 94759193657344, 94759195770879, +STORE, 94759195750400, 94759195762687, +STORE, 94759195762688, 94759195770879, +STORE, 140607636246528, 140607638499327, +SNULL, 140607636389887, 140607638499327, +STORE, 140607636246528, 140607636389887, +STORE, 140607636389888, 140607638499327, +ERASE, 140607636389888, 140607638499327, +STORE, 140607638487040, 140607638495231, +STORE, 140607638495232, 140607638499327, +STORE, 140723365900288, 140723365904383, +STORE, 140723365888000, 140723365900287, +STORE, 140607638458368, 140607638487039, +STORE, 140607638450176, 140607638458367, +STORE, 140607632449536, 140607636246527, +SNULL, 140607632449536, 140607634108415, +STORE, 140607634108416, 140607636246527, +STORE, 140607632449536, 140607634108415, +SNULL, 140607636205567, 140607636246527, +STORE, 140607634108416, 140607636205567, +STORE, 140607636205568, 140607636246527, +SNULL, 140607636205568, 140607636230143, +STORE, 140607636230144, 140607636246527, +STORE, 140607636205568, 140607636230143, +ERASE, 140607636205568, 140607636230143, +STORE, 140607636205568, 140607636230143, +ERASE, 140607636230144, 140607636246527, +STORE, 140607636230144, 140607636246527, +SNULL, 140607636221951, 140607636230143, +STORE, 140607636205568, 140607636221951, +STORE, 140607636221952, 140607636230143, +SNULL, 94759195758591, 94759195762687, +STORE, 94759195750400, 94759195758591, +STORE, 94759195758592, 94759195762687, +SNULL, 140607638491135, 140607638495231, +STORE, 140607638487040, 140607638491135, +STORE, 140607638491136, 140607638495231, +ERASE, 140607638458368, 140607638487039, +STORE, 94759204995072, 94759205130239, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140732503789568, 140737488351231, +SNULL, 140732503797759, 140737488351231, +STORE, 140732503789568, 140732503797759, +STORE, 140732503658496, 140732503797759, +STORE, 
94077792956416, 94077795180543, +SNULL, 94077793067007, 94077795180543, +STORE, 94077792956416, 94077793067007, +STORE, 94077793067008, 94077795180543, +ERASE, 94077793067008, 94077795180543, +STORE, 94077795160064, 94077795172351, +STORE, 94077795172352, 94077795180543, +STORE, 140359874252800, 140359876505599, +SNULL, 140359874396159, 140359876505599, +STORE, 140359874252800, 140359874396159, +STORE, 140359874396160, 140359876505599, +ERASE, 140359874396160, 140359876505599, +STORE, 140359876493312, 140359876501503, +STORE, 140359876501504, 140359876505599, +STORE, 140732504465408, 140732504469503, +STORE, 140732504453120, 140732504465407, +STORE, 140359876464640, 140359876493311, +STORE, 140359876456448, 140359876464639, +STORE, 140359870455808, 140359874252799, +SNULL, 140359870455808, 140359872114687, +STORE, 140359872114688, 140359874252799, +STORE, 140359870455808, 140359872114687, +SNULL, 140359874211839, 140359874252799, +STORE, 140359872114688, 140359874211839, +STORE, 140359874211840, 140359874252799, +SNULL, 140359874211840, 140359874236415, +STORE, 140359874236416, 140359874252799, +STORE, 140359874211840, 140359874236415, +ERASE, 140359874211840, 140359874236415, +STORE, 140359874211840, 140359874236415, +ERASE, 140359874236416, 140359874252799, +STORE, 140359874236416, 140359874252799, +SNULL, 140359874228223, 140359874236415, +STORE, 140359874211840, 140359874228223, +STORE, 140359874228224, 140359874236415, +SNULL, 94077795168255, 94077795172351, +STORE, 94077795160064, 94077795168255, +STORE, 94077795168256, 94077795172351, +SNULL, 140359876497407, 140359876501503, +STORE, 140359876493312, 140359876497407, +STORE, 140359876497408, 140359876501503, +ERASE, 140359876464640, 140359876493311, +STORE, 94077808717824, 94077808852991, +STORE, 94549486252032, 94549486465023, +STORE, 94549488562176, 94549488566271, +STORE, 94549488566272, 94549488574463, +STORE, 94549488574464, 94549488586751, +STORE, 94549503492096, 94549506121727, +STORE, 140085800894464, 140085802553343, +STORE, 140085802553344, 140085804650495, +STORE, 140085804650496, 140085804666879, +STORE, 140085804666880, 140085804675071, +STORE, 140085804675072, 140085804691455, +STORE, 140085804691456, 140085804703743, +STORE, 140085804703744, 140085806796799, +STORE, 140085806796800, 140085806800895, +STORE, 140085806800896, 140085806804991, +STORE, 140085806804992, 140085806948351, +STORE, 140085807316992, 140085809000447, +STORE, 140085809000448, 140085809016831, +STORE, 140085809045504, 140085809049599, +STORE, 140085809049600, 140085809053695, +STORE, 140085809053696, 140085809057791, +STORE, 140731810545664, 140731810684927, +STORE, 140731810967552, 140731810979839, +STORE, 140731810979840, 140731810983935, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724752330752, 140737488351231, +SNULL, 140724752338943, 140737488351231, +STORE, 140724752330752, 140724752338943, +STORE, 140724752199680, 140724752338943, +STORE, 94656357539840, 94656359874559, +SNULL, 94656357752831, 94656359874559, +STORE, 94656357539840, 94656357752831, +STORE, 94656357752832, 94656359874559, +ERASE, 94656357752832, 94656359874559, +STORE, 94656359849984, 94656359862271, +STORE, 94656359862272, 94656359874559, +STORE, 139632585203712, 139632587456511, +SNULL, 139632585347071, 139632587456511, +STORE, 139632585203712, 139632585347071, +STORE, 139632585347072, 139632587456511, +ERASE, 139632585347072, 139632587456511, +STORE, 139632587444224, 139632587452415, +STORE, 139632587452416, 
139632587456511, +STORE, 139632587440128, 139632587444223, +STORE, 139632587427840, 139632587440127, +STORE, 139632587399168, 139632587427839, +STORE, 139632587390976, 139632587399167, +STORE, 139632583090176, 139632585203711, +SNULL, 139632583090176, 139632583102463, +STORE, 139632583102464, 139632585203711, +STORE, 139632583090176, 139632583102463, +SNULL, 139632585195519, 139632585203711, +STORE, 139632583102464, 139632585195519, +STORE, 139632585195520, 139632585203711, +ERASE, 139632585195520, 139632585203711, +STORE, 139632585195520, 139632585203711, +STORE, 139632579293184, 139632583090175, +SNULL, 139632579293184, 139632580952063, +STORE, 139632580952064, 139632583090175, +STORE, 139632579293184, 139632580952063, +SNULL, 139632583049215, 139632583090175, +STORE, 139632580952064, 139632583049215, +STORE, 139632583049216, 139632583090175, +SNULL, 139632583049216, 139632583073791, +STORE, 139632583073792, 139632583090175, +STORE, 139632583049216, 139632583073791, +ERASE, 139632583049216, 139632583073791, +STORE, 139632583049216, 139632583073791, +ERASE, 139632583073792, 139632583090175, +STORE, 139632583073792, 139632583090175, +STORE, 139632587382784, 139632587399167, +SNULL, 139632583065599, 139632583073791, +STORE, 139632583049216, 139632583065599, +STORE, 139632583065600, 139632583073791, +SNULL, 139632585199615, 139632585203711, +STORE, 139632585195520, 139632585199615, +STORE, 139632585199616, 139632585203711, +SNULL, 94656359854079, 94656359862271, +STORE, 94656359849984, 94656359854079, +STORE, 94656359854080, 94656359862271, +SNULL, 139632587448319, 139632587452415, +STORE, 139632587444224, 139632587448319, +STORE, 139632587448320, 139632587452415, +ERASE, 139632587399168, 139632587427839, +STORE, 94656378912768, 94656379047935, +STORE, 139632585699328, 139632587382783, +STORE, 94656378912768, 94656379183103, +STORE, 94656378912768, 94656379318271, +STORE, 94656378912768, 94656379494399, +SNULL, 94656379469823, 94656379494399, +STORE, 94656378912768, 94656379469823, +STORE, 94656379469824, 94656379494399, +ERASE, 94656379469824, 94656379494399, +STORE, 94656378912768, 94656379621375, +STORE, 94656378912768, 94656379756543, +STORE, 94656378912768, 94656379912191, +STORE, 94656378912768, 94656380055551, +STORE, 94656378912768, 94656380190719, +STORE, 94656378912768, 94656380338175, +SNULL, 94656380313599, 94656380338175, +STORE, 94656378912768, 94656380313599, +STORE, 94656380313600, 94656380338175, +ERASE, 94656380313600, 94656380338175, +STORE, 94656378912768, 94656380448767, +SNULL, 94656380432383, 94656380448767, +STORE, 94656378912768, 94656380432383, +STORE, 94656380432384, 94656380448767, +ERASE, 94656380432384, 94656380448767, +STORE, 94656378912768, 94656380567551, +STORE, 94656378912768, 94656380719103, +STORE, 94656378912768, 94656380858367, +STORE, 94656378912768, 94656380997631, +STORE, 94656378912768, 94656381132799, +SNULL, 94656381124607, 94656381132799, +STORE, 94656378912768, 94656381124607, +STORE, 94656381124608, 94656381132799, +ERASE, 94656381124608, 94656381132799, +STORE, 94656378912768, 94656381276159, +STORE, 94656378912768, 94656381427711, +STORE, 94604087611392, 94604087824383, +STORE, 94604089921536, 94604089925631, +STORE, 94604089925632, 94604089933823, +STORE, 94604089933824, 94604089946111, +STORE, 94604105125888, 94604106424319, +STORE, 140454937694208, 140454939353087, +STORE, 140454939353088, 140454941450239, +STORE, 140454941450240, 140454941466623, +STORE, 140454941466624, 140454941474815, +STORE, 140454941474816, 140454941491199, +STORE, 
140454941491200, 140454941503487, +STORE, 140454941503488, 140454943596543, +STORE, 140454943596544, 140454943600639, +STORE, 140454943600640, 140454943604735, +STORE, 140454943604736, 140454943748095, +STORE, 140454944116736, 140454945800191, +STORE, 140454945800192, 140454945816575, +STORE, 140454945845248, 140454945849343, +STORE, 140454945849344, 140454945853439, +STORE, 140454945853440, 140454945857535, +STORE, 140728438214656, 140728438353919, +STORE, 140728439095296, 140728439107583, +STORE, 140728439107584, 140728439111679, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140727821099008, 140737488351231, +SNULL, 140727821107199, 140737488351231, +STORE, 140727821099008, 140727821107199, +STORE, 140727820967936, 140727821107199, +STORE, 94088457240576, 94088459575295, +SNULL, 94088457453567, 94088459575295, +STORE, 94088457240576, 94088457453567, +STORE, 94088457453568, 94088459575295, +ERASE, 94088457453568, 94088459575295, +STORE, 94088459550720, 94088459563007, +STORE, 94088459563008, 94088459575295, +STORE, 140234378989568, 140234381242367, +SNULL, 140234379132927, 140234381242367, +STORE, 140234378989568, 140234379132927, +STORE, 140234379132928, 140234381242367, +ERASE, 140234379132928, 140234381242367, +STORE, 140234381230080, 140234381238271, +STORE, 140234381238272, 140234381242367, +STORE, 140727822077952, 140727822082047, +STORE, 140727822065664, 140727822077951, +STORE, 140234381201408, 140234381230079, +STORE, 140234381193216, 140234381201407, +STORE, 140234376876032, 140234378989567, +SNULL, 140234376876032, 140234376888319, +STORE, 140234376888320, 140234378989567, +STORE, 140234376876032, 140234376888319, +SNULL, 140234378981375, 140234378989567, +STORE, 140234376888320, 140234378981375, +STORE, 140234378981376, 140234378989567, +ERASE, 140234378981376, 140234378989567, +STORE, 140234378981376, 140234378989567, +STORE, 140234373079040, 140234376876031, +SNULL, 140234373079040, 140234374737919, +STORE, 140234374737920, 140234376876031, +STORE, 140234373079040, 140234374737919, +SNULL, 140234376835071, 140234376876031, +STORE, 140234374737920, 140234376835071, +STORE, 140234376835072, 140234376876031, +SNULL, 140234376835072, 140234376859647, +STORE, 140234376859648, 140234376876031, +STORE, 140234376835072, 140234376859647, +ERASE, 140234376835072, 140234376859647, +STORE, 140234376835072, 140234376859647, +ERASE, 140234376859648, 140234376876031, +STORE, 140234376859648, 140234376876031, +STORE, 140234381185024, 140234381201407, +SNULL, 140234376851455, 140234376859647, +STORE, 140234376835072, 140234376851455, +STORE, 140234376851456, 140234376859647, +SNULL, 140234378985471, 140234378989567, +STORE, 140234378981376, 140234378985471, +STORE, 140234378985472, 140234378989567, +SNULL, 94088459554815, 94088459563007, +STORE, 94088459550720, 94088459554815, +STORE, 94088459554816, 94088459563007, +SNULL, 140234381234175, 140234381238271, +STORE, 140234381230080, 140234381234175, +STORE, 140234381234176, 140234381238271, +ERASE, 140234381201408, 140234381230079, +STORE, 94088468852736, 94088468987903, +STORE, 140234379501568, 140234381185023, +STORE, 94088468852736, 94088469123071, +STORE, 94088468852736, 94088469258239, +STORE, 94110050402304, 94110050615295, +STORE, 94110052712448, 94110052716543, +STORE, 94110052716544, 94110052724735, +STORE, 94110052724736, 94110052737023, +STORE, 94110061875200, 94110062415871, +STORE, 140139439357952, 140139441016831, +STORE, 140139441016832, 140139443113983, +STORE, 140139443113984, 
140139443130367, +STORE, 140139443130368, 140139443138559, +STORE, 140139443138560, 140139443154943, +STORE, 140139443154944, 140139443167231, +STORE, 140139443167232, 140139445260287, +STORE, 140139445260288, 140139445264383, +STORE, 140139445264384, 140139445268479, +STORE, 140139445268480, 140139445411839, +STORE, 140139445780480, 140139447463935, +STORE, 140139447463936, 140139447480319, +STORE, 140139447508992, 140139447513087, +STORE, 140139447513088, 140139447517183, +STORE, 140139447517184, 140139447521279, +STORE, 140731901427712, 140731901566975, +STORE, 140731902259200, 140731902271487, +STORE, 140731902271488, 140731902275583, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140727282622464, 140737488351231, +SNULL, 140727282630655, 140737488351231, +STORE, 140727282622464, 140727282630655, +STORE, 140727282491392, 140727282630655, +STORE, 94266649866240, 94266652200959, +SNULL, 94266650079231, 94266652200959, +STORE, 94266649866240, 94266650079231, +STORE, 94266650079232, 94266652200959, +ERASE, 94266650079232, 94266652200959, +STORE, 94266652176384, 94266652188671, +STORE, 94266652188672, 94266652200959, +STORE, 139888497991680, 139888500244479, +SNULL, 139888498135039, 139888500244479, +STORE, 139888497991680, 139888498135039, +STORE, 139888498135040, 139888500244479, +ERASE, 139888498135040, 139888500244479, +STORE, 139888500232192, 139888500240383, +STORE, 139888500240384, 139888500244479, +STORE, 140727283113984, 140727283118079, +STORE, 140727283101696, 140727283113983, +STORE, 139888500203520, 139888500232191, +STORE, 139888500195328, 139888500203519, +STORE, 139888495878144, 139888497991679, +SNULL, 139888495878144, 139888495890431, +STORE, 139888495890432, 139888497991679, +STORE, 139888495878144, 139888495890431, +SNULL, 139888497983487, 139888497991679, +STORE, 139888495890432, 139888497983487, +STORE, 139888497983488, 139888497991679, +ERASE, 139888497983488, 139888497991679, +STORE, 139888497983488, 139888497991679, +STORE, 139888492081152, 139888495878143, +SNULL, 139888492081152, 139888493740031, +STORE, 139888493740032, 139888495878143, +STORE, 139888492081152, 139888493740031, +SNULL, 139888495837183, 139888495878143, +STORE, 139888493740032, 139888495837183, +STORE, 139888495837184, 139888495878143, +SNULL, 139888495837184, 139888495861759, +STORE, 139888495861760, 139888495878143, +STORE, 139888495837184, 139888495861759, +ERASE, 139888495837184, 139888495861759, +STORE, 139888495837184, 139888495861759, +ERASE, 139888495861760, 139888495878143, +STORE, 139888495861760, 139888495878143, +STORE, 139888500187136, 139888500203519, +SNULL, 139888495853567, 139888495861759, +STORE, 139888495837184, 139888495853567, +STORE, 139888495853568, 139888495861759, +SNULL, 139888497987583, 139888497991679, +STORE, 139888497983488, 139888497987583, +STORE, 139888497987584, 139888497991679, +SNULL, 94266652180479, 94266652188671, +STORE, 94266652176384, 94266652180479, +STORE, 94266652180480, 94266652188671, +SNULL, 139888500236287, 139888500240383, +STORE, 139888500232192, 139888500236287, +STORE, 139888500236288, 139888500240383, +ERASE, 139888500203520, 139888500232191, +STORE, 94266678542336, 94266678677503, +STORE, 139888498503680, 139888500187135, +STORE, 94266678542336, 94266678812671, +STORE, 94266678542336, 94266678947839, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722507702272, 140737488351231, +SNULL, 140722507710463, 140737488351231, +STORE, 140722507702272, 
140722507710463, +STORE, 140722507571200, 140722507710463, +STORE, 94313981394944, 94313983729663, +SNULL, 94313981607935, 94313983729663, +STORE, 94313981394944, 94313981607935, +STORE, 94313981607936, 94313983729663, +ERASE, 94313981607936, 94313983729663, +STORE, 94313983705088, 94313983717375, +STORE, 94313983717376, 94313983729663, +STORE, 140456286076928, 140456288329727, +SNULL, 140456286220287, 140456288329727, +STORE, 140456286076928, 140456286220287, +STORE, 140456286220288, 140456288329727, +ERASE, 140456286220288, 140456288329727, +STORE, 140456288317440, 140456288325631, +STORE, 140456288325632, 140456288329727, +STORE, 140722507997184, 140722508001279, +STORE, 140722507984896, 140722507997183, +STORE, 140456288288768, 140456288317439, +STORE, 140456288280576, 140456288288767, +STORE, 140456283963392, 140456286076927, +SNULL, 140456283963392, 140456283975679, +STORE, 140456283975680, 140456286076927, +STORE, 140456283963392, 140456283975679, +SNULL, 140456286068735, 140456286076927, +STORE, 140456283975680, 140456286068735, +STORE, 140456286068736, 140456286076927, +ERASE, 140456286068736, 140456286076927, +STORE, 140456286068736, 140456286076927, +STORE, 140456280166400, 140456283963391, +SNULL, 140456280166400, 140456281825279, +STORE, 140456281825280, 140456283963391, +STORE, 140456280166400, 140456281825279, +SNULL, 140456283922431, 140456283963391, +STORE, 140456281825280, 140456283922431, +STORE, 140456283922432, 140456283963391, +SNULL, 140456283922432, 140456283947007, +STORE, 140456283947008, 140456283963391, +STORE, 140456283922432, 140456283947007, +ERASE, 140456283922432, 140456283947007, +STORE, 140456283922432, 140456283947007, +ERASE, 140456283947008, 140456283963391, +STORE, 140456283947008, 140456283963391, +STORE, 140456288272384, 140456288288767, +SNULL, 140456283938815, 140456283947007, +STORE, 140456283922432, 140456283938815, +STORE, 140456283938816, 140456283947007, +SNULL, 140456286072831, 140456286076927, +STORE, 140456286068736, 140456286072831, +STORE, 140456286072832, 140456286076927, +SNULL, 94313983709183, 94313983717375, +STORE, 94313983705088, 94313983709183, +STORE, 94313983709184, 94313983717375, +SNULL, 140456288321535, 140456288325631, +STORE, 140456288317440, 140456288321535, +STORE, 140456288321536, 140456288325631, +ERASE, 140456288288768, 140456288317439, +STORE, 94314006716416, 94314006851583, +STORE, 140456286588928, 140456288272383, +STORE, 94314006716416, 94314006986751, +STORE, 94314006716416, 94314007121919, +STORE, 93948644454400, 93948644667391, +STORE, 93948646764544, 93948646768639, +STORE, 93948646768640, 93948646776831, +STORE, 93948646776832, 93948646789119, +STORE, 93948664999936, 93948667142143, +STORE, 140187350659072, 140187352317951, +STORE, 140187352317952, 140187354415103, +STORE, 140187354415104, 140187354431487, +STORE, 140187354431488, 140187354439679, +STORE, 140187354439680, 140187354456063, +STORE, 140187354456064, 140187354468351, +STORE, 140187354468352, 140187356561407, +STORE, 140187356561408, 140187356565503, +STORE, 140187356565504, 140187356569599, +STORE, 140187356569600, 140187356712959, +STORE, 140187357081600, 140187358765055, +STORE, 140187358765056, 140187358781439, +STORE, 140187358810112, 140187358814207, +STORE, 140187358814208, 140187358818303, +STORE, 140187358818304, 140187358822399, +STORE, 140730484518912, 140730484658175, +STORE, 140730485690368, 140730485702655, +STORE, 140730485702656, 140730485706751, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, 
+STORE, 140721211551744, 140737488351231, +SNULL, 140721211559935, 140737488351231, +STORE, 140721211551744, 140721211559935, +STORE, 140721211420672, 140721211559935, +STORE, 94105221423104, 94105223757823, +SNULL, 94105221636095, 94105223757823, +STORE, 94105221423104, 94105221636095, +STORE, 94105221636096, 94105223757823, +ERASE, 94105221636096, 94105223757823, +STORE, 94105223733248, 94105223745535, +STORE, 94105223745536, 94105223757823, +STORE, 140474453676032, 140474455928831, +SNULL, 140474453819391, 140474455928831, +STORE, 140474453676032, 140474453819391, +STORE, 140474453819392, 140474455928831, +ERASE, 140474453819392, 140474455928831, +STORE, 140474455916544, 140474455924735, +STORE, 140474455924736, 140474455928831, +STORE, 140721211703296, 140721211707391, +STORE, 140721211691008, 140721211703295, +STORE, 140474455887872, 140474455916543, +STORE, 140474455879680, 140474455887871, +STORE, 140474451562496, 140474453676031, +SNULL, 140474451562496, 140474451574783, +STORE, 140474451574784, 140474453676031, +STORE, 140474451562496, 140474451574783, +SNULL, 140474453667839, 140474453676031, +STORE, 140474451574784, 140474453667839, +STORE, 140474453667840, 140474453676031, +ERASE, 140474453667840, 140474453676031, +STORE, 140474453667840, 140474453676031, +STORE, 140474447765504, 140474451562495, +SNULL, 140474447765504, 140474449424383, +STORE, 140474449424384, 140474451562495, +STORE, 140474447765504, 140474449424383, +SNULL, 140474451521535, 140474451562495, +STORE, 140474449424384, 140474451521535, +STORE, 140474451521536, 140474451562495, +SNULL, 140474451521536, 140474451546111, +STORE, 140474451546112, 140474451562495, +STORE, 140474451521536, 140474451546111, +ERASE, 140474451521536, 140474451546111, +STORE, 140474451521536, 140474451546111, +ERASE, 140474451546112, 140474451562495, +STORE, 140474451546112, 140474451562495, +STORE, 140474455871488, 140474455887871, +SNULL, 140474451537919, 140474451546111, +STORE, 140474451521536, 140474451537919, +STORE, 140474451537920, 140474451546111, +SNULL, 140474453671935, 140474453676031, +STORE, 140474453667840, 140474453671935, +STORE, 140474453671936, 140474453676031, +SNULL, 94105223737343, 94105223745535, +STORE, 94105223733248, 94105223737343, +STORE, 94105223737344, 94105223745535, +SNULL, 140474455920639, 140474455924735, +STORE, 140474455916544, 140474455920639, +STORE, 140474455920640, 140474455924735, +ERASE, 140474455887872, 140474455916543, +STORE, 94105238712320, 94105238847487, +STORE, 140474454188032, 140474455871487, +STORE, 94105238712320, 94105238982655, +STORE, 94105238712320, 94105239117823, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140732356354048, 140737488351231, +SNULL, 140732356362239, 140737488351231, +STORE, 140732356354048, 140732356362239, +STORE, 140732356222976, 140732356362239, +STORE, 94461165989888, 94461168324607, +SNULL, 94461166202879, 94461168324607, +STORE, 94461165989888, 94461166202879, +STORE, 94461166202880, 94461168324607, +ERASE, 94461166202880, 94461168324607, +STORE, 94461168300032, 94461168312319, +STORE, 94461168312320, 94461168324607, +STORE, 140317255110656, 140317257363455, +SNULL, 140317255254015, 140317257363455, +STORE, 140317255110656, 140317255254015, +STORE, 140317255254016, 140317257363455, +ERASE, 140317255254016, 140317257363455, +STORE, 140317257351168, 140317257359359, +STORE, 140317257359360, 140317257363455, +STORE, 140732356583424, 140732356587519, +STORE, 140732356571136, 140732356583423, +STORE, 140317257322496, 
140317257351167, +STORE, 140317257314304, 140317257322495, +STORE, 140317252997120, 140317255110655, +SNULL, 140317252997120, 140317253009407, +STORE, 140317253009408, 140317255110655, +STORE, 140317252997120, 140317253009407, +SNULL, 140317255102463, 140317255110655, +STORE, 140317253009408, 140317255102463, +STORE, 140317255102464, 140317255110655, +ERASE, 140317255102464, 140317255110655, +STORE, 140317255102464, 140317255110655, +STORE, 140317249200128, 140317252997119, +SNULL, 140317249200128, 140317250859007, +STORE, 140317250859008, 140317252997119, +STORE, 140317249200128, 140317250859007, +SNULL, 140317252956159, 140317252997119, +STORE, 140317250859008, 140317252956159, +STORE, 140317252956160, 140317252997119, +SNULL, 140317252956160, 140317252980735, +STORE, 140317252980736, 140317252997119, +STORE, 140317252956160, 140317252980735, +ERASE, 140317252956160, 140317252980735, +STORE, 140317252956160, 140317252980735, +ERASE, 140317252980736, 140317252997119, +STORE, 140317252980736, 140317252997119, +STORE, 140317257306112, 140317257322495, +SNULL, 140317252972543, 140317252980735, +STORE, 140317252956160, 140317252972543, +STORE, 140317252972544, 140317252980735, +SNULL, 140317255106559, 140317255110655, +STORE, 140317255102464, 140317255106559, +STORE, 140317255106560, 140317255110655, +SNULL, 94461168304127, 94461168312319, +STORE, 94461168300032, 94461168304127, +STORE, 94461168304128, 94461168312319, +SNULL, 140317257355263, 140317257359359, +STORE, 140317257351168, 140317257355263, +STORE, 140317257355264, 140317257359359, +ERASE, 140317257322496, 140317257351167, +STORE, 94461195268096, 94461195403263, +STORE, 140317255622656, 140317257306111, +STORE, 94461195268096, 94461195538431, +STORE, 94461195268096, 94461195673599, +STORE, 94110050402304, 94110050615295, +STORE, 94110052712448, 94110052716543, +STORE, 94110052716544, 94110052724735, +STORE, 94110052724736, 94110052737023, +STORE, 94110061875200, 94110062415871, +STORE, 140139439357952, 140139441016831, +STORE, 140139441016832, 140139443113983, +STORE, 140139443113984, 140139443130367, +STORE, 140139443130368, 140139443138559, +STORE, 140139443138560, 140139443154943, +STORE, 140139443154944, 140139443167231, +STORE, 140139443167232, 140139445260287, +STORE, 140139445260288, 140139445264383, +STORE, 140139445264384, 140139445268479, +STORE, 140139445268480, 140139445411839, +STORE, 140139445780480, 140139447463935, +STORE, 140139447463936, 140139447480319, +STORE, 140139447508992, 140139447513087, +STORE, 140139447513088, 140139447517183, +STORE, 140139447517184, 140139447521279, +STORE, 140731901427712, 140731901566975, +STORE, 140731902259200, 140731902271487, +STORE, 140731902271488, 140731902275583, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140720941613056, 140737488351231, +SNULL, 140720941621247, 140737488351231, +STORE, 140720941613056, 140720941621247, +STORE, 140720941481984, 140720941621247, +STORE, 93902377721856, 93902379945983, +SNULL, 93902377832447, 93902379945983, +STORE, 93902377721856, 93902377832447, +STORE, 93902377832448, 93902379945983, +ERASE, 93902377832448, 93902379945983, +STORE, 93902379925504, 93902379937791, +STORE, 93902379937792, 93902379945983, +STORE, 139836543635456, 139836545888255, +SNULL, 139836543778815, 139836545888255, +STORE, 139836543635456, 139836543778815, +STORE, 139836543778816, 139836545888255, +ERASE, 139836543778816, 139836545888255, +STORE, 139836545875968, 139836545884159, +STORE, 139836545884160, 139836545888255, 
+STORE, 140720941711360, 140720941715455, +STORE, 140720941699072, 140720941711359, +STORE, 139836545847296, 139836545875967, +STORE, 139836545839104, 139836545847295, +STORE, 139836539838464, 139836543635455, +SNULL, 139836539838464, 139836541497343, +STORE, 139836541497344, 139836543635455, +STORE, 139836539838464, 139836541497343, +SNULL, 139836543594495, 139836543635455, +STORE, 139836541497344, 139836543594495, +STORE, 139836543594496, 139836543635455, +SNULL, 139836543594496, 139836543619071, +STORE, 139836543619072, 139836543635455, +STORE, 139836543594496, 139836543619071, +ERASE, 139836543594496, 139836543619071, +STORE, 139836543594496, 139836543619071, +ERASE, 139836543619072, 139836543635455, +STORE, 139836543619072, 139836543635455, +SNULL, 139836543610879, 139836543619071, +STORE, 139836543594496, 139836543610879, +STORE, 139836543610880, 139836543619071, +SNULL, 93902379933695, 93902379937791, +STORE, 93902379925504, 93902379933695, +STORE, 93902379933696, 93902379937791, +SNULL, 139836545880063, 139836545884159, +STORE, 139836545875968, 139836545880063, +STORE, 139836545880064, 139836545884159, +ERASE, 139836545847296, 139836545875967, +STORE, 93902396891136, 93902397026303, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140736538206208, 140737488351231, +SNULL, 140736538214399, 140737488351231, +STORE, 140736538206208, 140736538214399, +STORE, 140736538075136, 140736538214399, +STORE, 94173471399936, 94173473734655, +SNULL, 94173471612927, 94173473734655, +STORE, 94173471399936, 94173471612927, +STORE, 94173471612928, 94173473734655, +ERASE, 94173471612928, 94173473734655, +STORE, 94173473710080, 94173473722367, +STORE, 94173473722368, 94173473734655, +STORE, 140035513556992, 140035515809791, +SNULL, 140035513700351, 140035515809791, +STORE, 140035513556992, 140035513700351, +STORE, 140035513700352, 140035515809791, +ERASE, 140035513700352, 140035515809791, +STORE, 140035515797504, 140035515805695, +STORE, 140035515805696, 140035515809791, +STORE, 140736538329088, 140736538333183, +STORE, 140736538316800, 140736538329087, +STORE, 140035515768832, 140035515797503, +STORE, 140035515760640, 140035515768831, +STORE, 140035511443456, 140035513556991, +SNULL, 140035511443456, 140035511455743, +STORE, 140035511455744, 140035513556991, +STORE, 140035511443456, 140035511455743, +SNULL, 140035513548799, 140035513556991, +STORE, 140035511455744, 140035513548799, +STORE, 140035513548800, 140035513556991, +ERASE, 140035513548800, 140035513556991, +STORE, 140035513548800, 140035513556991, +STORE, 140035507646464, 140035511443455, +SNULL, 140035507646464, 140035509305343, +STORE, 140035509305344, 140035511443455, +STORE, 140035507646464, 140035509305343, +SNULL, 140035511402495, 140035511443455, +STORE, 140035509305344, 140035511402495, +STORE, 140035511402496, 140035511443455, +SNULL, 140035511402496, 140035511427071, +STORE, 140035511427072, 140035511443455, +STORE, 140035511402496, 140035511427071, +ERASE, 140035511402496, 140035511427071, +STORE, 140035511402496, 140035511427071, +ERASE, 140035511427072, 140035511443455, +STORE, 140035511427072, 140035511443455, +STORE, 140035515752448, 140035515768831, +SNULL, 140035511418879, 140035511427071, +STORE, 140035511402496, 140035511418879, +STORE, 140035511418880, 140035511427071, +SNULL, 140035513552895, 140035513556991, +STORE, 140035513548800, 140035513552895, +STORE, 140035513552896, 140035513556991, +SNULL, 94173473714175, 94173473722367, +STORE, 94173473710080, 94173473714175, +STORE, 
94173473714176, 94173473722367, +SNULL, 140035515801599, 140035515805695, +STORE, 140035515797504, 140035515801599, +STORE, 140035515801600, 140035515805695, +ERASE, 140035515768832, 140035515797503, +STORE, 94173478645760, 94173478780927, +STORE, 140035514068992, 140035515752447, +STORE, 94173478645760, 94173478916095, +STORE, 94173478645760, 94173479051263, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140724216176640, 140737488351231, +SNULL, 140724216184831, 140737488351231, +STORE, 140724216176640, 140724216184831, +STORE, 140724216045568, 140724216184831, +STORE, 94870930628608, 94870932963327, +SNULL, 94870930841599, 94870932963327, +STORE, 94870930628608, 94870930841599, +STORE, 94870930841600, 94870932963327, +ERASE, 94870930841600, 94870932963327, +STORE, 94870932938752, 94870932951039, +STORE, 94870932951040, 94870932963327, +STORE, 140453683736576, 140453685989375, +SNULL, 140453683879935, 140453685989375, +STORE, 140453683736576, 140453683879935, +STORE, 140453683879936, 140453685989375, +ERASE, 140453683879936, 140453685989375, +STORE, 140453685977088, 140453685985279, +STORE, 140453685985280, 140453685989375, +STORE, 140724216832000, 140724216836095, +STORE, 140724216819712, 140724216831999, +STORE, 140453685948416, 140453685977087, +STORE, 140453685940224, 140453685948415, +STORE, 140453681623040, 140453683736575, +SNULL, 140453681623040, 140453681635327, +STORE, 140453681635328, 140453683736575, +STORE, 140453681623040, 140453681635327, +SNULL, 140453683728383, 140453683736575, +STORE, 140453681635328, 140453683728383, +STORE, 140453683728384, 140453683736575, +ERASE, 140453683728384, 140453683736575, +STORE, 140453683728384, 140453683736575, +STORE, 140453677826048, 140453681623039, +SNULL, 140453677826048, 140453679484927, +STORE, 140453679484928, 140453681623039, +STORE, 140453677826048, 140453679484927, +SNULL, 140453681582079, 140453681623039, +STORE, 140453679484928, 140453681582079, +STORE, 140453681582080, 140453681623039, +SNULL, 140453681582080, 140453681606655, +STORE, 140453681606656, 140453681623039, +STORE, 140453681582080, 140453681606655, +ERASE, 140453681582080, 140453681606655, +STORE, 140453681582080, 140453681606655, +ERASE, 140453681606656, 140453681623039, +STORE, 140453681606656, 140453681623039, +STORE, 140453685932032, 140453685948415, +SNULL, 140453681598463, 140453681606655, +STORE, 140453681582080, 140453681598463, +STORE, 140453681598464, 140453681606655, +SNULL, 140453683732479, 140453683736575, +STORE, 140453683728384, 140453683732479, +STORE, 140453683732480, 140453683736575, +SNULL, 94870932942847, 94870932951039, +STORE, 94870932938752, 94870932942847, +STORE, 94870932942848, 94870932951039, +SNULL, 140453685981183, 140453685985279, +STORE, 140453685977088, 140453685981183, +STORE, 140453685981184, 140453685985279, +ERASE, 140453685948416, 140453685977087, +STORE, 94870940565504, 94870940700671, +STORE, 140453684248576, 140453685932031, +STORE, 94870940565504, 94870940835839, +STORE, 94870940565504, 94870940971007, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140731275661312, 140737488351231, +SNULL, 140731275669503, 140737488351231, +STORE, 140731275661312, 140731275669503, +STORE, 140731275530240, 140731275669503, +STORE, 94642788548608, 94642790883327, +SNULL, 94642788761599, 94642790883327, +STORE, 94642788548608, 94642788761599, +STORE, 94642788761600, 94642790883327, +ERASE, 94642788761600, 94642790883327, +STORE, 94642790858752, 
94642790871039, +STORE, 94642790871040, 94642790883327, +STORE, 140228458749952, 140228461002751, +SNULL, 140228458893311, 140228461002751, +STORE, 140228458749952, 140228458893311, +STORE, 140228458893312, 140228461002751, +ERASE, 140228458893312, 140228461002751, +STORE, 140228460990464, 140228460998655, +STORE, 140228460998656, 140228461002751, +STORE, 140731276349440, 140731276353535, +STORE, 140731276337152, 140731276349439, +STORE, 140228460961792, 140228460990463, +STORE, 140228460953600, 140228460961791, +STORE, 140228456636416, 140228458749951, +SNULL, 140228456636416, 140228456648703, +STORE, 140228456648704, 140228458749951, +STORE, 140228456636416, 140228456648703, +SNULL, 140228458741759, 140228458749951, +STORE, 140228456648704, 140228458741759, +STORE, 140228458741760, 140228458749951, +ERASE, 140228458741760, 140228458749951, +STORE, 140228458741760, 140228458749951, +STORE, 140228452839424, 140228456636415, +SNULL, 140228452839424, 140228454498303, +STORE, 140228454498304, 140228456636415, +STORE, 140228452839424, 140228454498303, +SNULL, 140228456595455, 140228456636415, +STORE, 140228454498304, 140228456595455, +STORE, 140228456595456, 140228456636415, +SNULL, 140228456595456, 140228456620031, +STORE, 140228456620032, 140228456636415, +STORE, 140228456595456, 140228456620031, +ERASE, 140228456595456, 140228456620031, +STORE, 140228456595456, 140228456620031, +ERASE, 140228456620032, 140228456636415, +STORE, 140228456620032, 140228456636415, +STORE, 140228460945408, 140228460961791, +SNULL, 140228456611839, 140228456620031, +STORE, 140228456595456, 140228456611839, +STORE, 140228456611840, 140228456620031, +SNULL, 140228458745855, 140228458749951, +STORE, 140228458741760, 140228458745855, +STORE, 140228458745856, 140228458749951, +SNULL, 94642790862847, 94642790871039, +STORE, 94642790858752, 94642790862847, +STORE, 94642790862848, 94642790871039, +SNULL, 140228460994559, 140228460998655, +STORE, 140228460990464, 140228460994559, +STORE, 140228460994560, 140228460998655, +ERASE, 140228460961792, 140228460990463, +STORE, 94642801549312, 94642801684479, +STORE, 140228459261952, 140228460945407, +STORE, 94642801549312, 94642801819647, +STORE, 94642801549312, 94642801954815, +STORE, 94604087611392, 94604087824383, +STORE, 94604089921536, 94604089925631, +STORE, 94604089925632, 94604089933823, +STORE, 94604089933824, 94604089946111, +STORE, 94604105125888, 94604106424319, +STORE, 140454937694208, 140454939353087, +STORE, 140454939353088, 140454941450239, +STORE, 140454941450240, 140454941466623, +STORE, 140454941466624, 140454941474815, +STORE, 140454941474816, 140454941491199, +STORE, 140454941491200, 140454941503487, +STORE, 140454941503488, 140454943596543, +STORE, 140454943596544, 140454943600639, +STORE, 140454943600640, 140454943604735, +STORE, 140454943604736, 140454943748095, +STORE, 140454944116736, 140454945800191, +STORE, 140454945800192, 140454945816575, +STORE, 140454945845248, 140454945849343, +STORE, 140454945849344, 140454945853439, +STORE, 140454945853440, 140454945857535, +STORE, 140728438214656, 140728438353919, +STORE, 140728439095296, 140728439107583, +STORE, 140728439107584, 140728439111679, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140721843453952, 140737488351231, +SNULL, 140721843462143, 140737488351231, +STORE, 140721843453952, 140721843462143, +STORE, 140721843322880, 140721843462143, +STORE, 94465962455040, 94465964789759, +SNULL, 94465962668031, 94465964789759, +STORE, 94465962455040, 
94465962668031, +STORE, 94465962668032, 94465964789759, +ERASE, 94465962668032, 94465964789759, +STORE, 94465964765184, 94465964777471, +STORE, 94465964777472, 94465964789759, +STORE, 139913488314368, 139913490567167, +SNULL, 139913488457727, 139913490567167, +STORE, 139913488314368, 139913488457727, +STORE, 139913488457728, 139913490567167, +ERASE, 139913488457728, 139913490567167, +STORE, 139913490554880, 139913490563071, +STORE, 139913490563072, 139913490567167, +STORE, 140721843503104, 140721843507199, +STORE, 140721843490816, 140721843503103, +STORE, 139913490526208, 139913490554879, +STORE, 139913490518016, 139913490526207, +STORE, 139913486200832, 139913488314367, +SNULL, 139913486200832, 139913486213119, +STORE, 139913486213120, 139913488314367, +STORE, 139913486200832, 139913486213119, +SNULL, 139913488306175, 139913488314367, +STORE, 139913486213120, 139913488306175, +STORE, 139913488306176, 139913488314367, +ERASE, 139913488306176, 139913488314367, +STORE, 139913488306176, 139913488314367, +STORE, 139913482403840, 139913486200831, +SNULL, 139913482403840, 139913484062719, +STORE, 139913484062720, 139913486200831, +STORE, 139913482403840, 139913484062719, +SNULL, 139913486159871, 139913486200831, +STORE, 139913484062720, 139913486159871, +STORE, 139913486159872, 139913486200831, +SNULL, 139913486159872, 139913486184447, +STORE, 139913486184448, 139913486200831, +STORE, 139913486159872, 139913486184447, +ERASE, 139913486159872, 139913486184447, +STORE, 139913486159872, 139913486184447, +ERASE, 139913486184448, 139913486200831, +STORE, 139913486184448, 139913486200831, +STORE, 139913490509824, 139913490526207, +SNULL, 139913486176255, 139913486184447, +STORE, 139913486159872, 139913486176255, +STORE, 139913486176256, 139913486184447, +SNULL, 139913488310271, 139913488314367, +STORE, 139913488306176, 139913488310271, +STORE, 139913488310272, 139913488314367, +SNULL, 94465964769279, 94465964777471, +STORE, 94465964765184, 94465964769279, +STORE, 94465964769280, 94465964777471, +SNULL, 139913490558975, 139913490563071, +STORE, 139913490554880, 139913490558975, +STORE, 139913490558976, 139913490563071, +ERASE, 139913490526208, 139913490554879, +STORE, 94465970024448, 94465970159615, +STORE, 139913488826368, 139913490509823, +STORE, 94465970024448, 94465970294783, +STORE, 94465970024448, 94465970429951, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140720583307264, 140737488351231, +SNULL, 140720583315455, 140737488351231, +STORE, 140720583307264, 140720583315455, +STORE, 140720583176192, 140720583315455, +STORE, 94212322082816, 94212324417535, +SNULL, 94212322295807, 94212324417535, +STORE, 94212322082816, 94212322295807, +STORE, 94212322295808, 94212324417535, +ERASE, 94212322295808, 94212324417535, +STORE, 94212324392960, 94212324405247, +STORE, 94212324405248, 94212324417535, +STORE, 139659688538112, 139659690790911, +SNULL, 139659688681471, 139659690790911, +STORE, 139659688538112, 139659688681471, +STORE, 139659688681472, 139659690790911, +ERASE, 139659688681472, 139659690790911, +STORE, 139659690778624, 139659690786815, +STORE, 139659690786816, 139659690790911, +STORE, 140720584781824, 140720584785919, +STORE, 140720584769536, 140720584781823, +STORE, 139659690749952, 139659690778623, +STORE, 139659690741760, 139659690749951, +STORE, 139659686424576, 139659688538111, +SNULL, 139659686424576, 139659686436863, +STORE, 139659686436864, 139659688538111, +STORE, 139659686424576, 139659686436863, +SNULL, 139659688529919, 139659688538111, 
+STORE, 139659686436864, 139659688529919, +STORE, 139659688529920, 139659688538111, +ERASE, 139659688529920, 139659688538111, +STORE, 139659688529920, 139659688538111, +STORE, 139659682627584, 139659686424575, +SNULL, 139659682627584, 139659684286463, +STORE, 139659684286464, 139659686424575, +STORE, 139659682627584, 139659684286463, +SNULL, 139659686383615, 139659686424575, +STORE, 139659684286464, 139659686383615, +STORE, 139659686383616, 139659686424575, +SNULL, 139659686383616, 139659686408191, +STORE, 139659686408192, 139659686424575, +STORE, 139659686383616, 139659686408191, +ERASE, 139659686383616, 139659686408191, +STORE, 139659686383616, 139659686408191, +ERASE, 139659686408192, 139659686424575, +STORE, 139659686408192, 139659686424575, +STORE, 139659690733568, 139659690749951, +SNULL, 139659686399999, 139659686408191, +STORE, 139659686383616, 139659686399999, +STORE, 139659686400000, 139659686408191, +SNULL, 139659688534015, 139659688538111, +STORE, 139659688529920, 139659688534015, +STORE, 139659688534016, 139659688538111, +SNULL, 94212324397055, 94212324405247, +STORE, 94212324392960, 94212324397055, +STORE, 94212324397056, 94212324405247, +SNULL, 139659690782719, 139659690786815, +STORE, 139659690778624, 139659690782719, +STORE, 139659690782720, 139659690786815, +ERASE, 139659690749952, 139659690778623, +STORE, 94212355014656, 94212355149823, +STORE, 139659689050112, 139659690733567, +STORE, 94212355014656, 94212355284991, +STORE, 94212355014656, 94212355420159, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140727689830400, 140737488351231, +SNULL, 140727689838591, 140737488351231, +STORE, 140727689830400, 140727689838591, +STORE, 140727689699328, 140727689838591, +STORE, 94572390281216, 94572392615935, +SNULL, 94572390494207, 94572392615935, +STORE, 94572390281216, 94572390494207, +STORE, 94572390494208, 94572392615935, +ERASE, 94572390494208, 94572392615935, +STORE, 94572392591360, 94572392603647, +STORE, 94572392603648, 94572392615935, +STORE, 140575923769344, 140575926022143, +SNULL, 140575923912703, 140575926022143, +STORE, 140575923769344, 140575923912703, +STORE, 140575923912704, 140575926022143, +ERASE, 140575923912704, 140575926022143, +STORE, 140575926009856, 140575926018047, +STORE, 140575926018048, 140575926022143, +STORE, 140727689871360, 140727689875455, +STORE, 140727689859072, 140727689871359, +STORE, 140575925981184, 140575926009855, +STORE, 140575925972992, 140575925981183, +STORE, 140575921655808, 140575923769343, +SNULL, 140575921655808, 140575921668095, +STORE, 140575921668096, 140575923769343, +STORE, 140575921655808, 140575921668095, +SNULL, 140575923761151, 140575923769343, +STORE, 140575921668096, 140575923761151, +STORE, 140575923761152, 140575923769343, +ERASE, 140575923761152, 140575923769343, +STORE, 140575923761152, 140575923769343, +STORE, 140575917858816, 140575921655807, +SNULL, 140575917858816, 140575919517695, +STORE, 140575919517696, 140575921655807, +STORE, 140575917858816, 140575919517695, +SNULL, 140575921614847, 140575921655807, +STORE, 140575919517696, 140575921614847, +STORE, 140575921614848, 140575921655807, +SNULL, 140575921614848, 140575921639423, +STORE, 140575921639424, 140575921655807, +STORE, 140575921614848, 140575921639423, +ERASE, 140575921614848, 140575921639423, +STORE, 140575921614848, 140575921639423, +ERASE, 140575921639424, 140575921655807, +STORE, 140575921639424, 140575921655807, +STORE, 140575925964800, 140575925981183, +SNULL, 140575921631231, 140575921639423, +STORE, 
140575921614848, 140575921631231, +STORE, 140575921631232, 140575921639423, +SNULL, 140575923765247, 140575923769343, +STORE, 140575923761152, 140575923765247, +STORE, 140575923765248, 140575923769343, +SNULL, 94572392595455, 94572392603647, +STORE, 94572392591360, 94572392595455, +STORE, 94572392595456, 94572392603647, +SNULL, 140575926013951, 140575926018047, +STORE, 140575926009856, 140575926013951, +STORE, 140575926013952, 140575926018047, +ERASE, 140575925981184, 140575926009855, +STORE, 94572402278400, 94572402413567, +STORE, 140575924281344, 140575925964799, +STORE, 94572402278400, 94572402548735, +STORE, 94572402278400, 94572402683903, +STORE, 94572402278400, 94572402851839, +SNULL, 94572402827263, 94572402851839, +STORE, 94572402278400, 94572402827263, +STORE, 94572402827264, 94572402851839, +ERASE, 94572402827264, 94572402851839, +STORE, 94572402278400, 94572402966527, +STORE, 94572402278400, 94572403109887, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140725520506880, 140737488351231, +SNULL, 140725520515071, 140737488351231, +STORE, 140725520506880, 140725520515071, +STORE, 140725520375808, 140725520515071, +STORE, 93829948788736, 93829951012863, +SNULL, 93829948899327, 93829951012863, +STORE, 93829948788736, 93829948899327, +STORE, 93829948899328, 93829951012863, +ERASE, 93829948899328, 93829951012863, +STORE, 93829950992384, 93829951004671, +STORE, 93829951004672, 93829951012863, +STORE, 140133696794624, 140133699047423, +SNULL, 140133696937983, 140133699047423, +STORE, 140133696794624, 140133696937983, +STORE, 140133696937984, 140133699047423, +ERASE, 140133696937984, 140133699047423, +STORE, 140133699035136, 140133699043327, +STORE, 140133699043328, 140133699047423, +STORE, 140725520875520, 140725520879615, +STORE, 140725520863232, 140725520875519, +STORE, 140133699006464, 140133699035135, +STORE, 140133698998272, 140133699006463, +STORE, 140133692997632, 140133696794623, +SNULL, 140133692997632, 140133694656511, +STORE, 140133694656512, 140133696794623, +STORE, 140133692997632, 140133694656511, +SNULL, 140133696753663, 140133696794623, +STORE, 140133694656512, 140133696753663, +STORE, 140133696753664, 140133696794623, +SNULL, 140133696753664, 140133696778239, +STORE, 140133696778240, 140133696794623, +STORE, 140133696753664, 140133696778239, +ERASE, 140133696753664, 140133696778239, +STORE, 140133696753664, 140133696778239, +ERASE, 140133696778240, 140133696794623, +STORE, 140133696778240, 140133696794623, +SNULL, 140133696770047, 140133696778239, +STORE, 140133696753664, 140133696770047, +STORE, 140133696770048, 140133696778239, +SNULL, 93829951000575, 93829951004671, +STORE, 93829950992384, 93829951000575, +STORE, 93829951000576, 93829951004671, +SNULL, 140133699039231, 140133699043327, +STORE, 140133699035136, 140133699039231, +STORE, 140133699039232, 140133699043327, +ERASE, 140133699006464, 140133699035135, +STORE, 93829978693632, 93829978828799, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140736118022144, 140737488351231, +SNULL, 140736118030335, 140737488351231, +STORE, 140736118022144, 140736118030335, +STORE, 140736117891072, 140736118030335, +STORE, 94467663982592, 94467666206719, +SNULL, 94467664093183, 94467666206719, +STORE, 94467663982592, 94467664093183, +STORE, 94467664093184, 94467666206719, +ERASE, 94467664093184, 94467666206719, +STORE, 94467666186240, 94467666198527, +STORE, 94467666198528, 94467666206719, +STORE, 140525377327104, 140525379579903, +SNULL, 
140525377470463, 140525379579903, +STORE, 140525377327104, 140525377470463, +STORE, 140525377470464, 140525379579903, +ERASE, 140525377470464, 140525379579903, +STORE, 140525379567616, 140525379575807, +STORE, 140525379575808, 140525379579903, +STORE, 140736118771712, 140736118775807, +STORE, 140736118759424, 140736118771711, +STORE, 140525379538944, 140525379567615, +STORE, 140525379530752, 140525379538943, +STORE, 140525373530112, 140525377327103, +SNULL, 140525373530112, 140525375188991, +STORE, 140525375188992, 140525377327103, +STORE, 140525373530112, 140525375188991, +SNULL, 140525377286143, 140525377327103, +STORE, 140525375188992, 140525377286143, +STORE, 140525377286144, 140525377327103, +SNULL, 140525377286144, 140525377310719, +STORE, 140525377310720, 140525377327103, +STORE, 140525377286144, 140525377310719, +ERASE, 140525377286144, 140525377310719, +STORE, 140525377286144, 140525377310719, +ERASE, 140525377310720, 140525377327103, +STORE, 140525377310720, 140525377327103, +SNULL, 140525377302527, 140525377310719, +STORE, 140525377286144, 140525377302527, +STORE, 140525377302528, 140525377310719, +SNULL, 94467666194431, 94467666198527, +STORE, 94467666186240, 94467666194431, +STORE, 94467666194432, 94467666198527, +SNULL, 140525379571711, 140525379575807, +STORE, 140525379567616, 140525379571711, +STORE, 140525379571712, 140525379575807, +ERASE, 140525379538944, 140525379567615, +STORE, 94467693379584, 94467693514751, +STORE, 94200172744704, 94200172957695, +STORE, 94200175054848, 94200175058943, +STORE, 94200175058944, 94200175067135, +STORE, 94200175067136, 94200175079423, +STORE, 94200196673536, 94200198905855, +STORE, 140053867720704, 140053869379583, +STORE, 140053869379584, 140053871476735, +STORE, 140053871476736, 140053871493119, +STORE, 140053871493120, 140053871501311, +STORE, 140053871501312, 140053871517695, +STORE, 140053871517696, 140053871529983, +STORE, 140053871529984, 140053873623039, +STORE, 140053873623040, 140053873627135, +STORE, 140053873627136, 140053873631231, +STORE, 140053873631232, 140053873774591, +STORE, 140053874143232, 140053875826687, +STORE, 140053875826688, 140053875843071, +STORE, 140053875871744, 140053875875839, +STORE, 140053875875840, 140053875879935, +STORE, 140053875879936, 140053875884031, +STORE, 140728538484736, 140728538623999, +STORE, 140728538652672, 140728538664959, +STORE, 140728538664960, 140728538669055, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140732307775488, 140737488351231, +SNULL, 140732307783679, 140737488351231, +STORE, 140732307775488, 140732307783679, +STORE, 140732307644416, 140732307783679, +STORE, 93831417630720, 93831419965439, +SNULL, 93831417843711, 93831419965439, +STORE, 93831417630720, 93831417843711, +STORE, 93831417843712, 93831419965439, +ERASE, 93831417843712, 93831419965439, +STORE, 93831419940864, 93831419953151, +STORE, 93831419953152, 93831419965439, +STORE, 140241062088704, 140241064341503, +SNULL, 140241062232063, 140241064341503, +STORE, 140241062088704, 140241062232063, +STORE, 140241062232064, 140241064341503, +ERASE, 140241062232064, 140241064341503, +STORE, 140241064329216, 140241064337407, +STORE, 140241064337408, 140241064341503, +STORE, 140732308140032, 140732308144127, +STORE, 140732308127744, 140732308140031, +STORE, 140241064300544, 140241064329215, +STORE, 140241064292352, 140241064300543, +STORE, 140241059975168, 140241062088703, +SNULL, 140241059975168, 140241059987455, +STORE, 140241059987456, 140241062088703, +STORE, 140241059975168, 
140241059987455, +SNULL, 140241062080511, 140241062088703, +STORE, 140241059987456, 140241062080511, +STORE, 140241062080512, 140241062088703, +ERASE, 140241062080512, 140241062088703, +STORE, 140241062080512, 140241062088703, +STORE, 140241056178176, 140241059975167, +SNULL, 140241056178176, 140241057837055, +STORE, 140241057837056, 140241059975167, +STORE, 140241056178176, 140241057837055, +SNULL, 140241059934207, 140241059975167, +STORE, 140241057837056, 140241059934207, +STORE, 140241059934208, 140241059975167, +SNULL, 140241059934208, 140241059958783, +STORE, 140241059958784, 140241059975167, +STORE, 140241059934208, 140241059958783, +ERASE, 140241059934208, 140241059958783, +STORE, 140241059934208, 140241059958783, +ERASE, 140241059958784, 140241059975167, +STORE, 140241059958784, 140241059975167, +STORE, 140241064284160, 140241064300543, +SNULL, 140241059950591, 140241059958783, +STORE, 140241059934208, 140241059950591, +STORE, 140241059950592, 140241059958783, +SNULL, 140241062084607, 140241062088703, +STORE, 140241062080512, 140241062084607, +STORE, 140241062084608, 140241062088703, +SNULL, 93831419944959, 93831419953151, +STORE, 93831419940864, 93831419944959, +STORE, 93831419944960, 93831419953151, +SNULL, 140241064333311, 140241064337407, +STORE, 140241064329216, 140241064333311, +STORE, 140241064333312, 140241064337407, +ERASE, 140241064300544, 140241064329215, +STORE, 93831435284480, 93831435419647, +STORE, 140241062600704, 140241064284159, +STORE, 93831435284480, 93831435554815, +STORE, 93831435284480, 93831435689983, +STORE, 93831435284480, 93831435862015, +SNULL, 93831435837439, 93831435862015, +STORE, 93831435284480, 93831435837439, +STORE, 93831435837440, 93831435862015, +ERASE, 93831435837440, 93831435862015, +STORE, 93831435284480, 93831435972607, +STORE, 93831435284480, 93831436107775, +SNULL, 93831436091391, 93831436107775, +STORE, 93831435284480, 93831436091391, +STORE, 93831436091392, 93831436107775, +ERASE, 93831436091392, 93831436107775, +STORE, 93831435284480, 93831436226559, +STORE, 93831435284480, 93831436361727, +STORE, 93831435284480, 93831436505087, +STORE, 93831435284480, 93831436652543, +STORE, 93831435284480, 93831436787711, +STORE, 93831435284480, 93831436926975, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140728546775040, 140737488351231, +SNULL, 140728546783231, 140737488351231, +STORE, 140728546775040, 140728546783231, +STORE, 140728546643968, 140728546783231, +STORE, 94456178786304, 94456181010431, +SNULL, 94456178896895, 94456181010431, +STORE, 94456178786304, 94456178896895, +STORE, 94456178896896, 94456181010431, +ERASE, 94456178896896, 94456181010431, +STORE, 94456180989952, 94456181002239, +STORE, 94456181002240, 94456181010431, +STORE, 140221893091328, 140221895344127, +SNULL, 140221893234687, 140221895344127, +STORE, 140221893091328, 140221893234687, +STORE, 140221893234688, 140221895344127, +ERASE, 140221893234688, 140221895344127, +STORE, 140221895331840, 140221895340031, +STORE, 140221895340032, 140221895344127, +STORE, 140728547803136, 140728547807231, +STORE, 140728547790848, 140728547803135, +STORE, 140221895303168, 140221895331839, +STORE, 140221895294976, 140221895303167, +STORE, 140221889294336, 140221893091327, +SNULL, 140221889294336, 140221890953215, +STORE, 140221890953216, 140221893091327, +STORE, 140221889294336, 140221890953215, +SNULL, 140221893050367, 140221893091327, +STORE, 140221890953216, 140221893050367, +STORE, 140221893050368, 140221893091327, +SNULL, 140221893050368, 
140221893074943, +STORE, 140221893074944, 140221893091327, +STORE, 140221893050368, 140221893074943, +ERASE, 140221893050368, 140221893074943, +STORE, 140221893050368, 140221893074943, +ERASE, 140221893074944, 140221893091327, +STORE, 140221893074944, 140221893091327, +SNULL, 140221893066751, 140221893074943, +STORE, 140221893050368, 140221893066751, +STORE, 140221893066752, 140221893074943, +SNULL, 94456180998143, 94456181002239, +STORE, 94456180989952, 94456180998143, +STORE, 94456180998144, 94456181002239, +SNULL, 140221895335935, 140221895340031, +STORE, 140221895331840, 140221895335935, +STORE, 140221895335936, 140221895340031, +ERASE, 140221895303168, 140221895331839, +STORE, 94456203730944, 94456203866111, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140734438637568, 140737488351231, +SNULL, 140734438645759, 140737488351231, +STORE, 140734438637568, 140734438645759, +STORE, 140734438506496, 140734438645759, +STORE, 94652233351168, 94652235575295, +SNULL, 94652233461759, 94652235575295, +STORE, 94652233351168, 94652233461759, +STORE, 94652233461760, 94652235575295, +ERASE, 94652233461760, 94652235575295, +STORE, 94652235554816, 94652235567103, +STORE, 94652235567104, 94652235575295, +STORE, 140536493195264, 140536495448063, +SNULL, 140536493338623, 140536495448063, +STORE, 140536493195264, 140536493338623, +STORE, 140536493338624, 140536495448063, +ERASE, 140536493338624, 140536495448063, +STORE, 140536495435776, 140536495443967, +STORE, 140536495443968, 140536495448063, +STORE, 140734439002112, 140734439006207, +STORE, 140734438989824, 140734439002111, +STORE, 140536495407104, 140536495435775, +STORE, 140536495398912, 140536495407103, +STORE, 140536489398272, 140536493195263, +SNULL, 140536489398272, 140536491057151, +STORE, 140536491057152, 140536493195263, +STORE, 140536489398272, 140536491057151, +SNULL, 140536493154303, 140536493195263, +STORE, 140536491057152, 140536493154303, +STORE, 140536493154304, 140536493195263, +SNULL, 140536493154304, 140536493178879, +STORE, 140536493178880, 140536493195263, +STORE, 140536493154304, 140536493178879, +ERASE, 140536493154304, 140536493178879, +STORE, 140536493154304, 140536493178879, +ERASE, 140536493178880, 140536493195263, +STORE, 140536493178880, 140536493195263, +SNULL, 140536493170687, 140536493178879, +STORE, 140536493154304, 140536493170687, +STORE, 140536493170688, 140536493178879, +SNULL, 94652235563007, 94652235567103, +STORE, 94652235554816, 94652235563007, +STORE, 94652235563008, 94652235567103, +SNULL, 140536495439871, 140536495443967, +STORE, 140536495435776, 140536495439871, +STORE, 140536495439872, 140536495443967, +ERASE, 140536495407104, 140536495435775, +STORE, 94652265619456, 94652265754623, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140721814200320, 140737488351231, +SNULL, 140721814208511, 140737488351231, +STORE, 140721814200320, 140721814208511, +STORE, 140721814069248, 140721814208511, +STORE, 94062800691200, 94062802915327, +SNULL, 94062800801791, 94062802915327, +STORE, 94062800691200, 94062800801791, +STORE, 94062800801792, 94062802915327, +ERASE, 94062800801792, 94062802915327, +STORE, 94062802894848, 94062802907135, +STORE, 94062802907136, 94062802915327, +STORE, 139717739700224, 139717741953023, +SNULL, 139717739843583, 139717741953023, +STORE, 139717739700224, 139717739843583, +STORE, 139717739843584, 139717741953023, +ERASE, 139717739843584, 139717741953023, +STORE, 139717741940736, 139717741948927, +STORE, 
139717741948928, 139717741953023, +STORE, 140721814224896, 140721814228991, +STORE, 140721814212608, 140721814224895, +STORE, 139717741912064, 139717741940735, +STORE, 139717741903872, 139717741912063, +STORE, 139717735903232, 139717739700223, +SNULL, 139717735903232, 139717737562111, +STORE, 139717737562112, 139717739700223, +STORE, 139717735903232, 139717737562111, +SNULL, 139717739659263, 139717739700223, +STORE, 139717737562112, 139717739659263, +STORE, 139717739659264, 139717739700223, +SNULL, 139717739659264, 139717739683839, +STORE, 139717739683840, 139717739700223, +STORE, 139717739659264, 139717739683839, +ERASE, 139717739659264, 139717739683839, +STORE, 139717739659264, 139717739683839, +ERASE, 139717739683840, 139717739700223, +STORE, 139717739683840, 139717739700223, +SNULL, 139717739675647, 139717739683839, +STORE, 139717739659264, 139717739675647, +STORE, 139717739675648, 139717739683839, +SNULL, 94062802903039, 94062802907135, +STORE, 94062802894848, 94062802903039, +STORE, 94062802903040, 94062802907135, +SNULL, 139717741944831, 139717741948927, +STORE, 139717741940736, 139717741944831, +STORE, 139717741944832, 139717741948927, +ERASE, 139717741912064, 139717741940735, +STORE, 94062814060544, 94062814195711, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723945754624, 140737488351231, +SNULL, 140723945762815, 140737488351231, +STORE, 140723945754624, 140723945762815, +STORE, 140723945623552, 140723945762815, +STORE, 94886119305216, 94886121639935, +SNULL, 94886119518207, 94886121639935, +STORE, 94886119305216, 94886119518207, +STORE, 94886119518208, 94886121639935, +ERASE, 94886119518208, 94886121639935, +STORE, 94886121615360, 94886121627647, +STORE, 94886121627648, 94886121639935, +STORE, 140152532131840, 140152534384639, +SNULL, 140152532275199, 140152534384639, +STORE, 140152532131840, 140152532275199, +STORE, 140152532275200, 140152534384639, +ERASE, 140152532275200, 140152534384639, +STORE, 140152534372352, 140152534380543, +STORE, 140152534380544, 140152534384639, +STORE, 140723946213376, 140723946217471, +STORE, 140723946201088, 140723946213375, +STORE, 140152534343680, 140152534372351, +STORE, 140152534335488, 140152534343679, +STORE, 140152530018304, 140152532131839, +SNULL, 140152530018304, 140152530030591, +STORE, 140152530030592, 140152532131839, +STORE, 140152530018304, 140152530030591, +SNULL, 140152532123647, 140152532131839, +STORE, 140152530030592, 140152532123647, +STORE, 140152532123648, 140152532131839, +ERASE, 140152532123648, 140152532131839, +STORE, 140152532123648, 140152532131839, +STORE, 140152526221312, 140152530018303, +SNULL, 140152526221312, 140152527880191, +STORE, 140152527880192, 140152530018303, +STORE, 140152526221312, 140152527880191, +SNULL, 140152529977343, 140152530018303, +STORE, 140152527880192, 140152529977343, +STORE, 140152529977344, 140152530018303, +SNULL, 140152529977344, 140152530001919, +STORE, 140152530001920, 140152530018303, +STORE, 140152529977344, 140152530001919, +ERASE, 140152529977344, 140152530001919, +STORE, 140152529977344, 140152530001919, +ERASE, 140152530001920, 140152530018303, +STORE, 140152530001920, 140152530018303, +STORE, 140152534327296, 140152534343679, +SNULL, 140152529993727, 140152530001919, +STORE, 140152529977344, 140152529993727, +STORE, 140152529993728, 140152530001919, +SNULL, 140152532127743, 140152532131839, +STORE, 140152532123648, 140152532127743, +STORE, 140152532127744, 140152532131839, +SNULL, 94886121619455, 94886121627647, +STORE, 
94886121615360, 94886121619455, +STORE, 94886121619456, 94886121627647, +SNULL, 140152534376447, 140152534380543, +STORE, 140152534372352, 140152534376447, +STORE, 140152534376448, 140152534380543, +ERASE, 140152534343680, 140152534372351, +STORE, 94886129770496, 94886129905663, +STORE, 140152532643840, 140152534327295, +STORE, 94886129770496, 94886130040831, +STORE, 94886129770496, 94886130175999, +STORE, 94886129770496, 94886130348031, +SNULL, 94886130323455, 94886130348031, +STORE, 94886129770496, 94886130323455, +STORE, 94886130323456, 94886130348031, +ERASE, 94886130323456, 94886130348031, +STORE, 94886129770496, 94886130458623, +STORE, 94886129770496, 94886130606079, +SNULL, 94886130573311, 94886130606079, +STORE, 94886129770496, 94886130573311, +STORE, 94886130573312, 94886130606079, +ERASE, 94886130573312, 94886130606079, +STORE, 94886129770496, 94886130724863, +STORE, 94886129770496, 94886130876415, +STORE, 94886129770496, 94886131023871, +STORE, 94886129770496, 94886131175423, +STORE, 94886129770496, 94886131318783, +STORE, 94886129770496, 94886131453951, +SNULL, 94886131449855, 94886131453951, +STORE, 94886129770496, 94886131449855, +STORE, 94886131449856, 94886131453951, +ERASE, 94886131449856, 94886131453951, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735450779648, 140737488351231, +SNULL, 140735450787839, 140737488351231, +STORE, 140735450779648, 140735450787839, +STORE, 140735450648576, 140735450787839, +STORE, 93947794079744, 93947796414463, +SNULL, 93947794292735, 93947796414463, +STORE, 93947794079744, 93947794292735, +STORE, 93947794292736, 93947796414463, +ERASE, 93947794292736, 93947796414463, +STORE, 93947796389888, 93947796402175, +STORE, 93947796402176, 93947796414463, +STORE, 139841993433088, 139841995685887, +SNULL, 139841993576447, 139841995685887, +STORE, 139841993433088, 139841993576447, +STORE, 139841993576448, 139841995685887, +ERASE, 139841993576448, 139841995685887, +STORE, 139841995673600, 139841995681791, +STORE, 139841995681792, 139841995685887, +STORE, 140735451308032, 140735451312127, +STORE, 140735451295744, 140735451308031, +STORE, 139841995644928, 139841995673599, +STORE, 139841995636736, 139841995644927, +STORE, 139841991319552, 139841993433087, +SNULL, 139841991319552, 139841991331839, +STORE, 139841991331840, 139841993433087, +STORE, 139841991319552, 139841991331839, +SNULL, 139841993424895, 139841993433087, +STORE, 139841991331840, 139841993424895, +STORE, 139841993424896, 139841993433087, +ERASE, 139841993424896, 139841993433087, +STORE, 139841993424896, 139841993433087, +STORE, 139841987522560, 139841991319551, +SNULL, 139841987522560, 139841989181439, +STORE, 139841989181440, 139841991319551, +STORE, 139841987522560, 139841989181439, +SNULL, 139841991278591, 139841991319551, +STORE, 139841989181440, 139841991278591, +STORE, 139841991278592, 139841991319551, +SNULL, 139841991278592, 139841991303167, +STORE, 139841991303168, 139841991319551, +STORE, 139841991278592, 139841991303167, +ERASE, 139841991278592, 139841991303167, +STORE, 139841991278592, 139841991303167, +ERASE, 139841991303168, 139841991319551, +STORE, 139841991303168, 139841991319551, +STORE, 139841995628544, 139841995644927, +SNULL, 139841991294975, 139841991303167, +STORE, 139841991278592, 139841991294975, +STORE, 139841991294976, 139841991303167, +SNULL, 139841993428991, 139841993433087, +STORE, 139841993424896, 139841993428991, +STORE, 139841993428992, 139841993433087, +SNULL, 93947796393983, 93947796402175, +STORE, 
93947796389888, 93947796393983, +STORE, 93947796393984, 93947796402175, +SNULL, 139841995677695, 139841995681791, +STORE, 139841995673600, 139841995677695, +STORE, 139841995677696, 139841995681791, +ERASE, 139841995644928, 139841995673599, +STORE, 93947829739520, 93947829874687, +STORE, 139841993945088, 139841995628543, +STORE, 93947829739520, 93947830009855, +STORE, 93947829739520, 93947830145023, +STORE, 94659351814144, 94659352027135, +STORE, 94659354124288, 94659354128383, +STORE, 94659354128384, 94659354136575, +STORE, 94659354136576, 94659354148863, +STORE, 94659383476224, 94659385057279, +STORE, 139959054557184, 139959056216063, +STORE, 139959056216064, 139959058313215, +STORE, 139959058313216, 139959058329599, +STORE, 139959058329600, 139959058337791, +STORE, 139959058337792, 139959058354175, +STORE, 139959058354176, 139959058366463, +STORE, 139959058366464, 139959060459519, +STORE, 139959060459520, 139959060463615, +STORE, 139959060463616, 139959060467711, +STORE, 139959060467712, 139959060611071, +STORE, 139959060979712, 139959062663167, +STORE, 139959062663168, 139959062679551, +STORE, 139959062708224, 139959062712319, +STORE, 139959062712320, 139959062716415, +STORE, 139959062716416, 139959062720511, +STORE, 140735532539904, 140735532679167, +STORE, 140735532830720, 140735532843007, +STORE, 140735532843008, 140735532847103, +STORE, 93894361829376, 93894362042367, +STORE, 93894364139520, 93894364143615, +STORE, 93894364143616, 93894364151807, +STORE, 93894364151808, 93894364164095, +STORE, 93894396944384, 93894397624319, +STORE, 140075612573696, 140075614232575, +STORE, 140075614232576, 140075616329727, +STORE, 140075616329728, 140075616346111, +STORE, 140075616346112, 140075616354303, +STORE, 140075616354304, 140075616370687, +STORE, 140075616370688, 140075616382975, +STORE, 140075616382976, 140075618476031, +STORE, 140075618476032, 140075618480127, +STORE, 140075618480128, 140075618484223, +STORE, 140075618484224, 140075618627583, +STORE, 140075618996224, 140075620679679, +STORE, 140075620679680, 140075620696063, +STORE, 140075620724736, 140075620728831, +STORE, 140075620728832, 140075620732927, +STORE, 140075620732928, 140075620737023, +STORE, 140720830312448, 140720830451711, +STORE, 140720830631936, 140720830644223, +STORE, 140720830644224, 140720830648319, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735116226560, 140737488351231, +SNULL, 140735116234751, 140737488351231, +STORE, 140735116226560, 140735116234751, +STORE, 140735116095488, 140735116234751, +STORE, 94873398054912, 94873400279039, +SNULL, 94873398165503, 94873400279039, +STORE, 94873398054912, 94873398165503, +STORE, 94873398165504, 94873400279039, +ERASE, 94873398165504, 94873400279039, +STORE, 94873400258560, 94873400270847, +STORE, 94873400270848, 94873400279039, +STORE, 140303828606976, 140303830859775, +SNULL, 140303828750335, 140303830859775, +STORE, 140303828606976, 140303828750335, +STORE, 140303828750336, 140303830859775, +ERASE, 140303828750336, 140303830859775, +STORE, 140303830847488, 140303830855679, +STORE, 140303830855680, 140303830859775, +STORE, 140735116251136, 140735116255231, +STORE, 140735116238848, 140735116251135, +STORE, 140303830818816, 140303830847487, +STORE, 140303830810624, 140303830818815, +STORE, 140303824809984, 140303828606975, +SNULL, 140303824809984, 140303826468863, +STORE, 140303826468864, 140303828606975, +STORE, 140303824809984, 140303826468863, +SNULL, 140303828566015, 140303828606975, +STORE, 140303826468864, 
140303828566015, +STORE, 140303828566016, 140303828606975, +SNULL, 140303828566016, 140303828590591, +STORE, 140303828590592, 140303828606975, +STORE, 140303828566016, 140303828590591, +ERASE, 140303828566016, 140303828590591, +STORE, 140303828566016, 140303828590591, +ERASE, 140303828590592, 140303828606975, +STORE, 140303828590592, 140303828606975, +SNULL, 140303828582399, 140303828590591, +STORE, 140303828566016, 140303828582399, +STORE, 140303828582400, 140303828590591, +SNULL, 94873400266751, 94873400270847, +STORE, 94873400258560, 94873400266751, +STORE, 94873400266752, 94873400270847, +SNULL, 140303830851583, 140303830855679, +STORE, 140303830847488, 140303830851583, +STORE, 140303830851584, 140303830855679, +ERASE, 140303830818816, 140303830847487, +STORE, 94873413713920, 94873413849087, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140732349956096, 140737488351231, +SNULL, 140732349964287, 140737488351231, +STORE, 140732349956096, 140732349964287, +STORE, 140732349825024, 140732349964287, +STORE, 94009652736000, 94009655070719, +SNULL, 94009652948991, 94009655070719, +STORE, 94009652736000, 94009652948991, +STORE, 94009652948992, 94009655070719, +ERASE, 94009652948992, 94009655070719, +STORE, 94009655046144, 94009655058431, +STORE, 94009655058432, 94009655070719, +STORE, 140295688531968, 140295690784767, +SNULL, 140295688675327, 140295690784767, +STORE, 140295688531968, 140295688675327, +STORE, 140295688675328, 140295690784767, +ERASE, 140295688675328, 140295690784767, +STORE, 140295690772480, 140295690780671, +STORE, 140295690780672, 140295690784767, +STORE, 140732350005248, 140732350009343, +STORE, 140732349992960, 140732350005247, +STORE, 140295690743808, 140295690772479, +STORE, 140295690735616, 140295690743807, +STORE, 140295686418432, 140295688531967, +SNULL, 140295686418432, 140295686430719, +STORE, 140295686430720, 140295688531967, +STORE, 140295686418432, 140295686430719, +SNULL, 140295688523775, 140295688531967, +STORE, 140295686430720, 140295688523775, +STORE, 140295688523776, 140295688531967, +ERASE, 140295688523776, 140295688531967, +STORE, 140295688523776, 140295688531967, +STORE, 140295682621440, 140295686418431, +SNULL, 140295682621440, 140295684280319, +STORE, 140295684280320, 140295686418431, +STORE, 140295682621440, 140295684280319, +SNULL, 140295686377471, 140295686418431, +STORE, 140295684280320, 140295686377471, +STORE, 140295686377472, 140295686418431, +SNULL, 140295686377472, 140295686402047, +STORE, 140295686402048, 140295686418431, +STORE, 140295686377472, 140295686402047, +ERASE, 140295686377472, 140295686402047, +STORE, 140295686377472, 140295686402047, +ERASE, 140295686402048, 140295686418431, +STORE, 140295686402048, 140295686418431, +STORE, 140295690727424, 140295690743807, +SNULL, 140295686393855, 140295686402047, +STORE, 140295686377472, 140295686393855, +STORE, 140295686393856, 140295686402047, +SNULL, 140295688527871, 140295688531967, +STORE, 140295688523776, 140295688527871, +STORE, 140295688527872, 140295688531967, +SNULL, 94009655050239, 94009655058431, +STORE, 94009655046144, 94009655050239, +STORE, 94009655050240, 94009655058431, +SNULL, 140295690776575, 140295690780671, +STORE, 140295690772480, 140295690776575, +STORE, 140295690776576, 140295690780671, +ERASE, 140295690743808, 140295690772479, +STORE, 94009672114176, 94009672249343, +STORE, 140295689043968, 140295690727423, +STORE, 94009672114176, 94009672384511, +STORE, 94009672114176, 94009672519679, +STORE, 140737488347136, 140737488351231, 
+STORE, 140737488343040, 140737488351231, +STORE, 140722376515584, 140737488351231, +SNULL, 140722376523775, 140737488351231, +STORE, 140722376515584, 140722376523775, +STORE, 140722376384512, 140722376523775, +STORE, 94089815773184, 94089818107903, +SNULL, 94089815986175, 94089818107903, +STORE, 94089815773184, 94089815986175, +STORE, 94089815986176, 94089818107903, +ERASE, 94089815986176, 94089818107903, +STORE, 94089818083328, 94089818095615, +STORE, 94089818095616, 94089818107903, +STORE, 140265595711488, 140265597964287, +SNULL, 140265595854847, 140265597964287, +STORE, 140265595711488, 140265595854847, +STORE, 140265595854848, 140265597964287, +ERASE, 140265595854848, 140265597964287, +STORE, 140265597952000, 140265597960191, +STORE, 140265597960192, 140265597964287, +STORE, 140722378297344, 140722378301439, +STORE, 140722378285056, 140722378297343, +STORE, 140265597923328, 140265597951999, +STORE, 140265597915136, 140265597923327, +STORE, 140265593597952, 140265595711487, +SNULL, 140265593597952, 140265593610239, +STORE, 140265593610240, 140265595711487, +STORE, 140265593597952, 140265593610239, +SNULL, 140265595703295, 140265595711487, +STORE, 140265593610240, 140265595703295, +STORE, 140265595703296, 140265595711487, +ERASE, 140265595703296, 140265595711487, +STORE, 140265595703296, 140265595711487, +STORE, 140265589800960, 140265593597951, +SNULL, 140265589800960, 140265591459839, +STORE, 140265591459840, 140265593597951, +STORE, 140265589800960, 140265591459839, +SNULL, 140265593556991, 140265593597951, +STORE, 140265591459840, 140265593556991, +STORE, 140265593556992, 140265593597951, +SNULL, 140265593556992, 140265593581567, +STORE, 140265593581568, 140265593597951, +STORE, 140265593556992, 140265593581567, +ERASE, 140265593556992, 140265593581567, +STORE, 140265593556992, 140265593581567, +ERASE, 140265593581568, 140265593597951, +STORE, 140265593581568, 140265593597951, +STORE, 140265597906944, 140265597923327, +SNULL, 140265593573375, 140265593581567, +STORE, 140265593556992, 140265593573375, +STORE, 140265593573376, 140265593581567, +SNULL, 140265595707391, 140265595711487, +STORE, 140265595703296, 140265595707391, +STORE, 140265595707392, 140265595711487, +SNULL, 94089818087423, 94089818095615, +STORE, 94089818083328, 94089818087423, +STORE, 94089818087424, 94089818095615, +SNULL, 140265597956095, 140265597960191, +STORE, 140265597952000, 140265597956095, +STORE, 140265597956096, 140265597960191, +ERASE, 140265597923328, 140265597951999, +STORE, 94089837146112, 94089837281279, +STORE, 140265596223488, 140265597906943, +STORE, 94089837146112, 94089837416447, +STORE, 94089837146112, 94089837551615, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735265218560, 140737488351231, +SNULL, 140735265226751, 140737488351231, +STORE, 140735265218560, 140735265226751, +STORE, 140735265087488, 140735265226751, +STORE, 94250422370304, 94250424705023, +SNULL, 94250422583295, 94250424705023, +STORE, 94250422370304, 94250422583295, +STORE, 94250422583296, 94250424705023, +ERASE, 94250422583296, 94250424705023, +STORE, 94250424680448, 94250424692735, +STORE, 94250424692736, 94250424705023, +STORE, 140344442474496, 140344444727295, +SNULL, 140344442617855, 140344444727295, +STORE, 140344442474496, 140344442617855, +STORE, 140344442617856, 140344444727295, +ERASE, 140344442617856, 140344444727295, +STORE, 140344444715008, 140344444723199, +STORE, 140344444723200, 140344444727295, +STORE, 140735265341440, 140735265345535, +STORE, 140735265329152, 
140735265341439, +STORE, 140344444686336, 140344444715007, +STORE, 140344444678144, 140344444686335, +STORE, 140344440360960, 140344442474495, +SNULL, 140344440360960, 140344440373247, +STORE, 140344440373248, 140344442474495, +STORE, 140344440360960, 140344440373247, +SNULL, 140344442466303, 140344442474495, +STORE, 140344440373248, 140344442466303, +STORE, 140344442466304, 140344442474495, +ERASE, 140344442466304, 140344442474495, +STORE, 140344442466304, 140344442474495, +STORE, 140344436563968, 140344440360959, +SNULL, 140344436563968, 140344438222847, +STORE, 140344438222848, 140344440360959, +STORE, 140344436563968, 140344438222847, +SNULL, 140344440319999, 140344440360959, +STORE, 140344438222848, 140344440319999, +STORE, 140344440320000, 140344440360959, +SNULL, 140344440320000, 140344440344575, +STORE, 140344440344576, 140344440360959, +STORE, 140344440320000, 140344440344575, +ERASE, 140344440320000, 140344440344575, +STORE, 140344440320000, 140344440344575, +ERASE, 140344440344576, 140344440360959, +STORE, 140344440344576, 140344440360959, +STORE, 140344444669952, 140344444686335, +SNULL, 140344440336383, 140344440344575, +STORE, 140344440320000, 140344440336383, +STORE, 140344440336384, 140344440344575, +SNULL, 140344442470399, 140344442474495, +STORE, 140344442466304, 140344442470399, +STORE, 140344442470400, 140344442474495, +SNULL, 94250424684543, 94250424692735, +STORE, 94250424680448, 94250424684543, +STORE, 94250424684544, 94250424692735, +SNULL, 140344444719103, 140344444723199, +STORE, 140344444715008, 140344444719103, +STORE, 140344444719104, 140344444723199, +ERASE, 140344444686336, 140344444715007, +STORE, 94250445512704, 94250445647871, +STORE, 140344442986496, 140344444669951, +STORE, 94250445512704, 94250445783039, +STORE, 94250445512704, 94250445918207, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140725762719744, 140737488351231, +SNULL, 140725762727935, 140737488351231, +STORE, 140725762719744, 140725762727935, +STORE, 140725762588672, 140725762727935, +STORE, 94819009097728, 94819011432447, +SNULL, 94819009310719, 94819011432447, +STORE, 94819009097728, 94819009310719, +STORE, 94819009310720, 94819011432447, +ERASE, 94819009310720, 94819011432447, +STORE, 94819011407872, 94819011420159, +STORE, 94819011420160, 94819011432447, +STORE, 139987985596416, 139987987849215, +SNULL, 139987985739775, 139987987849215, +STORE, 139987985596416, 139987985739775, +STORE, 139987985739776, 139987987849215, +ERASE, 139987985739776, 139987987849215, +STORE, 139987987836928, 139987987845119, +STORE, 139987987845120, 139987987849215, +STORE, 140725763072000, 140725763076095, +STORE, 140725763059712, 140725763071999, +STORE, 139987987808256, 139987987836927, +STORE, 139987987800064, 139987987808255, +STORE, 139987983482880, 139987985596415, +SNULL, 139987983482880, 139987983495167, +STORE, 139987983495168, 139987985596415, +STORE, 139987983482880, 139987983495167, +SNULL, 139987985588223, 139987985596415, +STORE, 139987983495168, 139987985588223, +STORE, 139987985588224, 139987985596415, +ERASE, 139987985588224, 139987985596415, +STORE, 139987985588224, 139987985596415, +STORE, 139987979685888, 139987983482879, +SNULL, 139987979685888, 139987981344767, +STORE, 139987981344768, 139987983482879, +STORE, 139987979685888, 139987981344767, +SNULL, 139987983441919, 139987983482879, +STORE, 139987981344768, 139987983441919, +STORE, 139987983441920, 139987983482879, +SNULL, 139987983441920, 139987983466495, +STORE, 139987983466496, 
139987983482879, +STORE, 139987983441920, 139987983466495, +ERASE, 139987983441920, 139987983466495, +STORE, 139987983441920, 139987983466495, +ERASE, 139987983466496, 139987983482879, +STORE, 139987983466496, 139987983482879, +STORE, 139987987791872, 139987987808255, +SNULL, 139987983458303, 139987983466495, +STORE, 139987983441920, 139987983458303, +STORE, 139987983458304, 139987983466495, +SNULL, 139987985592319, 139987985596415, +STORE, 139987985588224, 139987985592319, +STORE, 139987985592320, 139987985596415, +SNULL, 94819011411967, 94819011420159, +STORE, 94819011407872, 94819011411967, +STORE, 94819011411968, 94819011420159, +SNULL, 139987987841023, 139987987845119, +STORE, 139987987836928, 139987987841023, +STORE, 139987987841024, 139987987845119, +ERASE, 139987987808256, 139987987836927, +STORE, 94819028176896, 94819028312063, +STORE, 139987986108416, 139987987791871, +STORE, 94819028176896, 94819028447231, +STORE, 94819028176896, 94819028582399, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722475413504, 140737488351231, +SNULL, 140722475421695, 140737488351231, +STORE, 140722475413504, 140722475421695, +STORE, 140722475282432, 140722475421695, +STORE, 94620599119872, 94620601343999, +SNULL, 94620599230463, 94620601343999, +STORE, 94620599119872, 94620599230463, +STORE, 94620599230464, 94620601343999, +ERASE, 94620599230464, 94620601343999, +STORE, 94620601323520, 94620601335807, +STORE, 94620601335808, 94620601343999, +STORE, 139891763060736, 139891765313535, +SNULL, 139891763204095, 139891765313535, +STORE, 139891763060736, 139891763204095, +STORE, 139891763204096, 139891765313535, +ERASE, 139891763204096, 139891765313535, +STORE, 139891765301248, 139891765309439, +STORE, 139891765309440, 139891765313535, +STORE, 140722475700224, 140722475704319, +STORE, 140722475687936, 140722475700223, +STORE, 139891765272576, 139891765301247, +STORE, 139891765264384, 139891765272575, +STORE, 139891759263744, 139891763060735, +SNULL, 139891759263744, 139891760922623, +STORE, 139891760922624, 139891763060735, +STORE, 139891759263744, 139891760922623, +SNULL, 139891763019775, 139891763060735, +STORE, 139891760922624, 139891763019775, +STORE, 139891763019776, 139891763060735, +SNULL, 139891763019776, 139891763044351, +STORE, 139891763044352, 139891763060735, +STORE, 139891763019776, 139891763044351, +ERASE, 139891763019776, 139891763044351, +STORE, 139891763019776, 139891763044351, +ERASE, 139891763044352, 139891763060735, +STORE, 139891763044352, 139891763060735, +SNULL, 139891763036159, 139891763044351, +STORE, 139891763019776, 139891763036159, +STORE, 139891763036160, 139891763044351, +SNULL, 94620601331711, 94620601335807, +STORE, 94620601323520, 94620601331711, +STORE, 94620601331712, 94620601335807, +SNULL, 139891765305343, 139891765309439, +STORE, 139891765301248, 139891765305343, +STORE, 139891765305344, 139891765309439, +ERASE, 139891765272576, 139891765301247, +STORE, 94620610027520, 94620610162687, +STORE, 94031976210432, 94031976423423, +STORE, 94031978520576, 94031978524671, +STORE, 94031978524672, 94031978532863, +STORE, 94031978532864, 94031978545151, +STORE, 94031990398976, 94031992565759, +STORE, 140336240640000, 140336242298879, +STORE, 140336242298880, 140336244396031, +STORE, 140336244396032, 140336244412415, +STORE, 140336244412416, 140336244420607, +STORE, 140336244420608, 140336244436991, +STORE, 140336244436992, 140336244449279, +STORE, 140336244449280, 140336246542335, +STORE, 140336246542336, 140336246546431, +STORE, 
140336246546432, 140336246550527, +STORE, 140336246550528, 140336246693887, +STORE, 140336247062528, 140336248745983, +STORE, 140336248745984, 140336248762367, +STORE, 140336248791040, 140336248795135, +STORE, 140336248795136, 140336248799231, +STORE, 140336248799232, 140336248803327, +STORE, 140728500064256, 140728500203519, +STORE, 140728501501952, 140728501514239, +STORE, 140728501514240, 140728501518335, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140730503987200, 140737488351231, +SNULL, 140730503995391, 140737488351231, +STORE, 140730503987200, 140730503995391, +STORE, 140730503856128, 140730503995391, +STORE, 93866544205824, 93866546429951, +SNULL, 93866544316415, 93866546429951, +STORE, 93866544205824, 93866544316415, +STORE, 93866544316416, 93866546429951, +ERASE, 93866544316416, 93866546429951, +STORE, 93866546409472, 93866546421759, +STORE, 93866546421760, 93866546429951, +STORE, 140216311959552, 140216314212351, +SNULL, 140216312102911, 140216314212351, +STORE, 140216311959552, 140216312102911, +STORE, 140216312102912, 140216314212351, +ERASE, 140216312102912, 140216314212351, +STORE, 140216314200064, 140216314208255, +STORE, 140216314208256, 140216314212351, +STORE, 140730504626176, 140730504630271, +STORE, 140730504613888, 140730504626175, +STORE, 140216314171392, 140216314200063, +STORE, 140216314163200, 140216314171391, +STORE, 140216308162560, 140216311959551, +SNULL, 140216308162560, 140216309821439, +STORE, 140216309821440, 140216311959551, +STORE, 140216308162560, 140216309821439, +SNULL, 140216311918591, 140216311959551, +STORE, 140216309821440, 140216311918591, +STORE, 140216311918592, 140216311959551, +SNULL, 140216311918592, 140216311943167, +STORE, 140216311943168, 140216311959551, +STORE, 140216311918592, 140216311943167, +ERASE, 140216311918592, 140216311943167, +STORE, 140216311918592, 140216311943167, +ERASE, 140216311943168, 140216311959551, +STORE, 140216311943168, 140216311959551, +SNULL, 140216311934975, 140216311943167, +STORE, 140216311918592, 140216311934975, +STORE, 140216311934976, 140216311943167, +SNULL, 93866546417663, 93866546421759, +STORE, 93866546409472, 93866546417663, +STORE, 93866546417664, 93866546421759, +SNULL, 140216314204159, 140216314208255, +STORE, 140216314200064, 140216314204159, +STORE, 140216314204160, 140216314208255, +ERASE, 140216314171392, 140216314200063, +STORE, 93866550386688, 93866550521855, +STORE, 94074292674560, 94074292887551, +STORE, 94074294984704, 94074294988799, +STORE, 94074294988800, 94074294996991, +STORE, 94074294996992, 94074295009279, +STORE, 94074300219392, 94074301378559, +STORE, 139781563256832, 139781564915711, +STORE, 139781564915712, 139781567012863, +STORE, 139781567012864, 139781567029247, +STORE, 139781567029248, 139781567037439, +STORE, 139781567037440, 139781567053823, +STORE, 139781567053824, 139781567066111, +STORE, 139781567066112, 139781569159167, +STORE, 139781569159168, 139781569163263, +STORE, 139781569163264, 139781569167359, +STORE, 139781569167360, 139781569310719, +STORE, 139781569679360, 139781571362815, +STORE, 139781571362816, 139781571379199, +STORE, 139781571407872, 139781571411967, +STORE, 139781571411968, 139781571416063, +STORE, 139781571416064, 139781571420159, +STORE, 140723688488960, 140723688628223, +STORE, 140723689005056, 140723689017343, +STORE, 140723689017344, 140723689021439, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735189745664, 140737488351231, +SNULL, 140735189753855, 
140737488351231, +STORE, 140735189745664, 140735189753855, +STORE, 140735189614592, 140735189753855, +STORE, 94172072177664, 94172074512383, +SNULL, 94172072390655, 94172074512383, +STORE, 94172072177664, 94172072390655, +STORE, 94172072390656, 94172074512383, +ERASE, 94172072390656, 94172074512383, +STORE, 94172074487808, 94172074500095, +STORE, 94172074500096, 94172074512383, +STORE, 140687827263488, 140687829516287, +SNULL, 140687827406847, 140687829516287, +STORE, 140687827263488, 140687827406847, +STORE, 140687827406848, 140687829516287, +ERASE, 140687827406848, 140687829516287, +STORE, 140687829504000, 140687829512191, +STORE, 140687829512192, 140687829516287, +STORE, 140735189766144, 140735189770239, +STORE, 140735189753856, 140735189766143, +STORE, 140687829475328, 140687829503999, +STORE, 140687829467136, 140687829475327, +STORE, 140687825149952, 140687827263487, +SNULL, 140687825149952, 140687825162239, +STORE, 140687825162240, 140687827263487, +STORE, 140687825149952, 140687825162239, +SNULL, 140687827255295, 140687827263487, +STORE, 140687825162240, 140687827255295, +STORE, 140687827255296, 140687827263487, +ERASE, 140687827255296, 140687827263487, +STORE, 140687827255296, 140687827263487, +STORE, 140687821352960, 140687825149951, +SNULL, 140687821352960, 140687823011839, +STORE, 140687823011840, 140687825149951, +STORE, 140687821352960, 140687823011839, +SNULL, 140687825108991, 140687825149951, +STORE, 140687823011840, 140687825108991, +STORE, 140687825108992, 140687825149951, +SNULL, 140687825108992, 140687825133567, +STORE, 140687825133568, 140687825149951, +STORE, 140687825108992, 140687825133567, +ERASE, 140687825108992, 140687825133567, +STORE, 140687825108992, 140687825133567, +ERASE, 140687825133568, 140687825149951, +STORE, 140687825133568, 140687825149951, +STORE, 140687829458944, 140687829475327, +SNULL, 140687825125375, 140687825133567, +STORE, 140687825108992, 140687825125375, +STORE, 140687825125376, 140687825133567, +SNULL, 140687827259391, 140687827263487, +STORE, 140687827255296, 140687827259391, +STORE, 140687827259392, 140687827263487, +SNULL, 94172074491903, 94172074500095, +STORE, 94172074487808, 94172074491903, +STORE, 94172074491904, 94172074500095, +SNULL, 140687829508095, 140687829512191, +STORE, 140687829504000, 140687829508095, +STORE, 140687829508096, 140687829512191, +ERASE, 140687829475328, 140687829503999, +STORE, 94172092432384, 94172092567551, +STORE, 140687827775488, 140687829458943, +STORE, 94172092432384, 94172092702719, +STORE, 94172092432384, 94172092837887, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140737229504512, 140737488351231, +SNULL, 140737229512703, 140737488351231, +STORE, 140737229504512, 140737229512703, +STORE, 140737229373440, 140737229512703, +STORE, 94155246866432, 94155249090559, +SNULL, 94155246977023, 94155249090559, +STORE, 94155246866432, 94155246977023, +STORE, 94155246977024, 94155249090559, +ERASE, 94155246977024, 94155249090559, +STORE, 94155249070080, 94155249082367, +STORE, 94155249082368, 94155249090559, +STORE, 140640993693696, 140640995946495, +SNULL, 140640993837055, 140640995946495, +STORE, 140640993693696, 140640993837055, +STORE, 140640993837056, 140640995946495, +ERASE, 140640993837056, 140640995946495, +STORE, 140640995934208, 140640995942399, +STORE, 140640995942400, 140640995946495, +STORE, 140737230004224, 140737230008319, +STORE, 140737229991936, 140737230004223, +STORE, 140640995905536, 140640995934207, +STORE, 140640995897344, 140640995905535, +STORE, 
140640989896704, 140640993693695, +SNULL, 140640989896704, 140640991555583, +STORE, 140640991555584, 140640993693695, +STORE, 140640989896704, 140640991555583, +SNULL, 140640993652735, 140640993693695, +STORE, 140640991555584, 140640993652735, +STORE, 140640993652736, 140640993693695, +SNULL, 140640993652736, 140640993677311, +STORE, 140640993677312, 140640993693695, +STORE, 140640993652736, 140640993677311, +ERASE, 140640993652736, 140640993677311, +STORE, 140640993652736, 140640993677311, +ERASE, 140640993677312, 140640993693695, +STORE, 140640993677312, 140640993693695, +SNULL, 140640993669119, 140640993677311, +STORE, 140640993652736, 140640993669119, +STORE, 140640993669120, 140640993677311, +SNULL, 94155249078271, 94155249082367, +STORE, 94155249070080, 94155249078271, +STORE, 94155249078272, 94155249082367, +SNULL, 140640995938303, 140640995942399, +STORE, 140640995934208, 140640995938303, +STORE, 140640995938304, 140640995942399, +ERASE, 140640995905536, 140640995934207, +STORE, 94155281035264, 94155281170431, +STORE, 94088066453504, 94088066564095, +STORE, 94088068657152, 94088068665343, +STORE, 94088068665344, 94088068669439, +STORE, 94088068669440, 94088068677631, +STORE, 94088090214400, 94088090349567, +STORE, 140503024627712, 140503026286591, +STORE, 140503026286592, 140503028383743, +STORE, 140503028383744, 140503028400127, +STORE, 140503028400128, 140503028408319, +STORE, 140503028408320, 140503028424703, +STORE, 140503028424704, 140503028568063, +STORE, 140503030628352, 140503030636543, +STORE, 140503030665216, 140503030669311, +STORE, 140503030669312, 140503030673407, +STORE, 140503030673408, 140503030677503, +STORE, 140730894725120, 140730894864383, +STORE, 140730894880768, 140730894893055, +STORE, 140730894893056, 140730894897151, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140730434342912, 140737488351231, +SNULL, 140730434351103, 140737488351231, +STORE, 140730434342912, 140730434351103, +STORE, 140730434211840, 140730434351103, +STORE, 4194304, 5128191, +STORE, 7221248, 7241727, +STORE, 7241728, 7249919, +STORE, 140109041938432, 140109044191231, +SNULL, 140109042081791, 140109044191231, +STORE, 140109041938432, 140109042081791, +STORE, 140109042081792, 140109044191231, +ERASE, 140109042081792, 140109044191231, +STORE, 140109044178944, 140109044187135, +STORE, 140109044187136, 140109044191231, +STORE, 140730434850816, 140730434854911, +STORE, 140730434838528, 140730434850815, +STORE, 140109044150272, 140109044178943, +STORE, 140109044142080, 140109044150271, +STORE, 140109038776320, 140109041938431, +SNULL, 140109038776320, 140109039837183, +STORE, 140109039837184, 140109041938431, +STORE, 140109038776320, 140109039837183, +SNULL, 140109041930239, 140109041938431, +STORE, 140109039837184, 140109041930239, +STORE, 140109041930240, 140109041938431, +ERASE, 140109041930240, 140109041938431, +STORE, 140109041930240, 140109041938431, +STORE, 140109034979328, 140109038776319, +SNULL, 140109034979328, 140109036638207, +STORE, 140109036638208, 140109038776319, +STORE, 140109034979328, 140109036638207, +SNULL, 140109038735359, 140109038776319, +STORE, 140109036638208, 140109038735359, +STORE, 140109038735360, 140109038776319, +SNULL, 140109038735360, 140109038759935, +STORE, 140109038759936, 140109038776319, +STORE, 140109038735360, 140109038759935, +ERASE, 140109038735360, 140109038759935, +STORE, 140109038735360, 140109038759935, +ERASE, 140109038759936, 140109038776319, +STORE, 140109038759936, 140109038776319, +STORE, 
140109044129792, 140109044150271, +SNULL, 140109038751743, 140109038759935, +STORE, 140109038735360, 140109038751743, +STORE, 140109038751744, 140109038759935, +SNULL, 140109041934335, 140109041938431, +STORE, 140109041930240, 140109041934335, +STORE, 140109041934336, 140109041938431, +SNULL, 7233535, 7241727, +STORE, 7221248, 7233535, +STORE, 7233536, 7241727, +SNULL, 140109044183039, 140109044187135, +STORE, 140109044178944, 140109044183039, +STORE, 140109044183040, 140109044187135, +ERASE, 140109044150272, 140109044178943, +STORE, 20000768, 20135935, +STORE, 20000768, 20283391, +STORE, 140109042446336, 140109044129791, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140730853408768, 140737488351231, +SNULL, 140730853416959, 140737488351231, +STORE, 140730853408768, 140730853416959, +STORE, 140730853277696, 140730853416959, +STORE, 94865902977024, 94865905311743, +SNULL, 94865903190015, 94865905311743, +STORE, 94865902977024, 94865903190015, +STORE, 94865903190016, 94865905311743, +ERASE, 94865903190016, 94865905311743, +STORE, 94865905287168, 94865905299455, +STORE, 94865905299456, 94865905311743, +STORE, 139768865738752, 139768867991551, +SNULL, 139768865882111, 139768867991551, +STORE, 139768865738752, 139768865882111, +STORE, 139768865882112, 139768867991551, +ERASE, 139768865882112, 139768867991551, +STORE, 139768867979264, 139768867987455, +STORE, 139768867987456, 139768867991551, +STORE, 140730853957632, 140730853961727, +STORE, 140730853945344, 140730853957631, +STORE, 139768867950592, 139768867979263, +STORE, 139768867942400, 139768867950591, +STORE, 139768863625216, 139768865738751, +SNULL, 139768863625216, 139768863637503, +STORE, 139768863637504, 139768865738751, +STORE, 139768863625216, 139768863637503, +SNULL, 139768865730559, 139768865738751, +STORE, 139768863637504, 139768865730559, +STORE, 139768865730560, 139768865738751, +ERASE, 139768865730560, 139768865738751, +STORE, 139768865730560, 139768865738751, +STORE, 139768859828224, 139768863625215, +SNULL, 139768859828224, 139768861487103, +STORE, 139768861487104, 139768863625215, +STORE, 139768859828224, 139768861487103, +SNULL, 139768863584255, 139768863625215, +STORE, 139768861487104, 139768863584255, +STORE, 139768863584256, 139768863625215, +SNULL, 139768863584256, 139768863608831, +STORE, 139768863608832, 139768863625215, +STORE, 139768863584256, 139768863608831, +ERASE, 139768863584256, 139768863608831, +STORE, 139768863584256, 139768863608831, +ERASE, 139768863608832, 139768863625215, +STORE, 139768863608832, 139768863625215, +STORE, 139768867934208, 139768867950591, +SNULL, 139768863600639, 139768863608831, +STORE, 139768863584256, 139768863600639, +STORE, 139768863600640, 139768863608831, +SNULL, 139768865734655, 139768865738751, +STORE, 139768865730560, 139768865734655, +STORE, 139768865734656, 139768865738751, +SNULL, 94865905291263, 94865905299455, +STORE, 94865905287168, 94865905291263, +STORE, 94865905291264, 94865905299455, +SNULL, 139768867983359, 139768867987455, +STORE, 139768867979264, 139768867983359, +STORE, 139768867983360, 139768867987455, +ERASE, 139768867950592, 139768867979263, +STORE, 94865923670016, 94865923805183, +STORE, 139768866250752, 139768867934207, +STORE, 94865923670016, 94865923940351, +STORE, 94865923670016, 94865924075519, +STORE, 94865923670016, 94865924222975, +SNULL, 94865924210687, 94865924222975, +STORE, 94865923670016, 94865924210687, +STORE, 94865924210688, 94865924222975, +ERASE, 94865924210688, 94865924222975, +STORE, 
94865923670016, 94865924349951, +STORE, 94865923670016, 94865924493311, +STORE, 94865923670016, 94865924640767, +SNULL, 94865924603903, 94865924640767, +STORE, 94865923670016, 94865924603903, +STORE, 94865924603904, 94865924640767, +ERASE, 94865924603904, 94865924640767, +STORE, 94865923670016, 94865924747263, +STORE, 94865923670016, 94865924898815, +SNULL, 94865924874239, 94865924898815, +STORE, 94865923670016, 94865924874239, +STORE, 94865924874240, 94865924898815, +ERASE, 94865924874240, 94865924898815, +STORE, 94865923670016, 94865925025791, +SNULL, 94865925013503, 94865925025791, +STORE, 94865923670016, 94865925013503, +STORE, 94865925013504, 94865925025791, +ERASE, 94865925013504, 94865925025791, +SNULL, 94865924988927, 94865925013503, +STORE, 94865923670016, 94865924988927, +STORE, 94865924988928, 94865925013503, +ERASE, 94865924988928, 94865925013503, +STORE, 94865923670016, 94865925152767, +SNULL, 94865925136383, 94865925152767, +STORE, 94865923670016, 94865925136383, +STORE, 94865925136384, 94865925152767, +ERASE, 94865925136384, 94865925152767, +STORE, 94865923670016, 94865925292031, +SNULL, 94865925279743, 94865925292031, +STORE, 94865923670016, 94865925279743, +STORE, 94865925279744, 94865925292031, +ERASE, 94865925279744, 94865925292031, +SNULL, 94865925255167, 94865925279743, +STORE, 94865923670016, 94865925255167, +STORE, 94865925255168, 94865925279743, +ERASE, 94865925255168, 94865925279743, +STORE, 94865923670016, 94865925406719, +SNULL, 94865925394431, 94865925406719, +STORE, 94865923670016, 94865925394431, +STORE, 94865925394432, 94865925406719, +ERASE, 94865925394432, 94865925406719, +STORE, 94865923670016, 94865925545983, +SNULL, 94865925533695, 94865925545983, +STORE, 94865923670016, 94865925533695, +STORE, 94865925533696, 94865925545983, +ERASE, 94865925533696, 94865925545983, +SNULL, 94865925492735, 94865925533695, +STORE, 94865923670016, 94865925492735, +STORE, 94865925492736, 94865925533695, +ERASE, 94865925492736, 94865925533695, +STORE, 94865923670016, 94865925627903, +SNULL, 94865925599231, 94865925627903, +STORE, 94865923670016, 94865925599231, +STORE, 94865925599232, 94865925627903, +ERASE, 94865925599232, 94865925627903, +STORE, 94865923670016, 94865925738495, +SNULL, 94865925726207, 94865925738495, +STORE, 94865923670016, 94865925726207, +STORE, 94865925726208, 94865925738495, +ERASE, 94865925726208, 94865925738495, +STORE, 94865923670016, 94865925877759, +SNULL, 94865925865471, 94865925877759, +STORE, 94865923670016, 94865925865471, +STORE, 94865925865472, 94865925877759, +ERASE, 94865925865472, 94865925877759, +STORE, 94865923670016, 94865926021119, +SNULL, 94865926008831, 94865926021119, +STORE, 94865923670016, 94865926008831, +STORE, 94865926008832, 94865926021119, +ERASE, 94865926008832, 94865926021119, +SNULL, 94865925971967, 94865926008831, +STORE, 94865923670016, 94865925971967, +STORE, 94865925971968, 94865926008831, +ERASE, 94865925971968, 94865926008831, +STORE, 94865923670016, 94865926115327, +STORE, 94865923670016, 94865926254591, +SNULL, 94865926246399, 94865926254591, +STORE, 94865923670016, 94865926246399, +STORE, 94865926246400, 94865926254591, +ERASE, 94865926246400, 94865926254591, +STORE, 94865923670016, 94865926385663, +STORE, 94865923670016, 94865926537215, +STORE, 94865923670016, 94865926672383, +STORE, 94865923670016, 94865926815743, +STORE, 94865923670016, 94865926955007, +STORE, 94865923670016, 94865927094271, +STORE, 94865923670016, 94865927233535, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 
140737488351231, +STORE, 140731148435456, 140737488351231, +SNULL, 140731148443647, 140737488351231, +STORE, 140731148435456, 140731148443647, +STORE, 140731148304384, 140731148443647, +STORE, 94090775400448, 94090777735167, +SNULL, 94090775613439, 94090777735167, +STORE, 94090775400448, 94090775613439, +STORE, 94090775613440, 94090777735167, +ERASE, 94090775613440, 94090777735167, +STORE, 94090777710592, 94090777722879, +STORE, 94090777722880, 94090777735167, +STORE, 140301090283520, 140301092536319, +SNULL, 140301090426879, 140301092536319, +STORE, 140301090283520, 140301090426879, +STORE, 140301090426880, 140301092536319, +ERASE, 140301090426880, 140301092536319, +STORE, 140301092524032, 140301092532223, +STORE, 140301092532224, 140301092536319, +STORE, 140731148570624, 140731148574719, +STORE, 140731148558336, 140731148570623, +STORE, 140301092495360, 140301092524031, +STORE, 140301092487168, 140301092495359, +STORE, 140301088169984, 140301090283519, +SNULL, 140301088169984, 140301088182271, +STORE, 140301088182272, 140301090283519, +STORE, 140301088169984, 140301088182271, +SNULL, 140301090275327, 140301090283519, +STORE, 140301088182272, 140301090275327, +STORE, 140301090275328, 140301090283519, +ERASE, 140301090275328, 140301090283519, +STORE, 140301090275328, 140301090283519, +STORE, 140301084372992, 140301088169983, +SNULL, 140301084372992, 140301086031871, +STORE, 140301086031872, 140301088169983, +STORE, 140301084372992, 140301086031871, +SNULL, 140301088129023, 140301088169983, +STORE, 140301086031872, 140301088129023, +STORE, 140301088129024, 140301088169983, +SNULL, 140301088129024, 140301088153599, +STORE, 140301088153600, 140301088169983, +STORE, 140301088129024, 140301088153599, +ERASE, 140301088129024, 140301088153599, +STORE, 140301088129024, 140301088153599, +ERASE, 140301088153600, 140301088169983, +STORE, 140301088153600, 140301088169983, +STORE, 140301092478976, 140301092495359, +SNULL, 140301088145407, 140301088153599, +STORE, 140301088129024, 140301088145407, +STORE, 140301088145408, 140301088153599, +SNULL, 140301090279423, 140301090283519, +STORE, 140301090275328, 140301090279423, +STORE, 140301090279424, 140301090283519, +SNULL, 94090777714687, 94090777722879, +STORE, 94090777710592, 94090777714687, +STORE, 94090777714688, 94090777722879, +SNULL, 140301092528127, 140301092532223, +STORE, 140301092524032, 140301092528127, +STORE, 140301092528128, 140301092532223, +ERASE, 140301092495360, 140301092524031, +STORE, 94090794590208, 94090794725375, +STORE, 140301090795520, 140301092478975, +STORE, 94090794590208, 94090794860543, +STORE, 94090794590208, 94090794995711, +STORE, 94090794590208, 94090795163647, +SNULL, 94090795139071, 94090795163647, +STORE, 94090794590208, 94090795139071, +STORE, 94090795139072, 94090795163647, +ERASE, 94090795139072, 94090795163647, +STORE, 94090794590208, 94090795278335, +STORE, 94090794590208, 94090795425791, +SNULL, 94090795388927, 94090795425791, +STORE, 94090794590208, 94090795388927, +STORE, 94090795388928, 94090795425791, +ERASE, 94090795388928, 94090795425791, +STORE, 94090794590208, 94090795528191, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733084430336, 140737488351231, +SNULL, 140733084438527, 140737488351231, +STORE, 140733084430336, 140733084438527, +STORE, 140733084299264, 140733084438527, +STORE, 94116169183232, 94116171517951, +SNULL, 94116169396223, 94116171517951, +STORE, 94116169183232, 94116169396223, +STORE, 94116169396224, 94116171517951, +ERASE, 94116169396224, 
94116171517951, +STORE, 94116171493376, 94116171505663, +STORE, 94116171505664, 94116171517951, +STORE, 139772214128640, 139772216381439, +SNULL, 139772214271999, 139772216381439, +STORE, 139772214128640, 139772214271999, +STORE, 139772214272000, 139772216381439, +ERASE, 139772214272000, 139772216381439, +STORE, 139772216369152, 139772216377343, +STORE, 139772216377344, 139772216381439, +STORE, 140733085270016, 140733085274111, +STORE, 140733085257728, 140733085270015, +STORE, 139772216340480, 139772216369151, +STORE, 139772216332288, 139772216340479, +STORE, 139772212015104, 139772214128639, +SNULL, 139772212015104, 139772212027391, +STORE, 139772212027392, 139772214128639, +STORE, 139772212015104, 139772212027391, +SNULL, 139772214120447, 139772214128639, +STORE, 139772212027392, 139772214120447, +STORE, 139772214120448, 139772214128639, +ERASE, 139772214120448, 139772214128639, +STORE, 139772214120448, 139772214128639, +STORE, 139772208218112, 139772212015103, +SNULL, 139772208218112, 139772209876991, +STORE, 139772209876992, 139772212015103, +STORE, 139772208218112, 139772209876991, +SNULL, 139772211974143, 139772212015103, +STORE, 139772209876992, 139772211974143, +STORE, 139772211974144, 139772212015103, +SNULL, 139772211974144, 139772211998719, +STORE, 139772211998720, 139772212015103, +STORE, 139772211974144, 139772211998719, +ERASE, 139772211974144, 139772211998719, +STORE, 139772211974144, 139772211998719, +ERASE, 139772211998720, 139772212015103, +STORE, 139772211998720, 139772212015103, +STORE, 139772216324096, 139772216340479, +SNULL, 139772211990527, 139772211998719, +STORE, 139772211974144, 139772211990527, +STORE, 139772211990528, 139772211998719, +SNULL, 139772214124543, 139772214128639, +STORE, 139772214120448, 139772214124543, +STORE, 139772214124544, 139772214128639, +SNULL, 94116171497471, 94116171505663, +STORE, 94116171493376, 94116171497471, +STORE, 94116171497472, 94116171505663, +SNULL, 139772216373247, 139772216377343, +STORE, 139772216369152, 139772216373247, +STORE, 139772216373248, 139772216377343, +ERASE, 139772216340480, 139772216369151, +STORE, 94116199383040, 94116199518207, +STORE, 139772214640640, 139772216324095, +STORE, 94116199383040, 94116199653375, +STORE, 94116199383040, 94116199788543, +STORE, 140737488347136, 140737488351231, +STORE, 140726067826688, 140737488351231, +SNULL, 140726067830783, 140737488351231, +STORE, 140726067826688, 140726067830783, +STORE, 140726067695616, 140726067830783, +STORE, 94535150673920, 94535152898047, +SNULL, 94535150784511, 94535152898047, +STORE, 94535150673920, 94535150784511, +STORE, 94535150784512, 94535152898047, +ERASE, 94535150784512, 94535152898047, +STORE, 94535152877568, 94535152889855, +STORE, 94535152889856, 94535152898047, +STORE, 140381257314304, 140381259567103, +SNULL, 140381257457663, 140381259567103, +STORE, 140381257314304, 140381257457663, +STORE, 140381257457664, 140381259567103, +ERASE, 140381257457664, 140381259567103, +STORE, 140381259554816, 140381259563007, +STORE, 140381259563008, 140381259567103, +STORE, 140726068060160, 140726068064255, +STORE, 140726068047872, 140726068060159, +STORE, 140381259526144, 140381259554815, +STORE, 140381259517952, 140381259526143, +STORE, 140381253517312, 140381257314303, +SNULL, 140381253517312, 140381255176191, +STORE, 140381255176192, 140381257314303, +STORE, 140381253517312, 140381255176191, +SNULL, 140381257273343, 140381257314303, +STORE, 140381255176192, 140381257273343, +STORE, 140381257273344, 140381257314303, +SNULL, 140381257273344, 
140381257297919, +STORE, 140381257297920, 140381257314303, +STORE, 140381257273344, 140381257297919, +ERASE, 140381257273344, 140381257297919, +STORE, 140381257273344, 140381257297919, +ERASE, 140381257297920, 140381257314303, +STORE, 140381257297920, 140381257314303, +SNULL, 140381257289727, 140381257297919, +STORE, 140381257273344, 140381257289727, +STORE, 140381257289728, 140381257297919, +SNULL, 94535152885759, 94535152889855, +STORE, 94535152877568, 94535152885759, +STORE, 94535152885760, 94535152889855, +SNULL, 140381259558911, 140381259563007, +STORE, 140381259554816, 140381259558911, +STORE, 140381259558912, 140381259563007, +ERASE, 140381259526144, 140381259554815, +STORE, 94535186296832, 94535186431999, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140729189425152, 140737488351231, +SNULL, 140729189433343, 140737488351231, +STORE, 140729189425152, 140729189433343, +STORE, 140729189294080, 140729189433343, +STORE, 94428200128512, 94428202352639, +SNULL, 94428200239103, 94428202352639, +STORE, 94428200128512, 94428200239103, +STORE, 94428200239104, 94428202352639, +ERASE, 94428200239104, 94428202352639, +STORE, 94428202332160, 94428202344447, +STORE, 94428202344448, 94428202352639, +STORE, 139707216986112, 139707219238911, +SNULL, 139707217129471, 139707219238911, +STORE, 139707216986112, 139707217129471, +STORE, 139707217129472, 139707219238911, +ERASE, 139707217129472, 139707219238911, +STORE, 139707219226624, 139707219234815, +STORE, 139707219234816, 139707219238911, +STORE, 140729189785600, 140729189789695, +STORE, 140729189773312, 140729189785599, +STORE, 139707219197952, 139707219226623, +STORE, 139707219189760, 139707219197951, +STORE, 139707213189120, 139707216986111, +SNULL, 139707213189120, 139707214847999, +STORE, 139707214848000, 139707216986111, +STORE, 139707213189120, 139707214847999, +SNULL, 139707216945151, 139707216986111, +STORE, 139707214848000, 139707216945151, +STORE, 139707216945152, 139707216986111, +SNULL, 139707216945152, 139707216969727, +STORE, 139707216969728, 139707216986111, +STORE, 139707216945152, 139707216969727, +ERASE, 139707216945152, 139707216969727, +STORE, 139707216945152, 139707216969727, +ERASE, 139707216969728, 139707216986111, +STORE, 139707216969728, 139707216986111, +SNULL, 139707216961535, 139707216969727, +STORE, 139707216945152, 139707216961535, +STORE, 139707216961536, 139707216969727, +SNULL, 94428202340351, 94428202344447, +STORE, 94428202332160, 94428202340351, +STORE, 94428202340352, 94428202344447, +SNULL, 139707219230719, 139707219234815, +STORE, 139707219226624, 139707219230719, +STORE, 139707219230720, 139707219234815, +ERASE, 139707219197952, 139707219226623, +STORE, 94428208599040, 94428208734207, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722000953344, 140737488351231, +SNULL, 140722000961535, 140737488351231, +STORE, 140722000953344, 140722000961535, +STORE, 140722000822272, 140722000961535, +STORE, 94636494757888, 94636496982015, +SNULL, 94636494868479, 94636496982015, +STORE, 94636494757888, 94636494868479, +STORE, 94636494868480, 94636496982015, +ERASE, 94636494868480, 94636496982015, +STORE, 94636496961536, 94636496973823, +STORE, 94636496973824, 94636496982015, +STORE, 140142275100672, 140142277353471, +SNULL, 140142275244031, 140142277353471, +STORE, 140142275100672, 140142275244031, +STORE, 140142275244032, 140142277353471, +ERASE, 140142275244032, 140142277353471, +STORE, 140142277341184, 140142277349375, +STORE, 
140142277349376, 140142277353471, +STORE, 140722002747392, 140722002751487, +STORE, 140722002735104, 140722002747391, +STORE, 140142277312512, 140142277341183, +STORE, 140142277304320, 140142277312511, +STORE, 140142271303680, 140142275100671, +SNULL, 140142271303680, 140142272962559, +STORE, 140142272962560, 140142275100671, +STORE, 140142271303680, 140142272962559, +SNULL, 140142275059711, 140142275100671, +STORE, 140142272962560, 140142275059711, +STORE, 140142275059712, 140142275100671, +SNULL, 140142275059712, 140142275084287, +STORE, 140142275084288, 140142275100671, +STORE, 140142275059712, 140142275084287, +ERASE, 140142275059712, 140142275084287, +STORE, 140142275059712, 140142275084287, +ERASE, 140142275084288, 140142275100671, +STORE, 140142275084288, 140142275100671, +SNULL, 140142275076095, 140142275084287, +STORE, 140142275059712, 140142275076095, +STORE, 140142275076096, 140142275084287, +SNULL, 94636496969727, 94636496973823, +STORE, 94636496961536, 94636496969727, +STORE, 94636496969728, 94636496973823, +SNULL, 140142277345279, 140142277349375, +STORE, 140142277341184, 140142277345279, +STORE, 140142277345280, 140142277349375, +ERASE, 140142277312512, 140142277341183, +STORE, 94636516286464, 94636516421631, +STORE, 94071103692800, 94071103905791, +STORE, 94071106002944, 94071106007039, +STORE, 94071106007040, 94071106015231, +STORE, 94071106015232, 94071106027519, +STORE, 94071138521088, 94071140368383, +STORE, 140145668190208, 140145669849087, +STORE, 140145669849088, 140145671946239, +STORE, 140145671946240, 140145671962623, +STORE, 140145671962624, 140145671970815, +STORE, 140145671970816, 140145671987199, +STORE, 140145671987200, 140145671999487, +STORE, 140145671999488, 140145674092543, +STORE, 140145674092544, 140145674096639, +STORE, 140145674096640, 140145674100735, +STORE, 140145674100736, 140145674244095, +STORE, 140145674612736, 140145676296191, +STORE, 140145676296192, 140145676312575, +STORE, 140145676341248, 140145676345343, +STORE, 140145676345344, 140145676349439, +STORE, 140145676349440, 140145676353535, +STORE, 140734927740928, 140734927880191, +STORE, 140734928842752, 140734928855039, +STORE, 140734928855040, 140734928859135, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722342535168, 140737488351231, +SNULL, 140722342543359, 140737488351231, +STORE, 140722342535168, 140722342543359, +STORE, 140722342404096, 140722342543359, +STORE, 94399699714048, 94399702048767, +SNULL, 94399699927039, 94399702048767, +STORE, 94399699714048, 94399699927039, +STORE, 94399699927040, 94399702048767, +ERASE, 94399699927040, 94399702048767, +STORE, 94399702024192, 94399702036479, +STORE, 94399702036480, 94399702048767, +STORE, 139811024748544, 139811027001343, +SNULL, 139811024891903, 139811027001343, +STORE, 139811024748544, 139811024891903, +STORE, 139811024891904, 139811027001343, +ERASE, 139811024891904, 139811027001343, +STORE, 139811026989056, 139811026997247, +STORE, 139811026997248, 139811027001343, +STORE, 140722342707200, 140722342711295, +STORE, 140722342694912, 140722342707199, +STORE, 139811026960384, 139811026989055, +STORE, 139811026952192, 139811026960383, +STORE, 139811022635008, 139811024748543, +SNULL, 139811022635008, 139811022647295, +STORE, 139811022647296, 139811024748543, +STORE, 139811022635008, 139811022647295, +SNULL, 139811024740351, 139811024748543, +STORE, 139811022647296, 139811024740351, +STORE, 139811024740352, 139811024748543, +ERASE, 139811024740352, 139811024748543, +STORE, 139811024740352, 
139811024748543, +STORE, 139811018838016, 139811022635007, +SNULL, 139811018838016, 139811020496895, +STORE, 139811020496896, 139811022635007, +STORE, 139811018838016, 139811020496895, +SNULL, 139811022594047, 139811022635007, +STORE, 139811020496896, 139811022594047, +STORE, 139811022594048, 139811022635007, +SNULL, 139811022594048, 139811022618623, +STORE, 139811022618624, 139811022635007, +STORE, 139811022594048, 139811022618623, +ERASE, 139811022594048, 139811022618623, +STORE, 139811022594048, 139811022618623, +ERASE, 139811022618624, 139811022635007, +STORE, 139811022618624, 139811022635007, +STORE, 139811026944000, 139811026960383, +SNULL, 139811022610431, 139811022618623, +STORE, 139811022594048, 139811022610431, +STORE, 139811022610432, 139811022618623, +SNULL, 139811024744447, 139811024748543, +STORE, 139811024740352, 139811024744447, +STORE, 139811024744448, 139811024748543, +SNULL, 94399702028287, 94399702036479, +STORE, 94399702024192, 94399702028287, +STORE, 94399702028288, 94399702036479, +SNULL, 139811026993151, 139811026997247, +STORE, 139811026989056, 139811026993151, +STORE, 139811026993152, 139811026997247, +ERASE, 139811026960384, 139811026989055, +STORE, 94399723880448, 94399724015615, +STORE, 139811025260544, 139811026943999, +STORE, 94399723880448, 94399724150783, +STORE, 94399723880448, 94399724285951, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140735364939776, 140737488351231, +SNULL, 140735364947967, 140737488351231, +STORE, 140735364939776, 140735364947967, +STORE, 140735364808704, 140735364947967, +STORE, 94421528674304, 94421531009023, +SNULL, 94421528887295, 94421531009023, +STORE, 94421528674304, 94421528887295, +STORE, 94421528887296, 94421531009023, +ERASE, 94421528887296, 94421531009023, +STORE, 94421530984448, 94421530996735, +STORE, 94421530996736, 94421531009023, +STORE, 140162004742144, 140162006994943, +SNULL, 140162004885503, 140162006994943, +STORE, 140162004742144, 140162004885503, +STORE, 140162004885504, 140162006994943, +ERASE, 140162004885504, 140162006994943, +STORE, 140162006982656, 140162006990847, +STORE, 140162006990848, 140162006994943, +STORE, 140735365402624, 140735365406719, +STORE, 140735365390336, 140735365402623, +STORE, 140162006953984, 140162006982655, +STORE, 140162006945792, 140162006953983, +STORE, 140162002628608, 140162004742143, +SNULL, 140162002628608, 140162002640895, +STORE, 140162002640896, 140162004742143, +STORE, 140162002628608, 140162002640895, +SNULL, 140162004733951, 140162004742143, +STORE, 140162002640896, 140162004733951, +STORE, 140162004733952, 140162004742143, +ERASE, 140162004733952, 140162004742143, +STORE, 140162004733952, 140162004742143, +STORE, 140161998831616, 140162002628607, +SNULL, 140161998831616, 140162000490495, +STORE, 140162000490496, 140162002628607, +STORE, 140161998831616, 140162000490495, +SNULL, 140162002587647, 140162002628607, +STORE, 140162000490496, 140162002587647, +STORE, 140162002587648, 140162002628607, +SNULL, 140162002587648, 140162002612223, +STORE, 140162002612224, 140162002628607, +STORE, 140162002587648, 140162002612223, +ERASE, 140162002587648, 140162002612223, +STORE, 140162002587648, 140162002612223, +ERASE, 140162002612224, 140162002628607, +STORE, 140162002612224, 140162002628607, +STORE, 140162006937600, 140162006953983, +SNULL, 140162002604031, 140162002612223, +STORE, 140162002587648, 140162002604031, +STORE, 140162002604032, 140162002612223, +SNULL, 140162004738047, 140162004742143, +STORE, 140162004733952, 
140162004738047, +STORE, 140162004738048, 140162004742143, +SNULL, 94421530988543, 94421530996735, +STORE, 94421530984448, 94421530988543, +STORE, 94421530988544, 94421530996735, +SNULL, 140162006986751, 140162006990847, +STORE, 140162006982656, 140162006986751, +STORE, 140162006986752, 140162006990847, +ERASE, 140162006953984, 140162006982655, +STORE, 94421551697920, 94421551833087, +STORE, 140162005254144, 140162006937599, +STORE, 94421551697920, 94421551968255, +STORE, 94421551697920, 94421552103423, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140733498486784, 140737488351231, +SNULL, 140733498494975, 140737488351231, +STORE, 140733498486784, 140733498494975, +STORE, 140733498355712, 140733498494975, +STORE, 94567985836032, 94567988170751, +SNULL, 94567986049023, 94567988170751, +STORE, 94567985836032, 94567986049023, +STORE, 94567986049024, 94567988170751, +ERASE, 94567986049024, 94567988170751, +STORE, 94567988146176, 94567988158463, +STORE, 94567988158464, 94567988170751, +STORE, 139634278572032, 139634280824831, +SNULL, 139634278715391, 139634280824831, +STORE, 139634278572032, 139634278715391, +STORE, 139634278715392, 139634280824831, +ERASE, 139634278715392, 139634280824831, +STORE, 139634280812544, 139634280820735, +STORE, 139634280820736, 139634280824831, +STORE, 140733498544128, 140733498548223, +STORE, 140733498531840, 140733498544127, +STORE, 139634280783872, 139634280812543, +STORE, 139634280775680, 139634280783871, +STORE, 139634276458496, 139634278572031, +SNULL, 139634276458496, 139634276470783, +STORE, 139634276470784, 139634278572031, +STORE, 139634276458496, 139634276470783, +SNULL, 139634278563839, 139634278572031, +STORE, 139634276470784, 139634278563839, +STORE, 139634278563840, 139634278572031, +ERASE, 139634278563840, 139634278572031, +STORE, 139634278563840, 139634278572031, +STORE, 139634272661504, 139634276458495, +SNULL, 139634272661504, 139634274320383, +STORE, 139634274320384, 139634276458495, +STORE, 139634272661504, 139634274320383, +SNULL, 139634276417535, 139634276458495, +STORE, 139634274320384, 139634276417535, +STORE, 139634276417536, 139634276458495, +SNULL, 139634276417536, 139634276442111, +STORE, 139634276442112, 139634276458495, +STORE, 139634276417536, 139634276442111, +ERASE, 139634276417536, 139634276442111, +STORE, 139634276417536, 139634276442111, +ERASE, 139634276442112, 139634276458495, +STORE, 139634276442112, 139634276458495, +STORE, 139634280767488, 139634280783871, +SNULL, 139634276433919, 139634276442111, +STORE, 139634276417536, 139634276433919, +STORE, 139634276433920, 139634276442111, +SNULL, 139634278567935, 139634278572031, +STORE, 139634278563840, 139634278567935, +STORE, 139634278567936, 139634278572031, +SNULL, 94567988150271, 94567988158463, +STORE, 94567988146176, 94567988150271, +STORE, 94567988150272, 94567988158463, +SNULL, 139634280816639, 139634280820735, +STORE, 139634280812544, 139634280816639, +STORE, 139634280816640, 139634280820735, +ERASE, 139634280783872, 139634280812543, +STORE, 94567996379136, 94567996514303, +STORE, 139634279084032, 139634280767487, +STORE, 94567996379136, 94567996649471, +STORE, 94567996379136, 94567996784639, +STORE, 94567996379136, 94567996960767, +SNULL, 94567996932095, 94567996960767, +STORE, 94567996379136, 94567996932095, +STORE, 94567996932096, 94567996960767, +ERASE, 94567996932096, 94567996960767, +STORE, 94567996379136, 94567997071359, +STORE, 94567996379136, 94567997206527, +SNULL, 94567997186047, 94567997206527, +STORE, 
94567996379136, 94567997186047, +STORE, 94567997186048, 94567997206527, +ERASE, 94567997186048, 94567997206527, +STORE, 94567996379136, 94567997358079, +STORE, 94567996379136, 94567997493247, +SNULL, 94567997476863, 94567997493247, +STORE, 94567996379136, 94567997476863, +STORE, 94567997476864, 94567997493247, +ERASE, 94567997476864, 94567997493247, +STORE, 94567996379136, 94567997612031, +STORE, 94567996379136, 94567997767679, +SNULL, 94567997739007, 94567997767679, +STORE, 94567996379136, 94567997739007, +STORE, 94567997739008, 94567997767679, +ERASE, 94567997739008, 94567997767679, +SNULL, 94567997698047, 94567997739007, +STORE, 94567996379136, 94567997698047, +STORE, 94567997698048, 94567997739007, +ERASE, 94567997698048, 94567997739007, +STORE, 94567996379136, 94567997853695, +STORE, 94567996379136, 94567997988863, +STORE, 94567996379136, 94567998132223, +STORE, 94567996379136, 94567998275583, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140723667759104, 140737488351231, +SNULL, 140723667767295, 140737488351231, +STORE, 140723667759104, 140723667767295, +STORE, 140723667628032, 140723667767295, +STORE, 94231598800896, 94231601135615, +SNULL, 94231599013887, 94231601135615, +STORE, 94231598800896, 94231599013887, +STORE, 94231599013888, 94231601135615, +ERASE, 94231599013888, 94231601135615, +STORE, 94231601111040, 94231601123327, +STORE, 94231601123328, 94231601135615, +STORE, 140269472649216, 140269474902015, +SNULL, 140269472792575, 140269474902015, +STORE, 140269472649216, 140269472792575, +STORE, 140269472792576, 140269474902015, +ERASE, 140269472792576, 140269474902015, +STORE, 140269474889728, 140269474897919, +STORE, 140269474897920, 140269474902015, +STORE, 140723667836928, 140723667841023, +STORE, 140723667824640, 140723667836927, +STORE, 140269474861056, 140269474889727, +STORE, 140269474852864, 140269474861055, +STORE, 140269470535680, 140269472649215, +SNULL, 140269470535680, 140269470547967, +STORE, 140269470547968, 140269472649215, +STORE, 140269470535680, 140269470547967, +SNULL, 140269472641023, 140269472649215, +STORE, 140269470547968, 140269472641023, +STORE, 140269472641024, 140269472649215, +ERASE, 140269472641024, 140269472649215, +STORE, 140269472641024, 140269472649215, +STORE, 140269466738688, 140269470535679, +SNULL, 140269466738688, 140269468397567, +STORE, 140269468397568, 140269470535679, +STORE, 140269466738688, 140269468397567, +SNULL, 140269470494719, 140269470535679, +STORE, 140269468397568, 140269470494719, +STORE, 140269470494720, 140269470535679, +SNULL, 140269470494720, 140269470519295, +STORE, 140269470519296, 140269470535679, +STORE, 140269470494720, 140269470519295, +ERASE, 140269470494720, 140269470519295, +STORE, 140269470494720, 140269470519295, +ERASE, 140269470519296, 140269470535679, +STORE, 140269470519296, 140269470535679, +STORE, 140269474844672, 140269474861055, +SNULL, 140269470511103, 140269470519295, +STORE, 140269470494720, 140269470511103, +STORE, 140269470511104, 140269470519295, +SNULL, 140269472645119, 140269472649215, +STORE, 140269472641024, 140269472645119, +STORE, 140269472645120, 140269472649215, +SNULL, 94231601115135, 94231601123327, +STORE, 94231601111040, 94231601115135, +STORE, 94231601115136, 94231601123327, +SNULL, 140269474893823, 140269474897919, +STORE, 140269474889728, 140269474893823, +STORE, 140269474893824, 140269474897919, +ERASE, 140269474861056, 140269474889727, +STORE, 94231626592256, 94231626727423, +STORE, 140269473161216, 140269474844671, +STORE, 
94231626592256, 94231626862591, +STORE, 94231626592256, 94231626997759, +STORE, 94327178862592, 94327179075583, +STORE, 94327181172736, 94327181176831, +STORE, 94327181176832, 94327181185023, +STORE, 94327181185024, 94327181197311, +STORE, 94327185715200, 94327186685951, +STORE, 140172071755776, 140172073414655, +STORE, 140172073414656, 140172075511807, +STORE, 140172075511808, 140172075528191, +STORE, 140172075528192, 140172075536383, +STORE, 140172075536384, 140172075552767, +STORE, 140172075552768, 140172075565055, +STORE, 140172075565056, 140172077658111, +STORE, 140172077658112, 140172077662207, +STORE, 140172077662208, 140172077666303, +STORE, 140172077666304, 140172077809663, +STORE, 140172078178304, 140172079861759, +STORE, 140172079861760, 140172079878143, +STORE, 140172079878144, 140172079906815, +STORE, 140172079906816, 140172079910911, +STORE, 140172079910912, 140172079915007, +STORE, 140172079915008, 140172079919103, +STORE, 140720358359040, 140720358494207, +STORE, 140720358498304, 140720358510591, +STORE, 140720358510592, 140720358514687, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140722548621312, 140737488351231, +SNULL, 140722548629503, 140737488351231, +STORE, 140722548621312, 140722548629503, +STORE, 140722548490240, 140722548629503, +STORE, 93949289504768, 93949291728895, +SNULL, 93949289615359, 93949291728895, +STORE, 93949289504768, 93949289615359, +STORE, 93949289615360, 93949291728895, +ERASE, 93949289615360, 93949291728895, +STORE, 93949291708416, 93949291720703, +STORE, 93949291720704, 93949291728895, +STORE, 140305861902336, 140305864155135, +SNULL, 140305862045695, 140305864155135, +STORE, 140305861902336, 140305862045695, +STORE, 140305862045696, 140305864155135, +ERASE, 140305862045696, 140305864155135, +STORE, 140305864142848, 140305864151039, +STORE, 140305864151040, 140305864155135, +STORE, 140722549821440, 140722549825535, +STORE, 140722549809152, 140722549821439, +STORE, 140305864114176, 140305864142847, +STORE, 140305864105984, 140305864114175, +STORE, 140305858105344, 140305861902335, +SNULL, 140305858105344, 140305859764223, +STORE, 140305859764224, 140305861902335, +STORE, 140305858105344, 140305859764223, +SNULL, 140305861861375, 140305861902335, +STORE, 140305859764224, 140305861861375, +STORE, 140305861861376, 140305861902335, +SNULL, 140305861861376, 140305861885951, +STORE, 140305861885952, 140305861902335, +STORE, 140305861861376, 140305861885951, +ERASE, 140305861861376, 140305861885951, +STORE, 140305861861376, 140305861885951, +ERASE, 140305861885952, 140305861902335, +STORE, 140305861885952, 140305861902335, +SNULL, 140305861877759, 140305861885951, +STORE, 140305861861376, 140305861877759, +STORE, 140305861877760, 140305861885951, +SNULL, 93949291716607, 93949291720703, +STORE, 93949291708416, 93949291716607, +STORE, 93949291716608, 93949291720703, +SNULL, 140305864146943, 140305864151039, +STORE, 140305864142848, 140305864146943, +STORE, 140305864146944, 140305864151039, +ERASE, 140305864114176, 140305864142847, +STORE, 93949324136448, 93949324271615, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140725754908672, 140737488351231, +SNULL, 140725754916863, 140737488351231, +STORE, 140725754908672, 140725754916863, +STORE, 140725754777600, 140725754916863, +STORE, 94831184375808, 94831186599935, +SNULL, 94831184486399, 94831186599935, +STORE, 94831184375808, 94831184486399, +STORE, 94831184486400, 94831186599935, +ERASE, 94831184486400, 
94831186599935, +STORE, 94831186579456, 94831186591743, +STORE, 94831186591744, 94831186599935, +STORE, 140605482479616, 140605484732415, +SNULL, 140605482622975, 140605484732415, +STORE, 140605482479616, 140605482622975, +STORE, 140605482622976, 140605484732415, +ERASE, 140605482622976, 140605484732415, +STORE, 140605484720128, 140605484728319, +STORE, 140605484728320, 140605484732415, +STORE, 140725755670528, 140725755674623, +STORE, 140725755658240, 140725755670527, +STORE, 140605484691456, 140605484720127, +STORE, 140605484683264, 140605484691455, +STORE, 140605478682624, 140605482479615, +SNULL, 140605478682624, 140605480341503, +STORE, 140605480341504, 140605482479615, +STORE, 140605478682624, 140605480341503, +SNULL, 140605482438655, 140605482479615, +STORE, 140605480341504, 140605482438655, +STORE, 140605482438656, 140605482479615, +SNULL, 140605482438656, 140605482463231, +STORE, 140605482463232, 140605482479615, +STORE, 140605482438656, 140605482463231, +ERASE, 140605482438656, 140605482463231, +STORE, 140605482438656, 140605482463231, +ERASE, 140605482463232, 140605482479615, +STORE, 140605482463232, 140605482479615, +SNULL, 140605482455039, 140605482463231, +STORE, 140605482438656, 140605482455039, +STORE, 140605482455040, 140605482463231, +SNULL, 94831186587647, 94831186591743, +STORE, 94831186579456, 94831186587647, +STORE, 94831186587648, 94831186591743, +SNULL, 140605484724223, 140605484728319, +STORE, 140605484720128, 140605484724223, +STORE, 140605484724224, 140605484728319, +ERASE, 140605484691456, 140605484720127, +STORE, 94831217156096, 94831217291263, +STORE, 94327178862592, 94327179075583, +STORE, 94327181172736, 94327181176831, +STORE, 94327181176832, 94327181185023, +STORE, 94327181185024, 94327181197311, +STORE, 94327185715200, 94327186685951, +STORE, 140172071755776, 140172073414655, +STORE, 140172073414656, 140172075511807, +STORE, 140172075511808, 140172075528191, +STORE, 140172075528192, 140172075536383, +STORE, 140172075536384, 140172075552767, +STORE, 140172075552768, 140172075565055, +STORE, 140172075565056, 140172077658111, +STORE, 140172077658112, 140172077662207, +STORE, 140172077662208, 140172077666303, +STORE, 140172077666304, 140172077809663, +STORE, 140172078178304, 140172079861759, +STORE, 140172079861760, 140172079878143, +STORE, 140172079878144, 140172079906815, +STORE, 140172079906816, 140172079910911, +STORE, 140172079910912, 140172079915007, +STORE, 140172079915008, 140172079919103, +STORE, 140720358359040, 140720358494207, +STORE, 140720358498304, 140720358510591, +STORE, 140720358510592, 140720358514687, +STORE, 140737488347136, 140737488351231, +STORE, 140737488343040, 140737488351231, +STORE, 140737488338944, 140737488351231, +STORE, 140734529933312, 140737488351231, +SNULL, 140734529945599, 140737488351231, +STORE, 140734529933312, 140734529945599, +STORE, 140734529802240, 140734529945599, +STORE, 4194304, 26279935, +STORE, 28372992, 28454911, +STORE, 28454912, 29806591, +STORE, 140249744060416, 140249746313215, +SNULL, 140249744203775, 140249746313215, +STORE, 140249744060416, 140249744203775, +STORE, 140249744203776, 140249746313215, +ERASE, 140249744203776, 140249746313215, +STORE, 140249746300928, 140249746309119, +STORE, 140249746309120, 140249746313215, +STORE, 140734530174976, 140734530179071, +STORE, 140734530162688, 140734530174975, +STORE, 140249746272256, 140249746300927, +STORE, 140249746264064, 140249746272255, +STORE, 140249740226560, 140249744060415, +SNULL, 140249740226560, 140249741934591, +STORE, 140249741934592, 
140249744060415, +STORE, 140249740226560, 140249741934591, +SNULL, 140249744027647, 140249744060415, +STORE, 140249741934592, 140249744027647, +STORE, 140249744027648, 140249744060415, +ERASE, 140249744027648, 140249744060415, +STORE, 140249744027648, 140249744060415, +STORE, 140249738031104, 140249740226559, +SNULL, 140249738031104, 140249738125311, +STORE, 140249738125312, 140249740226559, +STORE, 140249738031104, 140249738125311, +SNULL, 140249740218367, 140249740226559, +STORE, 140249738125312, 140249740218367, +STORE, 140249740218368, 140249740226559, +ERASE, 140249740218368, 140249740226559, +STORE, 140249740218368, 140249740226559, +STORE, 140249735512064, 140249738031103, +SNULL, 140249735512064, 140249735925759, +STORE, 140249735925760, 140249738031103, +STORE, 140249735512064, 140249735925759, +SNULL, 140249738018815, 140249738031103, +STORE, 140249735925760, 140249738018815, +STORE, 140249738018816, 140249738031103, +ERASE, 140249738018816, 140249738031103, +STORE, 140249738018816, 140249738031103, +STORE, 140249732878336, 140249735512063, +SNULL, 140249732878336, 140249733406719, +STORE, 140249733406720, 140249735512063, +STORE, 140249732878336, 140249733406719, +SNULL, 140249735503871, 140249735512063, +STORE, 140249733406720, 140249735503871, +STORE, 140249735503872, 140249735512063, +ERASE, 140249735503872, 140249735512063, +STORE, 140249735503872, 140249735512063, +STORE, 140249730764800, 140249732878335, +SNULL, 140249730764800, 140249730777087, +STORE, 140249730777088, 140249732878335, +STORE, 140249730764800, 140249730777087, +SNULL, 140249732870143, 140249732878335, +STORE, 140249730777088, 140249732870143, +STORE, 140249732870144, 140249732878335, +ERASE, 140249732870144, 140249732878335, +STORE, 140249732870144, 140249732878335, +STORE, 140249728561152, 140249730764799, +SNULL, 140249728561152, 140249728663551, +STORE, 140249728663552, 140249730764799, +STORE, 140249728561152, 140249728663551, +SNULL, 140249730756607, 140249730764799, +STORE, 140249728663552, 140249730756607, +STORE, 140249730756608, 140249730764799, +ERASE, 140249730756608, 140249730764799, +STORE, 140249730756608, 140249730764799, +STORE, 140249746255872, 140249746272255, +STORE, 140249725399040, 140249728561151, +SNULL, 140249725399040, 140249726459903, +STORE, 140249726459904, 140249728561151, +STORE, 140249725399040, 140249726459903, +SNULL, 140249728552959, 140249728561151, +STORE, 140249726459904, 140249728552959, +STORE, 140249728552960, 140249728561151, +ERASE, 140249728552960, 140249728561151, +STORE, 140249728552960, 140249728561151, +STORE, 140249721602048, 140249725399039, +SNULL, 140249721602048, 140249723260927, +STORE, 140249723260928, 140249725399039, +STORE, 140249721602048, 140249723260927, +SNULL, 140249725358079, 140249725399039, +STORE, 140249723260928, 140249725358079, +STORE, 140249725358080, 140249725399039, +SNULL, 140249725358080, 140249725382655, +STORE, 140249725382656, 140249725399039, +STORE, 140249725358080, 140249725382655, +ERASE, 140249725358080, 140249725382655, +STORE, 140249725358080, 140249725382655, +ERASE, 140249725382656, 140249725399039, +STORE, 140249725382656, 140249725399039, +STORE, 140249746243584, 140249746272255, +SNULL, 140249725374463, 140249725382655, +STORE, 140249725358080, 140249725374463, +STORE, 140249725374464, 140249725382655, +SNULL, 140249728557055, 140249728561151, +STORE, 140249728552960, 140249728557055, +STORE, 140249728557056, 140249728561151, +SNULL, 140249730760703, 140249730764799, +STORE, 140249730756608, 140249730760703, +STORE, 
140249730760704, 140249730764799, +SNULL, 140249732874239, 140249732878335, +STORE, 140249732870144, 140249732874239, +STORE, 140249732874240, 140249732878335, +SNULL, 140249735507967, 140249735512063, +STORE, 140249735503872, 140249735507967, +STORE, 140249735507968, 140249735512063, +SNULL, 140249738027007, 140249738031103, +STORE, 140249738018816, 140249738027007, +STORE, 140249738027008, 140249738031103, +SNULL, 140249740222463, 140249740226559, +STORE, 140249740218368, 140249740222463, +STORE, 140249740222464, 140249740226559, +SNULL, 140249744031743, 140249744060415, +STORE, 140249744027648, 140249744031743, +STORE, 140249744031744, 140249744060415, +SNULL, 28405759, 28454911, +STORE, 28372992, 28405759, +STORE, 28405760, 28454911, +SNULL, 140249746305023, 140249746309119, +STORE, 140249746300928, 140249746305023, +STORE, 140249746305024, 140249746309119, +ERASE, 140249746272256, 140249746300927, +STORE, 33853440, 33988607, +STORE, 140249744560128, 140249746243583, +STORE, 140249746296832, 140249746300927, +STORE, 140249744424960, 140249744560127, +STORE, 33853440, 34131967, +STORE, 140249719504896, 140249721602047, +STORE, 140249746288640, 140249746300927, +STORE, 140249746280448, 140249746300927, +STORE, 140249746243584, 140249746280447, +STORE, 140249744408576, 140249744560127, +STORE, 33853440, 34267135, +STORE, 33853440, 34422783, +STORE, 140249744400384, 140249744560127, +STORE, 140249744392192, 140249744560127, +STORE, 33853440, 34557951, +STORE, 33853440, 34693119, +STORE, 140249744375808, 140249744560127, +STORE, 140249744367616, 140249744560127, +STORE, 33853440, 34832383, +STORE, 140249719230464, 140249721602047, +STORE, 140249744207872, 140249744560127, +STORE, 33853440, 34971647, +SNULL, 34963455, 34971647, +STORE, 33853440, 34963455, +STORE, 34963456, 34971647, +ERASE, 34963456, 34971647, +SNULL, 34955263, 34963455, +STORE, 33853440, 34955263, +STORE, 34955264, 34963455, +ERASE, 34955264, 34963455, +SNULL, 34947071, 34955263, +STORE, 33853440, 34947071, +STORE, 34947072, 34955263, +ERASE, 34947072, 34955263, +SNULL, 34938879, 34947071, +STORE, 33853440, 34938879, +STORE, 34938880, 34947071, +ERASE, 34938880, 34947071, +STORE, 140249719214080, 140249721602047, +STORE, 140249719148544, 140249721602047, +STORE, 140249719115776, 140249721602047, +STORE, 140249717018624, 140249721602047, +STORE, 140249716953088, 140249721602047, +STORE, 33853440, 35086335, +STORE, 140249716822016, 140249721602047, +STORE, 140249716559872, 140249721602047, +STORE, 140249716551680, 140249721602047, +STORE, 140249716535296, 140249721602047, +STORE, 140249716527104, 140249721602047, +STORE, 140249716518912, 140249721602047, +STORE, 33853440, 35221503, +SNULL, 35213311, 35221503, +STORE, 33853440, 35213311, +STORE, 35213312, 35221503, +ERASE, 35213312, 35221503, +SNULL, 35205119, 35213311, +STORE, 33853440, 35205119, +STORE, 35205120, 35213311, +ERASE, 35205120, 35213311, +SNULL, 35192831, 35205119, +STORE, 33853440, 35192831, +STORE, 35192832, 35205119, +ERASE, 35192832, 35205119, +SNULL, 35176447, 35192831, +STORE, 33853440, 35176447, +STORE, 35176448, 35192831, +ERASE, 35176448, 35192831, +STORE, 140249716502528, 140249721602047, +STORE, 33853440, 35311615, +SNULL, 35307519, 35311615, +STORE, 33853440, 35307519, +STORE, 35307520, 35311615, +ERASE, 35307520, 35311615, +SNULL, 35303423, 35307519, +STORE, 33853440, 35303423, +STORE, 35303424, 35307519, +ERASE, 35303424, 35307519, +SNULL, 35299327, 35303423, +STORE, 33853440, 35299327, +STORE, 35299328, 35303423, +ERASE, 35299328, 35303423, 
+SNULL, 35295231, 35299327, +STORE, 33853440, 35295231, +STORE, 35295232, 35299327, +ERASE, 35295232, 35299327, +SNULL, 35291135, 35295231, +STORE, 33853440, 35291135, +STORE, 35291136, 35295231, +ERASE, 35291136, 35295231, +SNULL, 35287039, 35291135, +STORE, 33853440, 35287039, +STORE, 35287040, 35291135, +ERASE, 35287040, 35291135, +SNULL, 35282943, 35287039, +STORE, 33853440, 35282943, +STORE, 35282944, 35287039, +ERASE, 35282944, 35287039, +STORE, 140249716486144, 140249721602047, +STORE, 140249716453376, 140249721602047, +STORE, 33853440, 35418111, +SNULL, 35401727, 35418111, +STORE, 33853440, 35401727, +STORE, 35401728, 35418111, +ERASE, 35401728, 35418111, +SNULL, 35389439, 35401727, +STORE, 33853440, 35389439, +STORE, 35389440, 35401727, +ERASE, 35389440, 35401727, +STORE, 140249714356224, 140249721602047, +STORE, 33853440, 35540991, +STORE, 140249714339840, 140249721602047, +STORE, 140249714077696, 140249721602047, +STORE, 140249714069504, 140249721602047, +STORE, 140249714061312, 140249721602047, +STORE, 33853440, 35680255, +SNULL, 35672063, 35680255, +STORE, 33853440, 35672063, +STORE, 35672064, 35680255, +ERASE, 35672064, 35680255, +SNULL, 35627007, 35672063, +STORE, 33853440, 35627007, +STORE, 35627008, 35672063, +ERASE, 35627008, 35672063, +STORE, 140249711964160, 140249721602047, +STORE, 33853440, 35762175, +SNULL, 35753983, 35762175, +STORE, 33853440, 35753983, +STORE, 35753984, 35762175, +ERASE, 35753984, 35762175, +SNULL, 35745791, 35753983, +STORE, 33853440, 35745791, +STORE, 35745792, 35753983, +ERASE, 35745792, 35753983, +STORE, 140249711955968, 140249721602047, +STORE, 140249711947776, 140249721602047, +STORE, 140249710899200, 140249721602047, +STORE, 140249710866432, 140249721602047, +STORE, 140249710600192, 140249721602047, +SNULL, 140249744424959, 140249744560127, +STORE, 140249744207872, 140249744424959, +STORE, 140249744424960, 140249744560127, +ERASE, 140249744424960, 140249744560127, +STORE, 140249708503040, 140249721602047, +STORE, 33853440, 35885055, +STORE, 140249707978752, 140249721602047, +STORE, 140249705881600, 140249721602047, +STORE, 33853440, 36036607, +STORE, 33853440, 36175871, +STORE, 140249744551936, 140249744560127, +STORE, 140249744543744, 140249744560127, +STORE, 140249744535552, 140249744560127, +STORE, 140249744527360, 140249744560127, +STORE, 140249744519168, 140249744560127, +STORE, 140249705619456, 140249721602047, +STORE, 140249744510976, 140249744560127, +STORE, 140249744502784, 140249744560127, +STORE, 140249744494592, 140249744560127, +STORE, 140249744486400, 140249744560127, +STORE, 140249744478208, 140249744560127, +STORE, 140249744470016, 140249744560127, +STORE, 140249744461824, 140249744560127, +STORE, 140249744453632, 140249744560127, +STORE, 140249744445440, 140249744560127, +STORE, 140249744437248, 140249744560127, +STORE, 140249744429056, 140249744560127, +STORE, 140249703522304, 140249721602047, +STORE, 33853440, 36311039, +STORE, 140249703489536, 140249721602047, +STORE, 33853440, 36474879, +STORE, 140249703456768, 140249721602047, +STORE, 33853440, 36622335, +STORE, 140249703424000, 140249721602047, +STORE, 140249703391232, 140249721602047, +STORE, 33853440, 36810751, +STORE, 140249703358464, 140249721602047, +STORE, 140249703325696, 140249721602047, +SNULL, 36655103, 36810751, +STORE, 33853440, 36655103, +STORE, 36655104, 36810751, +ERASE, 36655104, 36810751, +SNULL, 36438015, 36655103, +STORE, 33853440, 36438015, +STORE, 36438016, 36655103, +ERASE, 36438016, 36655103, +STORE, 140249703317504, 140249721602047, +STORE, 
140249701220352, 140249721602047, +STORE, 33853440, 36585471, +STORE, 33853440, 36782079, +STORE, 140249701212160, 140249721602047, +STORE, 140249701203968, 140249721602047, +STORE, 140249701195776, 140249721602047, +STORE, 140249701187584, 140249721602047, +STORE, 140249701179392, 140249721602047, +STORE, 140249701171200, 140249721602047, +STORE, 140249701163008, 140249721602047, +STORE, 140249701154816, 140249721602047, +STORE, 140249701146624, 140249721602047, +STORE, 140249701138432, 140249721602047, +STORE, 140249701130240, 140249721602047, +STORE, 140249700081664, 140249721602047, +STORE, 140249700073472, 140249721602047, +STORE, 33853440, 36978687, +STORE, 140249697976320, 140249721602047, +STORE, 33853440, 37240831, +STORE, 140249695879168, 140249721602047, +STORE, 140249695870976, 140249721602047, +STORE, 140249695862784, 140249721602047, +STORE, 140249695854592, 140249721602047, +STORE, 140249695326208, 140249721602047, +SNULL, 140249710600191, 140249721602047, +STORE, 140249695326208, 140249710600191, +STORE, 140249710600192, 140249721602047, +SNULL, 140249710600192, 140249710866431, +STORE, 140249710866432, 140249721602047, +STORE, 140249710600192, 140249710866431, +ERASE, 140249710600192, 140249710866431, +STORE, 140249691131904, 140249710600191, +STORE, 33853440, 37474303, +STORE, 140249710858240, 140249721602047, +STORE, 140249710850048, 140249721602047, +STORE, 140249710841856, 140249721602047, +STORE, 140249710833664, 140249721602047, +STORE, 140249710825472, 140249721602047, +STORE, 140249710817280, 140249721602047, +STORE, 140249710809088, 140249721602047, +STORE, 140249710800896, 140249721602047, +STORE, 140249710792704, 140249721602047, +STORE, 140249710784512, 140249721602047, +STORE, 140249710776320, 140249721602047, +STORE, 140249710768128, 140249721602047, +STORE, 140249710759936, 140249721602047, +STORE, 140249710751744, 140249721602047, +STORE, 140249710743552, 140249721602047, +STORE, 140249710735360, 140249721602047, +STORE, 140249689034752, 140249710600191, +STORE, 140249710727168, 140249721602047, +STORE, 140249686937600, 140249710600191, +STORE, 33853440, 37867519, +STORE, 140249684840448, 140249710600191, +STORE, 140249710718976, 140249721602047, +STORE, 140249682743296, 140249710600191, +STORE, 140249710710784, 140249721602047, +STORE, 140249710702592, 140249721602047, +STORE, 140249710694400, 140249721602047, +STORE, 140249710686208, 140249721602047, +STORE, 140249710678016, 140249721602047, +STORE, 140249682612224, 140249710600191, +STORE, 140249682087936, 140249710600191, +SNULL, 140249705619455, 140249710600191, +STORE, 140249682087936, 140249705619455, +STORE, 140249705619456, 140249710600191, +SNULL, 140249705619456, 140249705881599, +STORE, 140249705881600, 140249710600191, +STORE, 140249705619456, 140249705881599, +ERASE, 140249705619456, 140249705881599, +STORE, 140249679990784, 140249705619455, +STORE, 140249710669824, 140249721602047, +STORE, 140249677893632, 140249705619455, +STORE, 140249710653440, 140249721602047, +STORE, 140249710645248, 140249721602047, +STORE, 140249710637056, 140249721602047, +STORE, 140249710628864, 140249721602047, +STORE, 140249710620672, 140249721602047, +STORE, 140249710612480, 140249721602047, +STORE, 140249710604288, 140249721602047, +STORE, 140249705873408, 140249710600191, +STORE, 140249705865216, 140249710600191, +STORE, 140249705857024, 140249710600191, +STORE, 140249705848832, 140249710600191, +STORE, 140249705840640, 140249710600191, +STORE, 140249705832448, 140249710600191, +STORE, 140249705824256, 
140249710600191, +STORE, 140249705816064, 140249710600191, +STORE, 140249705807872, 140249710600191, +STORE, 140249705799680, 140249710600191, +STORE, 33853440, 38129663, +SNULL, 140249744207872, 140249744367615, +STORE, 140249744367616, 140249744424959, +STORE, 140249744207872, 140249744367615, +ERASE, 140249744207872, 140249744367615, +STORE, 140249677606912, 140249705619455, +STORE, 140249675509760, 140249705619455, +SNULL, 140249677606911, 140249705619455, +STORE, 140249675509760, 140249677606911, +STORE, 140249677606912, 140249705619455, +SNULL, 140249677606912, 140249677893631, +STORE, 140249677893632, 140249705619455, +STORE, 140249677606912, 140249677893631, +ERASE, 140249677606912, 140249677893631, +STORE, 140249744359424, 140249744424959, +STORE, 33853440, 38391807, +STORE, 140249674981376, 140249677606911, +STORE, 140249672884224, 140249677606911, +SNULL, 140249719230463, 140249721602047, +STORE, 140249710604288, 140249719230463, +STORE, 140249719230464, 140249721602047, +SNULL, 140249719230464, 140249719504895, +STORE, 140249719504896, 140249721602047, +STORE, 140249719230464, 140249719504895, +ERASE, 140249719230464, 140249719504895, +STORE, 140249744351232, 140249744424959, +STORE, 140249744343040, 140249744424959, +STORE, 140249744334848, 140249744424959, +STORE, 140249744326656, 140249744424959, +STORE, 140249744310272, 140249744424959, +STORE, 140249744302080, 140249744424959, +STORE, 140249744285696, 140249744424959, +STORE, 140249744277504, 140249744424959, +STORE, 140249744261120, 140249744424959, +STORE, 140249744252928, 140249744424959, +STORE, 140249744220160, 140249744424959, +STORE, 140249744211968, 140249744424959, +STORE, 140249719488512, 140249721602047, +STORE, 140249744203776, 140249744424959, +STORE, 140249719472128, 140249721602047, +STORE, 140249719463936, 140249721602047, +STORE, 140249719447552, 140249721602047, +STORE, 140249719439360, 140249721602047, +STORE, 140249719406592, 140249721602047, +STORE, 140249719398400, 140249721602047, +STORE, 140249719382016, 140249721602047, +STORE, 140249719373824, 140249721602047, +STORE, 140249719357440, 140249721602047, +STORE, 140249719349248, 140249721602047, +STORE, 140249719332864, 140249721602047, +STORE, 140249719324672, 140249721602047, +STORE, 140249719291904, 140249721602047, +STORE, 140249719283712, 140249721602047, +STORE, 140249719267328, 140249721602047, +STORE, 140249719259136, 140249721602047, +STORE, 140249719242752, 140249721602047, +STORE, 140249719234560, 140249721602047, +STORE, 140249705783296, 140249710600191, +STORE, 140249705775104, 140249710600191, +STORE, 140249705742336, 140249710600191, +STORE, 140249705734144, 140249710600191, +STORE, 140249705717760, 140249710600191, +STORE, 140249670787072, 140249677606911, +STORE, 140249705709568, 140249710600191, +STORE, 140249705693184, 140249710600191, +STORE, 140249705684992, 140249710600191, +STORE, 140249705668608, 140249710600191, +STORE, 140249705660416, 140249710600191, +STORE, 140249705627648, 140249710600191, +STORE, 140249677893632, 140249710600191, +STORE, 140249677877248, 140249710600191, +STORE, 140249677869056, 140249710600191, +STORE, 140249677852672, 140249710600191, +STORE, 140249677844480, 140249710600191, +STORE, 140249677828096, 140249710600191, +STORE, 140249668689920, 140249677606911, +STORE, 140249677819904, 140249710600191, +STORE, 140249677787136, 140249710600191, +STORE, 140249677778944, 140249710600191, +STORE, 140249677762560, 140249710600191, +STORE, 140249677754368, 140249710600191, +STORE, 140249677737984, 
140249710600191, +STORE, 140249677729792, 140249710600191, +STORE, 140249677713408, 140249710600191, +STORE, 140249677705216, 140249710600191, +STORE, 140249677672448, 140249710600191, +STORE, 140249677664256, 140249710600191, +STORE, 140249677647872, 140249710600191, +STORE, 140249677639680, 140249710600191, +STORE, 140249677623296, 140249710600191, +STORE, 140249677615104, 140249710600191, +STORE, 140249668673536, 140249677606911, +STORE, 140249668673536, 140249710600191, +STORE, 140249668640768, 140249710600191, +STORE, 140249668632576, 140249710600191, +STORE, 140249668616192, 140249710600191, +STORE, 140249668608000, 140249710600191, +STORE, 140249668591616, 140249710600191, +STORE, 140249668583424, 140249710600191, +STORE, 140249668567040, 140249710600191, +STORE, 140249668558848, 140249710600191, +STORE, 140249668526080, 140249710600191, +STORE, 140249668517888, 140249710600191, +STORE, 140249668501504, 140249710600191, +STORE, 140249668493312, 140249710600191, +STORE, 140249668476928, 140249710600191, +STORE, 140249668468736, 140249710600191, +STORE, 140249668452352, 140249710600191, +STORE, 140249668444160, 140249710600191, +STORE, 140249668411392, 140249710600191, +STORE, 140249668403200, 140249710600191, +STORE, 140249668386816, 140249710600191, +STORE, 140249668378624, 140249710600191, +STORE, 140249668362240, 140249710600191, +STORE, 140249668354048, 140249710600191, +STORE, 140249668337664, 140249710600191, +STORE, 140249668329472, 140249710600191, +STORE, 140249668296704, 140249710600191, +STORE, 140249668288512, 140249710600191, +STORE, 140249668272128, 140249710600191, +STORE, 140249668263936, 140249710600191, +STORE, 140249668247552, 140249710600191, +STORE, 140249668239360, 140249710600191, +STORE, 140249668222976, 140249710600191, +STORE, 140249668214784, 140249710600191, +STORE, 140249668182016, 140249710600191, +STORE, 140249668173824, 140249710600191, +STORE, 140249668157440, 140249710600191, +STORE, 140249668149248, 140249710600191, +STORE, 140249668132864, 140249710600191, +STORE, 140249668124672, 140249710600191, +STORE, 140249668108288, 140249710600191, +STORE, 140249668100096, 140249710600191, +STORE, 140249668067328, 140249710600191, +STORE, 140249668059136, 140249710600191, +STORE, 140249668042752, 140249710600191, +STORE, 140249668034560, 140249710600191, +STORE, 140249668018176, 140249710600191, +STORE, 140249668009984, 140249710600191, +STORE, 140249667993600, 140249710600191, +STORE, 140249667985408, 140249710600191, +STORE, 140249667952640, 140249710600191, +STORE, 140249667944448, 140249710600191, +STORE, 140249667928064, 140249710600191, +STORE, 140249667919872, 140249710600191, +STORE, 140249667903488, 140249710600191, +STORE, 140249667895296, 140249710600191, +STORE, 140249667878912, 140249710600191, +STORE, 140249667870720, 140249710600191, +STORE, 140249667837952, 140249710600191, +STORE, 140249667829760, 140249710600191, +STORE, 140249667813376, 140249710600191, +STORE, 140249667805184, 140249710600191, +STORE, 140249667788800, 140249710600191, +STORE, 140249667780608, 140249710600191, +STORE, 140249667764224, 140249710600191, +STORE, 140249667756032, 140249710600191, +STORE, 140249667723264, 140249710600191, +STORE, 140249667715072, 140249710600191, +STORE, 140249667698688, 140249710600191, +STORE, 140249667690496, 140249710600191, +STORE, 140249667674112, 140249710600191, +STORE, 140249667665920, 140249710600191, +STORE, 140249667649536, 140249710600191, +STORE, 140249667641344, 140249710600191, +STORE, 140249667608576, 140249710600191, +STORE, 
140249667600384, 140249710600191, +STORE, 140249667584000, 140249710600191, +STORE, 140249667575808, 140249710600191, +STORE, 140249667559424, 140249710600191, +STORE, 140249667551232, 140249710600191, +STORE, 140249667534848, 140249710600191, +STORE, 140249667526656, 140249710600191, +STORE, 140249667493888, 140249710600191, +STORE, 140249667485696, 140249710600191, +STORE, 140249667469312, 140249710600191, +STORE, 140249667461120, 140249710600191, +STORE, 140249667444736, 140249710600191, +STORE, 140249667436544, 140249710600191, +STORE, 140249667420160, 140249710600191, +STORE, 140249665323008, 140249710600191, +STORE, 140249665314816, 140249710600191, +STORE, 140249665282048, 140249710600191, +STORE, 140249665273856, 140249710600191, +STORE, 140249665257472, 140249710600191, +STORE, 140249665249280, 140249710600191, +STORE, 140249665232896, 140249710600191, +STORE, 140249665224704, 140249710600191, +STORE, 140249665208320, 140249710600191, +STORE, 140249665200128, 140249710600191, +STORE, 140249665167360, 140249710600191, +STORE, 140249665159168, 140249710600191, +STORE, 140249665142784, 140249710600191, +STORE, 140249665134592, 140249710600191, +STORE, 140249665118208, 140249710600191, +STORE, 140249665110016, 140249710600191, +STORE, 140249665093632, 140249710600191, +STORE, 140249665085440, 140249710600191, +STORE, 140249665052672, 140249710600191, +STORE, 140249665044480, 140249710600191, +STORE, 140249665028096, 140249710600191, +STORE, 140249665019904, 140249710600191, +STORE, 140249665003520, 140249710600191, +STORE, 140249664995328, 140249710600191, +STORE, 140249664978944, 140249710600191, +STORE, 140249664970752, 140249710600191, +STORE, 140249664937984, 140249710600191, +STORE, 140249664929792, 140249710600191, +STORE, 140249664913408, 140249710600191, +STORE, 140249664905216, 140249710600191, +STORE, 140249664888832, 140249710600191, +STORE, 140249664880640, 140249710600191, +STORE, 140249664864256, 140249710600191, +STORE, 140249664856064, 140249710600191, +STORE, 140249664823296, 140249710600191, +STORE, 140249664815104, 140249710600191, +STORE, 140249664798720, 140249710600191, +STORE, 140249664790528, 140249710600191, +STORE, 140249664774144, 140249710600191, +STORE, 140249664765952, 140249710600191, +STORE, 140249664749568, 140249710600191, +STORE, 140249664741376, 140249710600191, +STORE, 140249664708608, 140249710600191, +STORE, 140249664700416, 140249710600191, +STORE, 140249664684032, 140249710600191, +STORE, 140249664675840, 140249710600191, +STORE, 140249664659456, 140249710600191, +STORE, 140249664651264, 140249710600191, +STORE, 140249664634880, 140249710600191, +STORE, 140249664626688, 140249710600191, +STORE, 140249664593920, 140249710600191, +STORE, 140249664585728, 140249710600191, +STORE, 140249664569344, 140249710600191, +STORE, 140249664561152, 140249710600191, +STORE, 140249664544768, 140249710600191, +STORE, 140249664536576, 140249710600191, +STORE, 140249664520192, 140249710600191, +STORE, 140249664512000, 140249710600191, +STORE, 140249664479232, 140249710600191, +STORE, 140249664471040, 140249710600191, +STORE, 140249664454656, 140249710600191, +STORE, 140249664446464, 140249710600191, +STORE, 140249664430080, 140249710600191, +STORE, 140249664421888, 140249710600191, +STORE, 140249664405504, 140249710600191, +STORE, 140249664397312, 140249710600191, +STORE, 140249664364544, 140249710600191, +STORE, 140249664356352, 140249710600191, +STORE, 140249664339968, 140249710600191, +STORE, 140249664331776, 140249710600191, +STORE, 140249664315392, 
140249710600191, +STORE, 140249664307200, 140249710600191, +STORE, 140249664290816, 140249710600191, +STORE, 140249664282624, 140249710600191, +STORE, 140249664249856, 140249710600191, +STORE, 140249664241664, 140249710600191, +STORE, 140249664225280, 140249710600191, +STORE, 140249664217088, 140249710600191, +STORE, 140249664200704, 140249710600191, +STORE, 140249664192512, 140249710600191, +STORE, 140249664176128, 140249710600191, +STORE, 140249664167936, 140249710600191, +STORE, 140249664135168, 140249710600191, +STORE, 140249664126976, 140249710600191, +STORE, 140249664110592, 140249710600191, +STORE, 140249664102400, 140249710600191, +STORE, 140249664086016, 140249710600191, +STORE, 140249664077824, 140249710600191, +STORE, 140249664061440, 140249710600191, +STORE, 140249664053248, 140249710600191, +STORE, 140249664020480, 140249710600191, +STORE, 140249664012288, 140249710600191, +STORE, 140249663995904, 140249710600191, +STORE, 140249663987712, 140249710600191, +STORE, 140249663971328, 140249710600191, +STORE, 140249663963136, 140249710600191, +STORE, 140249663946752, 140249710600191, +STORE, 140249663938560, 140249710600191, +STORE, 140249663905792, 140249710600191, +STORE, 140249663897600, 140249710600191, +STORE, 140249663881216, 140249710600191, +STORE, 140249663873024, 140249710600191, +STORE, 140249663856640, 140249710600191, +STORE, 140249663848448, 140249710600191, +STORE, 140249663832064, 140249710600191, +STORE, 140249663823872, 140249710600191, +STORE, 140249663791104, 140249710600191, +STORE, 140249663782912, 140249710600191, +STORE, 140249663766528, 140249710600191, +STORE, 140249663758336, 140249710600191, +STORE, 140249663741952, 140249710600191, +STORE, 140249663733760, 140249710600191, +STORE, 140249663717376, 140249710600191, +STORE, 140249663709184, 140249710600191, +STORE, 140249663676416, 140249710600191, +STORE, 140249663668224, 140249710600191, +STORE, 140249663651840, 140249710600191, +STORE, 140249663643648, 140249710600191, +STORE, 140249663627264, 140249710600191, +STORE, 33853440, 38526975, +STORE, 140249663619072, 140249710600191, +STORE, 140249663602688, 140249710600191, +STORE, 140249661505536, 140249710600191, +STORE, 140249661497344, 140249710600191, +STORE, 140249661464576, 140249710600191, +STORE, 140249661456384, 140249710600191, +STORE, 140249661440000, 140249710600191, +STORE, 140249661431808, 140249710600191, +STORE, 140249661415424, 140249710600191, +STORE, 140249661407232, 140249710600191, +STORE, 140249661390848, 140249710600191, +STORE, 140249661382656, 140249710600191, +STORE, 140249661349888, 140249710600191, +STORE, 140249661341696, 140249710600191, +STORE, 140249661325312, 140249710600191, +STORE, 140249661317120, 140249710600191, +STORE, 140249661300736, 140249710600191, +STORE, 140249661292544, 140249710600191, +STORE, 140249661276160, 140249710600191, +STORE, 140249661267968, 140249710600191, +STORE, 140249661235200, 140249710600191, +STORE, 140249661227008, 140249710600191, +STORE, 140249661210624, 140249710600191, +STORE, 140249661202432, 140249710600191, +STORE, 140249661186048, 140249710600191, +STORE, 140249661177856, 140249710600191, +STORE, 140249661161472, 140249710600191, +STORE, 140249661153280, 140249710600191, +STORE, 140249661120512, 140249710600191, +STORE, 140249661112320, 140249710600191, +STORE, 140249661095936, 140249710600191, +STORE, 140249661087744, 140249710600191, +STORE, 140249661071360, 140249710600191, +STORE, 140249661063168, 140249710600191, +STORE, 140249661046784, 140249710600191, +STORE, 
140249661038592, 140249710600191, +STORE, 140249661005824, 140249710600191, +STORE, 140249660997632, 140249710600191, +STORE, 140249660981248, 140249710600191, +STORE, 140249660973056, 140249710600191, +STORE, 140249660956672, 140249710600191, +STORE, 140249660948480, 140249710600191, +STORE, 140249660932096, 140249710600191, +STORE, 140249660923904, 140249710600191, +STORE, 140249660891136, 140249710600191, +STORE, 140249660882944, 140249710600191, +STORE, 140249660866560, 140249710600191, +STORE, 140249660858368, 140249710600191, +STORE, 140249660841984, 140249710600191, +STORE, 140249660833792, 140249710600191, +STORE, 140249660817408, 140249710600191, +STORE, 140249660809216, 140249710600191, +STORE, 140249660776448, 140249710600191, +STORE, 140249660768256, 140249710600191, +STORE, 140249660751872, 140249710600191, +STORE, 140249660743680, 140249710600191, +STORE, 140249660727296, 140249710600191, +STORE, 140249660719104, 140249710600191, +STORE, 140249660702720, 140249710600191, +STORE, 140249660694528, 140249710600191, +STORE, 140249660661760, 140249710600191, +STORE, 140249660653568, 140249710600191, +STORE, 140249660637184, 140249710600191, +STORE, 140249660628992, 140249710600191, +STORE, 140249660612608, 140249710600191, +STORE, 140249660604416, 140249710600191, +STORE, 140249660588032, 140249710600191, +STORE, 140249660579840, 140249710600191, +STORE, 140249660547072, 140249710600191, +STORE, 140249660538880, 140249710600191, +STORE, 140249660522496, 140249710600191, +STORE, 140249660514304, 140249710600191, +STORE, 140249660497920, 140249710600191, +STORE, 140249660489728, 140249710600191, +STORE, 140249660473344, 140249710600191, +STORE, 140249660465152, 140249710600191, +STORE, 140249660432384, 140249710600191, +STORE, 140249660424192, 140249710600191, +STORE, 140249660407808, 140249710600191, +STORE, 140249660399616, 140249710600191, +STORE, 140249660383232, 140249710600191, +STORE, 140249660375040, 140249710600191, +STORE, 140249660358656, 140249710600191, +STORE, 140249660350464, 140249710600191, +STORE, 140249660317696, 140249710600191, +STORE, 140249660309504, 140249710600191, +STORE, 140249660293120, 140249710600191, +STORE, 140249660284928, 140249710600191, +STORE, 140249660268544, 140249710600191, +STORE, 140249660260352, 140249710600191, +STORE, 140249660243968, 140249710600191, +STORE, 140249660235776, 140249710600191, +STORE, 140249660203008, 140249710600191, +STORE, 140249660194816, 140249710600191, +STORE, 140249660178432, 140249710600191, +STORE, 140249660170240, 140249710600191, +STORE, 140249660153856, 140249710600191, +STORE, 140249660145664, 140249710600191, +STORE, 140249660129280, 140249710600191, +STORE, 140249660121088, 140249710600191, +STORE, 140249660088320, 140249710600191, +STORE, 140249660080128, 140249710600191, +STORE, 140249660063744, 140249710600191, +STORE, 140249660055552, 140249710600191, +STORE, 140249660039168, 140249710600191, +STORE, 140249660030976, 140249710600191, +STORE, 140249660014592, 140249710600191, +STORE, 140249660006400, 140249710600191, +STORE, 140249659973632, 140249710600191, +STORE, 140249659965440, 140249710600191, +STORE, 140249659949056, 140249710600191, +STORE, 140249659940864, 140249710600191, +STORE, 140249659924480, 140249710600191, +STORE, 140249659916288, 140249710600191, +STORE, 140249659899904, 140249710600191, +STORE, 140249659891712, 140249710600191, +STORE, 140249659858944, 140249710600191, +STORE, 140249659850752, 140249710600191, +STORE, 140249659834368, 140249710600191, +STORE, 140249659826176, 
140249710600191, +STORE, 140249659809792, 140249710600191, +STORE, 140249659801600, 140249710600191, +STORE, 140249659785216, 140249710600191, +STORE, 140249657688064, 140249710600191, +STORE, 140249657679872, 140249710600191, +STORE, 140249657647104, 140249710600191, +STORE, 140249657638912, 140249710600191, +STORE, 140249657622528, 140249710600191, +STORE, 140249657614336, 140249710600191, +STORE, 140249657597952, 140249710600191, +STORE, 140249657589760, 140249710600191, +STORE, 140249657573376, 140249710600191, +STORE, 140249657565184, 140249710600191, +STORE, 140249657532416, 140249710600191, +STORE, 140249657524224, 140249710600191, +STORE, 140249657507840, 140249710600191, +STORE, 140249657499648, 140249710600191, +STORE, 140249657483264, 140249710600191, +STORE, 140249657475072, 140249710600191, +STORE, 140249657458688, 140249710600191, +STORE, 140249657450496, 140249710600191, +STORE, 140249657417728, 140249710600191, +STORE, 140249657409536, 140249710600191, +STORE, 140249657393152, 140249710600191, +STORE, 140249657384960, 140249710600191, +STORE, 140249657368576, 140249710600191, +STORE, 140249657360384, 140249710600191, +STORE, 140249657344000, 140249710600191, +STORE, 140249657335808, 140249710600191, +STORE, 140249657303040, 140249710600191, +STORE, 140249657294848, 140249710600191, +STORE, 140249657278464, 140249710600191, +STORE, 140249657270272, 140249710600191, +STORE, 140249657253888, 140249710600191, +STORE, 140249657245696, 140249710600191, +STORE, 140249657229312, 140249710600191, +STORE, 140249657221120, 140249710600191, +STORE, 140249657188352, 140249710600191, +STORE, 140249657180160, 140249710600191, +STORE, 140249657163776, 140249710600191, +STORE, 140249657155584, 140249710600191, +STORE, 140249657139200, 140249710600191, +STORE, 140249657131008, 140249710600191, +STORE, 140249657114624, 140249710600191, +STORE, 140249657106432, 140249710600191, +STORE, 140249657073664, 140249710600191, +STORE, 140249657065472, 140249710600191, +STORE, 140249657049088, 140249710600191, +STORE, 140249657040896, 140249710600191, +STORE, 140249657024512, 140249710600191, +STORE, 140249657016320, 140249710600191, +STORE, 140249656999936, 140249710600191, +STORE, 140249656991744, 140249710600191, +STORE, 140249656958976, 140249710600191, +STORE, 140249656950784, 140249710600191, +STORE, 140249656934400, 140249710600191, +STORE, 140249656926208, 140249710600191, +STORE, 140249656909824, 140249710600191, +STORE, 140249656901632, 140249710600191, +STORE, 140249656885248, 140249710600191, +STORE, 140249656877056, 140249710600191, +STORE, 140249656844288, 140249710600191, +STORE, 140249656836096, 140249710600191, +STORE, 140249656819712, 140249710600191, +STORE, 140249656811520, 140249710600191, +STORE, 140249656795136, 140249710600191, +STORE, 33853440, 38662143, +STORE, 140249656786944, 140249710600191, +STORE, 140249656770560, 140249710600191, +STORE, 140249656762368, 140249710600191, +STORE, 140249656729600, 140249710600191, +STORE, 140249656721408, 140249710600191, +STORE, 140249656705024, 140249710600191, +STORE, 140249656696832, 140249710600191, +STORE, 140249656680448, 140249710600191, +STORE, 140249656672256, 140249710600191, +STORE, 140249656655872, 140249710600191, +STORE, 140249656647680, 140249710600191, +STORE, 140249656614912, 140249710600191, +STORE, 140249656606720, 140249710600191, +STORE, 140249656590336, 140249710600191, +STORE, 140249656582144, 140249710600191, +STORE, 140249656565760, 140249710600191, +STORE, 140249656557568, 140249710600191, +STORE, 
140249656541184, 140249710600191, +STORE, 140249656532992, 140249710600191, +STORE, 140249656500224, 140249710600191, +STORE, 140249656492032, 140249710600191, +STORE, 140249656475648, 140249710600191, +STORE, 140249656467456, 140249710600191, +STORE, 140249656451072, 140249710600191, +STORE, 140249656442880, 140249710600191, +STORE, 140249656426496, 140249710600191, +STORE, 140249656418304, 140249710600191, +STORE, 140249656385536, 140249710600191, +STORE, 140249656377344, 140249710600191, +STORE, 140249656360960, 140249710600191, +STORE, 140249656352768, 140249710600191, +STORE, 140249656336384, 140249710600191, +STORE, 140249656328192, 140249710600191, +STORE, 140249656311808, 140249710600191, +STORE, 140249656303616, 140249710600191, +STORE, 140249656270848, 140249710600191, +STORE, 140249656262656, 140249710600191, +STORE, 140249656246272, 140249710600191, +STORE, 140249656238080, 140249710600191, +STORE, 140249656221696, 140249710600191, +STORE, 140249656213504, 140249710600191, +STORE, 140249656197120, 140249710600191, +STORE, 140249656188928, 140249710600191, +STORE, 140249656156160, 140249710600191, +STORE, 140249656147968, 140249710600191, +STORE, 140249656131584, 140249710600191, +STORE, 140249656123392, 140249710600191, +STORE, 140249656107008, 140249710600191, +STORE, 140249656098816, 140249710600191, +STORE, 140249656082432, 140249710600191, +STORE, 140249656074240, 140249710600191, +STORE, 140249656041472, 140249710600191, +STORE, 140249656033280, 140249710600191, +STORE, 140249656016896, 140249710600191, +STORE, 140249656008704, 140249710600191, +STORE, 140249655992320, 140249710600191, +STORE, 140249655984128, 140249710600191, +STORE, 140249655967744, 140249710600191, +STORE, 140249653870592, 140249710600191, +STORE, 140249653862400, 140249710600191, +STORE, 140249653829632, 140249710600191, +STORE, 140249653821440, 140249710600191, +STORE, 140249653805056, 140249710600191, +STORE, 140249653796864, 140249710600191, +STORE, 140249653780480, 140249710600191, +STORE, 140249653772288, 140249710600191, +STORE, 140249653755904, 140249710600191, +STORE, 140249652703232, 140249710600191, +SNULL, 140249682087935, 140249710600191, +STORE, 140249652703232, 140249682087935, +STORE, 140249682087936, 140249710600191, + }; + + unsigned long set26[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140729464770560, 140737488351231, +SNULL, 140729464774655, 140737488351231, +STORE, 140729464770560, 140729464774655, +STORE, 140729464639488, 140729464774655, +STORE, 4194304, 5066751, +STORE, 7159808, 7172095, +STORE, 7172096, 7180287, +STORE, 140729465114624, 140729465118719, +STORE, 140729465102336, 140729465114623, +STORE, 30867456, 30875647, +STORE, 30867456, 31010815, +STORE, 140109040988160, 140109042671615, +STORE, 140109040959488, 140109040988159, +STORE, 140109040943104, 140109040959487, +ERASE, 140109040943104, 140109040959487, +STORE, 140109040840704, 140109040959487, +ERASE, 140109040840704, 140109040959487, +STORE, 140109040951296, 140109040959487, +ERASE, 140109040951296, 140109040959487, +STORE, 140109040955392, 140109040959487, +ERASE, 140109040955392, 140109040959487, + }; + unsigned long set27[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140726128070656, 140737488351231, +SNULL, 140726128074751, 140737488351231, +STORE, 140726128070656, 140726128074751, +STORE, 140726127939584, 140726128074751, +STORE, 94478497189888, 94478499303423, +SNULL, 94478497202175, 94478499303423, +STORE, 94478497189888, 94478497202175, +STORE, 94478497202176, 94478499303423, +ERASE, 
94478497202176, 94478499303423, +STORE, 94478499295232, 94478499303423, +STORE, 140415605723136, 140415607975935, +SNULL, 140415605866495, 140415607975935, +STORE, 140415605723136, 140415605866495, +STORE, 140415605866496, 140415607975935, +ERASE, 140415605866496, 140415607975935, +STORE, 140415607963648, 140415607971839, +STORE, 140415607971840, 140415607975935, +STORE, 140726130024448, 140726130028543, +STORE, 140726130012160, 140726130024447, +STORE, 140415607934976, 140415607963647, +STORE, 140415607926784, 140415607934975, +STORE, 140415603245056, 140415605723135, +SNULL, 140415603245056, 140415603613695, +STORE, 140415603613696, 140415605723135, +STORE, 140415603245056, 140415603613695, +SNULL, 140415605710847, 140415605723135, +STORE, 140415603613696, 140415605710847, +STORE, 140415605710848, 140415605723135, +ERASE, 140415605710848, 140415605723135, +STORE, 140415605710848, 140415605723135, +STORE, 140415599370240, 140415603245055, +SNULL, 140415599370240, 140415601111039, +STORE, 140415601111040, 140415603245055, +STORE, 140415599370240, 140415601111039, +SNULL, 140415603208191, 140415603245055, +STORE, 140415601111040, 140415603208191, +STORE, 140415603208192, 140415603245055, +ERASE, 140415603208192, 140415603245055, +STORE, 140415603208192, 140415603245055, +STORE, 140415595692032, 140415599370239, +SNULL, 140415595692032, 140415597207551, +STORE, 140415597207552, 140415599370239, +STORE, 140415595692032, 140415597207551, +SNULL, 140415599304703, 140415599370239, +STORE, 140415597207552, 140415599304703, +STORE, 140415599304704, 140415599370239, +SNULL, 140415599304704, 140415599353855, +STORE, 140415599353856, 140415599370239, +STORE, 140415599304704, 140415599353855, +ERASE, 140415599304704, 140415599353855, +STORE, 140415599304704, 140415599353855, +ERASE, 140415599353856, 140415599370239, +STORE, 140415599353856, 140415599370239, +STORE, 140415593500672, 140415595692031, +SNULL, 140415593500672, 140415593590783, +STORE, 140415593590784, 140415595692031, +STORE, 140415593500672, 140415593590783, +SNULL, 140415595683839, 140415595692031, +STORE, 140415593590784, 140415595683839, +STORE, 140415595683840, 140415595692031, +ERASE, 140415595683840, 140415595692031, +STORE, 140415595683840, 140415595692031, +STORE, 140415589703680, 140415593500671, +SNULL, 140415589703680, 140415591362559, +STORE, 140415591362560, 140415593500671, +STORE, 140415589703680, 140415591362559, +SNULL, 140415593459711, 140415593500671, +STORE, 140415591362560, 140415593459711, +STORE, 140415593459712, 140415593500671, +SNULL, 140415593459712, 140415593484287, +STORE, 140415593484288, 140415593500671, +STORE, 140415593459712, 140415593484287, +ERASE, 140415593459712, 140415593484287, +STORE, 140415593459712, 140415593484287, +ERASE, 140415593484288, 140415593500671, +STORE, 140415593484288, 140415593500671, +STORE, 140415587590144, 140415589703679, +SNULL, 140415587590144, 140415587602431, +STORE, 140415587602432, 140415589703679, +STORE, 140415587590144, 140415587602431, +SNULL, 140415589695487, 140415589703679, +STORE, 140415587602432, 140415589695487, +STORE, 140415589695488, 140415589703679, +ERASE, 140415589695488, 140415589703679, +STORE, 140415589695488, 140415589703679, +STORE, 140415607918592, 140415607934975, +STORE, 140415585398784, 140415587590143, +SNULL, 140415585398784, 140415585480703, +STORE, 140415585480704, 140415587590143, +STORE, 140415585398784, 140415585480703, +SNULL, 140415587573759, 140415587590143, +STORE, 140415585480704, 140415587573759, +STORE, 140415587573760, 
140415587590143, +SNULL, 140415587573760, 140415587581951, +STORE, 140415587581952, 140415587590143, +STORE, 140415587573760, 140415587581951, +ERASE, 140415587573760, 140415587581951, +STORE, 140415587573760, 140415587581951, +ERASE, 140415587581952, 140415587590143, +STORE, 140415587581952, 140415587590143, +STORE, 140415583182848, 140415585398783, +SNULL, 140415583182848, 140415583281151, +STORE, 140415583281152, 140415585398783, +STORE, 140415583182848, 140415583281151, +SNULL, 140415585374207, 140415585398783, +STORE, 140415583281152, 140415585374207, +STORE, 140415585374208, 140415585398783, +SNULL, 140415585374208, 140415585382399, +STORE, 140415585382400, 140415585398783, +STORE, 140415585374208, 140415585382399, +ERASE, 140415585374208, 140415585382399, +STORE, 140415585374208, 140415585382399, +ERASE, 140415585382400, 140415585398783, +STORE, 140415585382400, 140415585398783, +STORE, 140415580979200, 140415583182847, +SNULL, 140415580979200, 140415581081599, +STORE, 140415581081600, 140415583182847, +STORE, 140415580979200, 140415581081599, +SNULL, 140415583174655, 140415583182847, +STORE, 140415581081600, 140415583174655, +STORE, 140415583174656, 140415583182847, +ERASE, 140415583174656, 140415583182847, +STORE, 140415583174656, 140415583182847, +STORE, 140415578816512, 140415580979199, +SNULL, 140415578816512, 140415578877951, +STORE, 140415578877952, 140415580979199, +STORE, 140415578816512, 140415578877951, +SNULL, 140415580971007, 140415580979199, +STORE, 140415578877952, 140415580971007, +STORE, 140415580971008, 140415580979199, +ERASE, 140415580971008, 140415580979199, +STORE, 140415580971008, 140415580979199, +STORE, 140415576563712, 140415578816511, +SNULL, 140415576563712, 140415576715263, +STORE, 140415576715264, 140415578816511, +STORE, 140415576563712, 140415576715263, +SNULL, 140415578808319, 140415578816511, +STORE, 140415576715264, 140415578808319, +STORE, 140415578808320, 140415578816511, +ERASE, 140415578808320, 140415578816511, +STORE, 140415578808320, 140415578816511, +STORE, 140415574392832, 140415576563711, +SNULL, 140415574392832, 140415574462463, +STORE, 140415574462464, 140415576563711, +STORE, 140415574392832, 140415574462463, +SNULL, 140415576555519, 140415576563711, +STORE, 140415574462464, 140415576555519, +STORE, 140415576555520, 140415576563711, +ERASE, 140415576555520, 140415576563711, +STORE, 140415576555520, 140415576563711, +STORE, 140415607910400, 140415607934975, +STORE, 140415571230720, 140415574392831, +SNULL, 140415571230720, 140415572291583, +STORE, 140415572291584, 140415574392831, +STORE, 140415571230720, 140415572291583, +SNULL, 140415574384639, 140415574392831, +STORE, 140415572291584, 140415574384639, +STORE, 140415574384640, 140415574392831, +ERASE, 140415574384640, 140415574392831, +STORE, 140415574384640, 140415574392831, +STORE, 140415607902208, 140415607934975, +SNULL, 140415593476095, 140415593484287, +STORE, 140415593459712, 140415593476095, +STORE, 140415593476096, 140415593484287, +SNULL, 140415574388735, 140415574392831, +STORE, 140415574384640, 140415574388735, +STORE, 140415574388736, 140415574392831, +SNULL, 140415576559615, 140415576563711, +STORE, 140415576555520, 140415576559615, +STORE, 140415576559616, 140415576563711, +SNULL, 140415589699583, 140415589703679, +STORE, 140415589695488, 140415589699583, +STORE, 140415589699584, 140415589703679, +SNULL, 140415585378303, 140415585382399, +STORE, 140415585374208, 140415585378303, +STORE, 140415585378304, 140415585382399, +SNULL, 140415578812415, 140415578816511, +STORE, 
140415578808320, 140415578812415, +STORE, 140415578812416, 140415578816511, +SNULL, 140415580975103, 140415580979199, +STORE, 140415580971008, 140415580975103, +STORE, 140415580975104, 140415580979199, +SNULL, 140415583178751, 140415583182847, +STORE, 140415583174656, 140415583178751, +STORE, 140415583178752, 140415583182847, +SNULL, 140415587577855, 140415587581951, +STORE, 140415587573760, 140415587577855, +STORE, 140415587577856, 140415587581951, +SNULL, 140415595687935, 140415595692031, +STORE, 140415595683840, 140415595687935, +STORE, 140415595687936, 140415595692031, +STORE, 140415607894016, 140415607934975, +SNULL, 140415599345663, 140415599353855, +STORE, 140415599304704, 140415599345663, +STORE, 140415599345664, 140415599353855, +SNULL, 140415603240959, 140415603245055, +STORE, 140415603208192, 140415603240959, +STORE, 140415603240960, 140415603245055, +SNULL, 140415605719039, 140415605723135, +STORE, 140415605710848, 140415605719039, +STORE, 140415605719040, 140415605723135, +SNULL, 94478499299327, 94478499303423, +STORE, 94478499295232, 94478499299327, +STORE, 94478499299328, 94478499303423, +SNULL, 140415607967743, 140415607971839, +STORE, 140415607963648, 140415607967743, +STORE, 140415607967744, 140415607971839, +ERASE, 140415607934976, 140415607963647, +STORE, 94478511173632, 94478511378431, +STORE, 140415606210560, 140415607894015, +STORE, 140415607934976, 140415607963647, +STORE, 94478511173632, 94478511513599, +STORE, 94478511173632, 94478511648767, +SNULL, 94478511615999, 94478511648767, +STORE, 94478511173632, 94478511615999, +STORE, 94478511616000, 94478511648767, +ERASE, 94478511616000, 94478511648767, +STORE, 94478511173632, 94478511751167, +SNULL, 94478511747071, 94478511751167, +STORE, 94478511173632, 94478511747071, +STORE, 94478511747072, 94478511751167, +ERASE, 94478511747072, 94478511751167, +STORE, 94478511173632, 94478511882239, +SNULL, 94478511878143, 94478511882239, +STORE, 94478511173632, 94478511878143, +STORE, 94478511878144, 94478511882239, +ERASE, 94478511878144, 94478511882239, +STORE, 94478511173632, 94478512013311, +SNULL, 94478512009215, 94478512013311, +STORE, 94478511173632, 94478512009215, +STORE, 94478512009216, 94478512013311, +ERASE, 94478512009216, 94478512013311, +STORE, 94478511173632, 94478512144383, +STORE, 94478511173632, 94478512279551, +STORE, 140415606181888, 140415606210559, +STORE, 140415569100800, 140415571230719, +SNULL, 140415569100800, 140415569129471, +STORE, 140415569129472, 140415571230719, +STORE, 140415569100800, 140415569129471, +SNULL, 140415571222527, 140415571230719, +STORE, 140415569129472, 140415571222527, +STORE, 140415571222528, 140415571230719, +ERASE, 140415571222528, 140415571230719, +STORE, 140415571222528, 140415571230719, +STORE, 140415566905344, 140415569100799, +SNULL, 140415566905344, 140415566987263, +STORE, 140415566987264, 140415569100799, +STORE, 140415566905344, 140415566987263, +SNULL, 140415569084415, 140415569100799, +STORE, 140415566987264, 140415569084415, +STORE, 140415569084416, 140415569100799, +SNULL, 140415569084416, 140415569092607, +STORE, 140415569092608, 140415569100799, +STORE, 140415569084416, 140415569092607, +ERASE, 140415569084416, 140415569092607, +STORE, 140415569084416, 140415569092607, +ERASE, 140415569092608, 140415569100799, +STORE, 140415569092608, 140415569100799, +SNULL, 140415569088511, 140415569092607, +STORE, 140415569084416, 140415569088511, +STORE, 140415569088512, 140415569092607, +SNULL, 140415571226623, 140415571230719, +STORE, 140415571222528, 140415571226623, 
+STORE, 140415571226624, 140415571230719, +ERASE, 140415606181888, 140415606210559, +STORE, 140415606181888, 140415606210559, +STORE, 140415564759040, 140415566905343, +SNULL, 140415564759040, 140415564804095, +STORE, 140415564804096, 140415566905343, +STORE, 140415564759040, 140415564804095, +SNULL, 140415566897151, 140415566905343, +STORE, 140415564804096, 140415566897151, +STORE, 140415566897152, 140415566905343, +ERASE, 140415566897152, 140415566905343, +STORE, 140415566897152, 140415566905343, +STORE, 140415562588160, 140415564759039, +SNULL, 140415562588160, 140415562629119, +STORE, 140415562629120, 140415564759039, +STORE, 140415562588160, 140415562629119, +SNULL, 140415564726271, 140415564759039, +STORE, 140415562629120, 140415564726271, +STORE, 140415564726272, 140415564759039, +SNULL, 140415564726272, 140415564734463, +STORE, 140415564734464, 140415564759039, +STORE, 140415564726272, 140415564734463, +ERASE, 140415564726272, 140415564734463, +STORE, 140415564726272, 140415564734463, +ERASE, 140415564734464, 140415564759039, +STORE, 140415564734464, 140415564759039, +SNULL, 140415564730367, 140415564734463, +STORE, 140415564726272, 140415564730367, +STORE, 140415564730368, 140415564734463, +SNULL, 140415566901247, 140415566905343, +STORE, 140415566897152, 140415566901247, +STORE, 140415566901248, 140415566905343, +ERASE, 140415606181888, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415605944320, 140415606210559, +ERASE, 140415605944320, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 94478511173632, 94478512414719, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 140415606206464, 140415606210559, +ERASE, 140415606206464, 140415606210559, +STORE, 94478511173632, 94478512652287, +STORE, 94478511173632, 94478512787455, +STORE, 94478511173632, 94478512922623, +STORE, 94478511173632, 94478513057791, +STORE, 140415537422336, 140415562588159, +STORE, 94478511173632, 94478513192959, +STORE, 94478511173632, 94478513356799, +STORE, 94478511173632, 94478513491967, +STORE, 94478511173632, 94478513627135, +STORE, 94478511173632, 94478513790975, +STORE, 94478511173632, 94478513926143, +STORE, 94478511173632, 94478514061311, +STORE, 94478511173632, 94478514196479, +STORE, 94478511173632, 94478514331647, +STORE, 94478511173632, 94478514606079, +STORE, 
94478511173632, 94478514741247, +STORE, 94478511173632, 94478514876415, +STORE, 94478511173632, 94478515011583, +STORE, 94478511173632, 94478515146751, +STORE, 94478511173632, 94478515281919, +STORE, 94478511173632, 94478515474431, +STORE, 94478511173632, 94478515609599, +STORE, 94478511173632, 94478515744767, +STORE, 140415536922624, 140415562588159, +STORE, 94478511173632, 94478515879935, +STORE, 94478511173632, 94478516015103, +STORE, 94478511173632, 94478516150271, +STORE, 94478511173632, 94478516285439, +STORE, 94478511173632, 94478516420607, +STORE, 94478511173632, 94478516555775, +STORE, 94478511173632, 94478516690943, +STORE, 94478511173632, 94478516826111, +STORE, 94478511173632, 94478516961279, +STORE, 94478511173632, 94478517231615, +STORE, 94478511173632, 94478517366783, +STORE, 94478511173632, 94478517501951, +STORE, 94478511173632, 94478517637119, +STORE, 94478511173632, 94478517772287, +STORE, 94478511173632, 94478517907455, +STORE, 94478511173632, 94478518042623, +STORE, 94478511173632, 94478518177791, +STORE, 94478511173632, 94478518312959, +STORE, 94478511173632, 94478518448127, +STORE, 140415535910912, 140415562588159, +SNULL, 140415536922623, 140415562588159, +STORE, 140415535910912, 140415536922623, +STORE, 140415536922624, 140415562588159, +SNULL, 140415536922624, 140415537422335, +STORE, 140415537422336, 140415562588159, +STORE, 140415536922624, 140415537422335, +ERASE, 140415536922624, 140415537422335, +STORE, 94478511173632, 94478518583295, +STORE, 94478511173632, 94478518718463, +STORE, 94478511173632, 94478518853631, +STORE, 94478511173632, 94478518988799, +STORE, 94478511173632, 94478519123967, +STORE, 94478511173632, 94478519259135, +STORE, 140415509696512, 140415535910911, +ERASE, 140415537422336, 140415562588159, +STORE, 140415482433536, 140415509696511, + }; + unsigned long set28[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140722475622400, 140737488351231, +SNULL, 140722475626495, 140737488351231, +STORE, 140722475622400, 140722475626495, +STORE, 140722475491328, 140722475626495, +STORE, 93865834291200, 93865836548095, +SNULL, 93865834422271, 93865836548095, +STORE, 93865834291200, 93865834422271, +STORE, 93865834422272, 93865836548095, +ERASE, 93865834422272, 93865836548095, +STORE, 93865836519424, 93865836527615, +STORE, 93865836527616, 93865836548095, +STORE, 139918411104256, 139918413357055, +SNULL, 139918411247615, 139918413357055, +STORE, 139918411104256, 139918411247615, +STORE, 139918411247616, 139918413357055, +ERASE, 139918411247616, 139918413357055, +STORE, 139918413344768, 139918413352959, +STORE, 139918413352960, 139918413357055, +STORE, 140722476642304, 140722476646399, +STORE, 140722476630016, 140722476642303, +STORE, 139918413316096, 139918413344767, +STORE, 139918413307904, 139918413316095, +STORE, 139918408888320, 139918411104255, +SNULL, 139918408888320, 139918408986623, +STORE, 139918408986624, 139918411104255, +STORE, 139918408888320, 139918408986623, +SNULL, 139918411079679, 139918411104255, +STORE, 139918408986624, 139918411079679, +STORE, 139918411079680, 139918411104255, +SNULL, 139918411079680, 139918411087871, +STORE, 139918411087872, 139918411104255, +STORE, 139918411079680, 139918411087871, +ERASE, 139918411079680, 139918411087871, +STORE, 139918411079680, 139918411087871, +ERASE, 139918411087872, 139918411104255, +STORE, 139918411087872, 139918411104255, +STORE, 139918405091328, 139918408888319, +SNULL, 139918405091328, 139918406750207, +STORE, 139918406750208, 139918408888319, +STORE, 139918405091328, 
139918406750207, +SNULL, 139918408847359, 139918408888319, +STORE, 139918406750208, 139918408847359, +STORE, 139918408847360, 139918408888319, +SNULL, 139918408847360, 139918408871935, +STORE, 139918408871936, 139918408888319, +STORE, 139918408847360, 139918408871935, +ERASE, 139918408847360, 139918408871935, +STORE, 139918408847360, 139918408871935, +ERASE, 139918408871936, 139918408888319, +STORE, 139918408871936, 139918408888319, +STORE, 139918413299712, 139918413316095, +SNULL, 139918408863743, 139918408871935, +STORE, 139918408847360, 139918408863743, +STORE, 139918408863744, 139918408871935, +SNULL, 139918411083775, 139918411087871, +STORE, 139918411079680, 139918411083775, +STORE, 139918411083776, 139918411087871, +SNULL, 93865836523519, 93865836527615, +STORE, 93865836519424, 93865836523519, +STORE, 93865836523520, 93865836527615, +SNULL, 139918413348863, 139918413352959, +STORE, 139918413344768, 139918413348863, +STORE, 139918413348864, 139918413352959, +ERASE, 139918413316096, 139918413344767, +STORE, 93865848528896, 93865848664063, + }; + unsigned long set29[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140734467944448, 140737488351231, +SNULL, 140734467948543, 140737488351231, +STORE, 140734467944448, 140734467948543, +STORE, 140734467813376, 140734467948543, +STORE, 94880407924736, 94880410177535, +SNULL, 94880408055807, 94880410177535, +STORE, 94880407924736, 94880408055807, +STORE, 94880408055808, 94880410177535, +ERASE, 94880408055808, 94880410177535, +STORE, 94880410148864, 94880410157055, +STORE, 94880410157056, 94880410177535, +STORE, 140143367815168, 140143370067967, +SNULL, 140143367958527, 140143370067967, +STORE, 140143367815168, 140143367958527, +STORE, 140143367958528, 140143370067967, +ERASE, 140143367958528, 140143370067967, +STORE, 140143370055680, 140143370063871, +STORE, 140143370063872, 140143370067967, +STORE, 140734468329472, 140734468333567, +STORE, 140734468317184, 140734468329471, +STORE, 140143370027008, 140143370055679, +STORE, 140143370018816, 140143370027007, +STORE, 140143365599232, 140143367815167, +SNULL, 140143365599232, 140143365697535, +STORE, 140143365697536, 140143367815167, +STORE, 140143365599232, 140143365697535, +SNULL, 140143367790591, 140143367815167, +STORE, 140143365697536, 140143367790591, +STORE, 140143367790592, 140143367815167, +SNULL, 140143367790592, 140143367798783, +STORE, 140143367798784, 140143367815167, +STORE, 140143367790592, 140143367798783, +ERASE, 140143367790592, 140143367798783, +STORE, 140143367790592, 140143367798783, +ERASE, 140143367798784, 140143367815167, +STORE, 140143367798784, 140143367815167, +STORE, 140143361802240, 140143365599231, +SNULL, 140143361802240, 140143363461119, +STORE, 140143363461120, 140143365599231, +STORE, 140143361802240, 140143363461119, +SNULL, 140143365558271, 140143365599231, +STORE, 140143363461120, 140143365558271, +STORE, 140143365558272, 140143365599231, +SNULL, 140143365558272, 140143365582847, +STORE, 140143365582848, 140143365599231, +STORE, 140143365558272, 140143365582847, +ERASE, 140143365558272, 140143365582847, +STORE, 140143365558272, 140143365582847, +ERASE, 140143365582848, 140143365599231, +STORE, 140143365582848, 140143365599231, +STORE, 140143370010624, 140143370027007, +SNULL, 140143365574655, 140143365582847, +STORE, 140143365558272, 140143365574655, +STORE, 140143365574656, 140143365582847, +SNULL, 140143367794687, 140143367798783, +STORE, 140143367790592, 140143367794687, +STORE, 140143367794688, 140143367798783, +SNULL, 94880410152959, 94880410157055, 
+STORE, 94880410148864, 94880410152959, +STORE, 94880410152960, 94880410157055, +SNULL, 140143370059775, 140143370063871, +STORE, 140143370055680, 140143370059775, +STORE, 140143370059776, 140143370063871, +ERASE, 140143370027008, 140143370055679, +STORE, 94880442400768, 94880442535935, +STORE, 140143353409536, 140143361802239, +SNULL, 140143353413631, 140143361802239, +STORE, 140143353409536, 140143353413631, +STORE, 140143353413632, 140143361802239, +STORE, 140143345016832, 140143353409535, +STORE, 140143210799104, 140143345016831, +SNULL, 140143210799104, 140143239364607, +STORE, 140143239364608, 140143345016831, +STORE, 140143210799104, 140143239364607, +ERASE, 140143210799104, 140143239364607, +SNULL, 140143306473471, 140143345016831, +STORE, 140143239364608, 140143306473471, +STORE, 140143306473472, 140143345016831, +ERASE, 140143306473472, 140143345016831, +SNULL, 140143239499775, 140143306473471, +STORE, 140143239364608, 140143239499775, +STORE, 140143239499776, 140143306473471, +SNULL, 140143345020927, 140143353409535, +STORE, 140143345016832, 140143345020927, +STORE, 140143345020928, 140143353409535, +STORE, 140143336624128, 140143345016831, +SNULL, 140143336628223, 140143345016831, +STORE, 140143336624128, 140143336628223, +STORE, 140143336628224, 140143345016831, +STORE, 140143328231424, 140143336624127, +SNULL, 140143328235519, 140143336624127, +STORE, 140143328231424, 140143328235519, +STORE, 140143328235520, 140143336624127, +STORE, 140143319838720, 140143328231423, +SNULL, 140143319842815, 140143328231423, +STORE, 140143319838720, 140143319842815, +STORE, 140143319842816, 140143328231423, +STORE, 140143311446016, 140143319838719, +STORE, 140143105146880, 140143239364607, +STORE, 140143096754176, 140143105146879, +STORE, 140143029645312, 140143096754175, +ERASE, 140143029645312, 140143096754175, +STORE, 140142962536448, 140143096754175, +SNULL, 140142962536448, 140142970929151, +STORE, 140142970929152, 140143096754175, +STORE, 140142962536448, 140142970929151, +ERASE, 140142962536448, 140142970929151, +STORE, 140142962536448, 140142970929151, +STORE, 140142828318720, 140142962536447, +STORE, 140142819926016, 140142828318719, +SNULL, 140142828318720, 140142836711423, +STORE, 140142836711424, 140142962536447, +STORE, 140142828318720, 140142836711423, +ERASE, 140142828318720, 140142836711423, +SNULL, 140143172255743, 140143239364607, +STORE, 140143105146880, 140143172255743, +STORE, 140143172255744, 140143239364607, +ERASE, 140143172255744, 140143239364607, +SNULL, 140143105282047, 140143172255743, +STORE, 140143105146880, 140143105282047, +STORE, 140143105282048, 140143172255743, +SNULL, 140143038038015, 140143096754175, +STORE, 140142970929152, 140143038038015, +STORE, 140143038038016, 140143096754175, +ERASE, 140143038038016, 140143096754175, +SNULL, 140142971064319, 140143038038015, +STORE, 140142970929152, 140142971064319, +STORE, 140142971064320, 140143038038015, +SNULL, 140142903820287, 140142962536447, +STORE, 140142836711424, 140142903820287, +STORE, 140142903820288, 140142962536447, +ERASE, 140142903820288, 140142962536447, +SNULL, 140142836846591, 140142903820287, +STORE, 140142836711424, 140142836846591, +STORE, 140142836846592, 140142903820287, +STORE, 140142685708288, 140142819926015, +SNULL, 140143311450111, 140143319838719, +STORE, 140143311446016, 140143311450111, +STORE, 140143311450112, 140143319838719, +SNULL, 140142962540543, 140142970929151, +STORE, 140142962536448, 140142962540543, +STORE, 140142962540544, 140142970929151, +SNULL, 140142685708288, 
140142702493695, +STORE, 140142702493696, 140142819926015, +STORE, 140142685708288, 140142702493695, +ERASE, 140142685708288, 140142702493695, +SNULL, 140142769602559, 140142819926015, +STORE, 140142702493696, 140142769602559, +STORE, 140142769602560, 140142819926015, +ERASE, 140142769602560, 140142819926015, +SNULL, 140142702628863, 140142769602559, +STORE, 140142702493696, 140142702628863, +STORE, 140142702628864, 140142769602559, +STORE, 140143230971904, 140143239364607, +SNULL, 140143230975999, 140143239364607, +STORE, 140143230971904, 140143230975999, +STORE, 140143230976000, 140143239364607, +SNULL, 140143096758271, 140143105146879, +STORE, 140143096754176, 140143096758271, +STORE, 140143096758272, 140143105146879, +STORE, 140143222579200, 140143230971903, +SNULL, 140143222583295, 140143230971903, +STORE, 140143222579200, 140143222583295, +STORE, 140143222583296, 140143230971903, +STORE, 140143214186496, 140143222579199, +SNULL, 140142819930111, 140142828318719, +STORE, 140142819926016, 140142819930111, +STORE, 140142819930112, 140142828318719, +STORE, 140143205793792, 140143222579199, +SNULL, 140143205793792, 140143214186495, +STORE, 140143214186496, 140143222579199, +STORE, 140143205793792, 140143214186495, +SNULL, 140143214190591, 140143222579199, +STORE, 140143214186496, 140143214190591, +STORE, 140143214190592, 140143222579199, +SNULL, 140143205797887, 140143214186495, +STORE, 140143205793792, 140143205797887, +STORE, 140143205797888, 140143214186495, +STORE, 140143197401088, 140143205793791, +SNULL, 140143197405183, 140143205793791, +STORE, 140143197401088, 140143197405183, +STORE, 140143197405184, 140143205793791, +STORE, 140143189008384, 140143197401087, +STORE, 140143180615680, 140143197401087, +STORE, 140143088361472, 140143096754175, +SNULL, 140143180619775, 140143197401087, +STORE, 140143180615680, 140143180619775, +STORE, 140143180619776, 140143197401087, +SNULL, 140143180619776, 140143189008383, +STORE, 140143189008384, 140143197401087, +STORE, 140143180619776, 140143189008383, +SNULL, 140143189012479, 140143197401087, +STORE, 140143189008384, 140143189012479, +STORE, 140143189012480, 140143197401087, +SNULL, 140143088365567, 140143096754175, +STORE, 140143088361472, 140143088365567, +STORE, 140143088365568, 140143096754175, +STORE, 140143079968768, 140143088361471, +SNULL, 140143079972863, 140143088361471, +STORE, 140143079968768, 140143079972863, +STORE, 140143079972864, 140143088361471, +STORE, 140143071576064, 140143079968767, +SNULL, 140143071580159, 140143079968767, +STORE, 140143071576064, 140143071580159, +STORE, 140143071580160, 140143079968767, +STORE, 140143063183360, 140143071576063, +STORE, 140143054790656, 140143071576063, +SNULL, 140143054794751, 140143071576063, +STORE, 140143054790656, 140143054794751, +STORE, 140143054794752, 140143071576063, +SNULL, 140143054794752, 140143063183359, +STORE, 140143063183360, 140143071576063, +STORE, 140143054794752, 140143063183359, +SNULL, 140143063187455, 140143071576063, +STORE, 140143063183360, 140143063187455, +STORE, 140143063187456, 140143071576063, +STORE, 140143046397952, 140143054790655, +STORE, 140142954143744, 140142962536447, +STORE, 140142945751040, 140142962536447, +STORE, 140142937358336, 140142962536447, +STORE, 140142928965632, 140142962536447, +STORE, 140142568275968, 140142702493695, +SNULL, 140142635384831, 140142702493695, +STORE, 140142568275968, 140142635384831, +STORE, 140142635384832, 140142702493695, +ERASE, 140142635384832, 140142702493695, +STORE, 140142920572928, 140142962536447, +STORE, 
140142912180224, 140142962536447, +STORE, 140142568275968, 140142702493695, +SNULL, 140142568275968, 140142635384831, +STORE, 140142635384832, 140142702493695, +STORE, 140142568275968, 140142635384831, +SNULL, 140142635519999, 140142702493695, +STORE, 140142635384832, 140142635519999, +STORE, 140142635520000, 140142702493695, +STORE, 140142819930112, 140142836711423, +STORE, 140142811533312, 140142819926015, +STORE, 140142434058240, 140142635384831, +SNULL, 140142501167103, 140142635384831, +STORE, 140142434058240, 140142501167103, +STORE, 140142501167104, 140142635384831, +SNULL, 140142501167104, 140142568275967, +STORE, 140142568275968, 140142635384831, +STORE, 140142501167104, 140142568275967, +ERASE, 140142501167104, 140142568275967, +STORE, 140142299840512, 140142501167103, +STORE, 140142803140608, 140142819926015, +SNULL, 140142366949375, 140142501167103, +STORE, 140142299840512, 140142366949375, +STORE, 140142366949376, 140142501167103, +SNULL, 140142366949376, 140142434058239, +STORE, 140142434058240, 140142501167103, +STORE, 140142366949376, 140142434058239, +ERASE, 140142366949376, 140142434058239, +STORE, 140142794747904, 140142819926015, +STORE, 140142786355200, 140142819926015, +STORE, 140142299840512, 140142501167103, +STORE, 140142777962496, 140142819926015, +STORE, 140142559883264, 140142568275967, +STORE, 140142232731648, 140142501167103, +STORE, 140142551490560, 140142568275967, +SNULL, 140142777962496, 140142803140607, +STORE, 140142803140608, 140142819926015, +STORE, 140142777962496, 140142803140607, +SNULL, 140142803144703, 140142819926015, +STORE, 140142803140608, 140142803144703, +STORE, 140142803144704, 140142819926015, +STORE, 140142543097856, 140142568275967, +STORE, 140142098513920, 140142501167103, +SNULL, 140142165622783, 140142501167103, +STORE, 140142098513920, 140142165622783, +STORE, 140142165622784, 140142501167103, +SNULL, 140142165622784, 140142232731647, +STORE, 140142232731648, 140142501167103, +STORE, 140142165622784, 140142232731647, +ERASE, 140142165622784, 140142232731647, +SNULL, 140142568411135, 140142635384831, +STORE, 140142568275968, 140142568411135, +STORE, 140142568411136, 140142635384831, +STORE, 140141964296192, 140142165622783, +SNULL, 140142912180224, 140142928965631, +STORE, 140142928965632, 140142962536447, +STORE, 140142912180224, 140142928965631, +SNULL, 140142928969727, 140142962536447, +STORE, 140142928965632, 140142928969727, +STORE, 140142928969728, 140142962536447, +STORE, 140141830078464, 140142165622783, +SNULL, 140142912184319, 140142928965631, +STORE, 140142912180224, 140142912184319, +STORE, 140142912184320, 140142928965631, +SNULL, 140142232731648, 140142434058239, +STORE, 140142434058240, 140142501167103, +STORE, 140142232731648, 140142434058239, +SNULL, 140142434193407, 140142501167103, +STORE, 140142434058240, 140142434193407, +STORE, 140142434193408, 140142501167103, +SNULL, 140142232731648, 140142299840511, +STORE, 140142299840512, 140142434058239, +STORE, 140142232731648, 140142299840511, +SNULL, 140142299975679, 140142434058239, +STORE, 140142299840512, 140142299975679, +STORE, 140142299975680, 140142434058239, +SNULL, 140142928969728, 140142954143743, +STORE, 140142954143744, 140142962536447, +STORE, 140142928969728, 140142954143743, +SNULL, 140142954147839, 140142962536447, +STORE, 140142954143744, 140142954147839, +STORE, 140142954147840, 140142962536447, +STORE, 140141830078464, 140142299840511, +SNULL, 140142543097856, 140142559883263, +STORE, 140142559883264, 140142568275967, +STORE, 140142543097856, 
140142559883263, +SNULL, 140142559887359, 140142568275967, +STORE, 140142559883264, 140142559887359, +STORE, 140142559887360, 140142568275967, +STORE, 140142534705152, 140142559883263, +SNULL, 140142928969728, 140142945751039, +STORE, 140142945751040, 140142954143743, +STORE, 140142928969728, 140142945751039, +SNULL, 140142945755135, 140142954143743, +STORE, 140142945751040, 140142945755135, +STORE, 140142945755136, 140142954143743, +SNULL, 140142299975680, 140142366949375, +STORE, 140142366949376, 140142434058239, +STORE, 140142299975680, 140142366949375, +SNULL, 140142367084543, 140142434058239, +STORE, 140142366949376, 140142367084543, +STORE, 140142367084544, 140142434058239, +SNULL, 140142928969728, 140142937358335, +STORE, 140142937358336, 140142945751039, +STORE, 140142928969728, 140142937358335, +SNULL, 140142937362431, 140142945751039, +STORE, 140142937358336, 140142937362431, +STORE, 140142937362432, 140142945751039, +SNULL, 140141830078464, 140142232731647, +STORE, 140142232731648, 140142299840511, +STORE, 140141830078464, 140142232731647, +SNULL, 140142232866815, 140142299840511, +STORE, 140142232731648, 140142232866815, +STORE, 140142232866816, 140142299840511, +SNULL, 140142534705152, 140142543097855, +STORE, 140142543097856, 140142559883263, +STORE, 140142534705152, 140142543097855, +SNULL, 140142543101951, 140142559883263, +STORE, 140142543097856, 140142543101951, +STORE, 140142543101952, 140142559883263, +STORE, 140142526312448, 140142543097855, +STORE, 140142517919744, 140142543097855, +SNULL, 140141830078464, 140142098513919, +STORE, 140142098513920, 140142232731647, +STORE, 140141830078464, 140142098513919, +SNULL, 140142098649087, 140142232731647, +STORE, 140142098513920, 140142098649087, +STORE, 140142098649088, 140142232731647, +SNULL, 140142031405055, 140142098513919, +STORE, 140141830078464, 140142031405055, +STORE, 140142031405056, 140142098513919, +ERASE, 140142031405056, 140142098513919, +SNULL, 140141830078464, 140141964296191, +STORE, 140141964296192, 140142031405055, +STORE, 140141830078464, 140141964296191, +SNULL, 140141964431359, 140142031405055, +STORE, 140141964296192, 140141964431359, +STORE, 140141964431360, 140142031405055, +STORE, 140142509527040, 140142543097855, +SNULL, 140141897187327, 140141964296191, +STORE, 140141830078464, 140141897187327, +STORE, 140141897187328, 140141964296191, +ERASE, 140141897187328, 140141964296191, +SNULL, 140141830213631, 140141897187327, +STORE, 140141830078464, 140141830213631, +STORE, 140141830213632, 140141897187327, +SNULL, 140142803144704, 140142811533311, +STORE, 140142811533312, 140142819926015, +STORE, 140142803144704, 140142811533311, +SNULL, 140142811537407, 140142819926015, +STORE, 140142811533312, 140142811537407, +STORE, 140142811537408, 140142819926015, +SNULL, 140142098649088, 140142165622783, +STORE, 140142165622784, 140142232731647, +STORE, 140142098649088, 140142165622783, +SNULL, 140142165757951, 140142232731647, +STORE, 140142165622784, 140142165757951, +STORE, 140142165757952, 140142232731647, +STORE, 140142090121216, 140142098513919, +SNULL, 140142777962496, 140142786355199, +STORE, 140142786355200, 140142803140607, +STORE, 140142777962496, 140142786355199, +SNULL, 140142786359295, 140142803140607, +STORE, 140142786355200, 140142786359295, +STORE, 140142786359296, 140142803140607, +SNULL, 140142509527040, 140142534705151, +STORE, 140142534705152, 140142543097855, +STORE, 140142509527040, 140142534705151, +SNULL, 140142534709247, 140142543097855, +STORE, 140142534705152, 140142534709247, +STORE, 
140142534709248, 140142543097855, +STORE, 140142081728512, 140142098513919, +SNULL, 140142786359296, 140142794747903, +STORE, 140142794747904, 140142803140607, +STORE, 140142786359296, 140142794747903, +SNULL, 140142794751999, 140142803140607, +STORE, 140142794747904, 140142794751999, +STORE, 140142794752000, 140142803140607, +STORE, 140142073335808, 140142098513919, +SNULL, 140142073339903, 140142098513919, +STORE, 140142073335808, 140142073339903, +STORE, 140142073339904, 140142098513919, +SNULL, 140142543101952, 140142551490559, +STORE, 140142551490560, 140142559883263, +STORE, 140142543101952, 140142551490559, +SNULL, 140142551494655, 140142559883263, +STORE, 140142551490560, 140142551494655, +STORE, 140142551494656, 140142559883263, +SNULL, 140142509527040, 140142517919743, +STORE, 140142517919744, 140142534705151, +STORE, 140142509527040, 140142517919743, +SNULL, 140142517923839, 140142534705151, +STORE, 140142517919744, 140142517923839, +STORE, 140142517923840, 140142534705151, +STORE, 140142064943104, 140142073335807, +SNULL, 140142073339904, 140142090121215, +STORE, 140142090121216, 140142098513919, +STORE, 140142073339904, 140142090121215, +SNULL, 140142090125311, 140142098513919, +STORE, 140142090121216, 140142090125311, +STORE, 140142090125312, 140142098513919, +STORE, 140142056550400, 140142073335807, +SNULL, 140142056554495, 140142073335807, +STORE, 140142056550400, 140142056554495, +STORE, 140142056554496, 140142073335807, +STORE, 140142048157696, 140142056550399, +SNULL, 140142509531135, 140142517919743, +STORE, 140142509527040, 140142509531135, +STORE, 140142509531136, 140142517919743, +SNULL, 140142777966591, 140142786355199, +STORE, 140142777962496, 140142777966591, +STORE, 140142777966592, 140142786355199, +SNULL, 140143046402047, 140143054790655, +STORE, 140143046397952, 140143046402047, +STORE, 140143046402048, 140143054790655, +SNULL, 140142912184320, 140142920572927, +STORE, 140142920572928, 140142928965631, +STORE, 140142912184320, 140142920572927, +SNULL, 140142920577023, 140142928965631, +STORE, 140142920572928, 140142920577023, +STORE, 140142920577024, 140142928965631, +STORE, 140142039764992, 140142056550399, +STORE, 140141955903488, 140141964296191, +SNULL, 140142819930112, 140142828318719, +STORE, 140142828318720, 140142836711423, +STORE, 140142819930112, 140142828318719, +SNULL, 140142828322815, 140142836711423, +STORE, 140142828318720, 140142828322815, +STORE, 140142828322816, 140142836711423, +SNULL, 140142517923840, 140142526312447, +STORE, 140142526312448, 140142534705151, +STORE, 140142517923840, 140142526312447, +SNULL, 140142526316543, 140142534705151, +STORE, 140142526312448, 140142526316543, +STORE, 140142526316544, 140142534705151, +STORE, 140141947510784, 140141964296191, +SNULL, 140142056554496, 140142064943103, +STORE, 140142064943104, 140142073335807, +STORE, 140142056554496, 140142064943103, +SNULL, 140142064947199, 140142073335807, +STORE, 140142064943104, 140142064947199, +STORE, 140142064947200, 140142073335807, +SNULL, 140142073339904, 140142081728511, +STORE, 140142081728512, 140142090121215, +STORE, 140142073339904, 140142081728511, +SNULL, 140142081732607, 140142090121215, +STORE, 140142081728512, 140142081732607, +STORE, 140142081732608, 140142090121215, +STORE, 140141939118080, 140141964296191, +STORE, 140141930725376, 140141964296191, +STORE, 140141922332672, 140141964296191, +STORE, 140141913939968, 140141964296191, +SNULL, 140141913939968, 140141922332671, +STORE, 140141922332672, 140141964296191, +STORE, 140141913939968, 
140141922332671, +SNULL, 140141922336767, 140141964296191, +STORE, 140141922332672, 140141922336767, +STORE, 140141922336768, 140141964296191, +STORE, 140141905547264, 140141922332671, +SNULL, 140141905551359, 140141922332671, +STORE, 140141905547264, 140141905551359, +STORE, 140141905551360, 140141922332671, +STORE, 140141821685760, 140141830078463, +STORE, 140141813293056, 140141830078463, +STORE, 140141804900352, 140141830078463, +STORE, 140141796507648, 140141830078463, +SNULL, 140141796511743, 140141830078463, +STORE, 140141796507648, 140141796511743, +STORE, 140141796511744, 140141830078463, +SNULL, 140141922336768, 140141955903487, +STORE, 140141955903488, 140141964296191, +STORE, 140141922336768, 140141955903487, +SNULL, 140141955907583, 140141964296191, +STORE, 140141955903488, 140141955907583, +STORE, 140141955907584, 140141964296191, +STORE, 140141788114944, 140141796507647, +STORE, 140141779722240, 140141796507647, +SNULL, 140141779722240, 140141788114943, +STORE, 140141788114944, 140141796507647, +STORE, 140141779722240, 140141788114943, +SNULL, 140141788119039, 140141796507647, +STORE, 140141788114944, 140141788119039, +STORE, 140141788119040, 140141796507647, +SNULL, 140141922336768, 140141947510783, +STORE, 140141947510784, 140141955903487, +STORE, 140141922336768, 140141947510783, +SNULL, 140141947514879, 140141955903487, +STORE, 140141947510784, 140141947514879, +STORE, 140141947514880, 140141955903487, +SNULL, 140142039764992, 140142048157695, +STORE, 140142048157696, 140142056550399, +STORE, 140142039764992, 140142048157695, +SNULL, 140142048161791, 140142056550399, +STORE, 140142048157696, 140142048161791, +STORE, 140142048161792, 140142056550399, +SNULL, 140142039769087, 140142048157695, +STORE, 140142039764992, 140142039769087, +STORE, 140142039769088, 140142048157695, +SNULL, 140141796511744, 140141804900351, +STORE, 140141804900352, 140141830078463, +STORE, 140141796511744, 140141804900351, +SNULL, 140141804904447, 140141830078463, +STORE, 140141804900352, 140141804904447, +STORE, 140141804904448, 140141830078463, +STORE, 140141771329536, 140141788114943, +STORE, 140141762936832, 140141788114943, +STORE, 140141754544128, 140141788114943, +SNULL, 140141804904448, 140141821685759, +STORE, 140141821685760, 140141830078463, +STORE, 140141804904448, 140141821685759, +SNULL, 140141821689855, 140141830078463, +STORE, 140141821685760, 140141821689855, +STORE, 140141821689856, 140141830078463, +SNULL, 140141922336768, 140141939118079, +STORE, 140141939118080, 140141947510783, +STORE, 140141922336768, 140141939118079, +SNULL, 140141939122175, 140141947510783, +STORE, 140141939118080, 140141939122175, +STORE, 140141939122176, 140141947510783, +SNULL, 140141905551360, 140141913939967, +STORE, 140141913939968, 140141922332671, +STORE, 140141905551360, 140141913939967, +SNULL, 140141913944063, 140141922332671, +STORE, 140141913939968, 140141913944063, +STORE, 140141913944064, 140141922332671, +STORE, 140141746151424, 140141788114943, +STORE, 140141737758720, 140141788114943, +SNULL, 140141804904448, 140141813293055, +STORE, 140141813293056, 140141821685759, +STORE, 140141804904448, 140141813293055, +SNULL, 140141813297151, 140141821685759, +STORE, 140141813293056, 140141813297151, +STORE, 140141813297152, 140141821685759, +STORE, 140141729366016, 140141788114943, +STORE, 140141720973312, 140141788114943, +STORE, 140141712580608, 140141788114943, +SNULL, 140141712584703, 140141788114943, +STORE, 140141712580608, 140141712584703, +STORE, 140141712584704, 140141788114943, +SNULL, 
140141922336768, 140141930725375, +STORE, 140141930725376, 140141939118079, +STORE, 140141922336768, 140141930725375, +SNULL, 140141930729471, 140141939118079, +STORE, 140141930725376, 140141930729471, +STORE, 140141930729472, 140141939118079, +STORE, 140141704187904, 140141712580607, +SNULL, 140141704191999, 140141712580607, +STORE, 140141704187904, 140141704191999, +STORE, 140141704192000, 140141712580607, +STORE, 140141695795200, 140141704187903, +STORE, 140141687402496, 140141704187903, +SNULL, 140141712584704, 140141771329535, +STORE, 140141771329536, 140141788114943, +STORE, 140141712584704, 140141771329535, +SNULL, 140141771333631, 140141788114943, +STORE, 140141771329536, 140141771333631, +STORE, 140141771333632, 140141788114943, +SNULL, 140141771333632, 140141779722239, +STORE, 140141779722240, 140141788114943, +STORE, 140141771333632, 140141779722239, +SNULL, 140141779726335, 140141788114943, +STORE, 140141779722240, 140141779726335, +STORE, 140141779726336, 140141788114943, +STORE, 140141679009792, 140141704187903, +SNULL, 140141679013887, 140141704187903, +STORE, 140141679009792, 140141679013887, +STORE, 140141679013888, 140141704187903, +STORE, 140141670617088, 140141679009791, +SNULL, 140141670621183, 140141679009791, +STORE, 140141670617088, 140141670621183, +STORE, 140141670621184, 140141679009791, +STORE, 140141662224384, 140141670617087, +SNULL, 140141712584704, 140141737758719, +STORE, 140141737758720, 140141771329535, +STORE, 140141712584704, 140141737758719, +SNULL, 140141737762815, 140141771329535, +STORE, 140141737758720, 140141737762815, +STORE, 140141737762816, 140141771329535, +SNULL, 140141712584704, 140141729366015, +STORE, 140141729366016, 140141737758719, +STORE, 140141712584704, 140141729366015, +SNULL, 140141729370111, 140141737758719, +STORE, 140141729366016, 140141729370111, +STORE, 140141729370112, 140141737758719, +SNULL, 140141737762816, 140141746151423, +STORE, 140141746151424, 140141771329535, +STORE, 140141737762816, 140141746151423, +SNULL, 140141746155519, 140141771329535, +STORE, 140141746151424, 140141746155519, +STORE, 140141746155520, 140141771329535, +STORE, 140141653831680, 140141670617087, +SNULL, 140141746155520, 140141762936831, +STORE, 140141762936832, 140141771329535, +STORE, 140141746155520, 140141762936831, +SNULL, 140141762940927, 140141771329535, +STORE, 140141762936832, 140141762940927, +STORE, 140141762940928, 140141771329535, +STORE, 140141645438976, 140141670617087, +SNULL, 140141645443071, 140141670617087, +STORE, 140141645438976, 140141645443071, +STORE, 140141645443072, 140141670617087, +SNULL, 140141712584704, 140141720973311, +STORE, 140141720973312, 140141729366015, +STORE, 140141712584704, 140141720973311, +SNULL, 140141720977407, 140141729366015, +STORE, 140141720973312, 140141720977407, +STORE, 140141720977408, 140141729366015, +STORE, 140141637046272, 140141645438975, +SNULL, 140141637050367, 140141645438975, +STORE, 140141637046272, 140141637050367, +STORE, 140141637050368, 140141645438975, +STORE, 140141628653568, 140141637046271, +SNULL, 140141628657663, 140141637046271, +STORE, 140141628653568, 140141628657663, +STORE, 140141628657664, 140141637046271, +STORE, 140141620260864, 140141628653567, +SNULL, 140141679013888, 140141687402495, +STORE, 140141687402496, 140141704187903, +STORE, 140141679013888, 140141687402495, +SNULL, 140141687406591, 140141704187903, +STORE, 140141687402496, 140141687406591, +STORE, 140141687406592, 140141704187903, +SNULL, 140141746155520, 140141754544127, +STORE, 140141754544128, 
140141762936831, +STORE, 140141746155520, 140141754544127, +SNULL, 140141754548223, 140141762936831, +STORE, 140141754544128, 140141754548223, +STORE, 140141754548224, 140141762936831, +SNULL, 140141687406592, 140141695795199, +STORE, 140141695795200, 140141704187903, +STORE, 140141687406592, 140141695795199, +SNULL, 140141695799295, 140141704187903, +STORE, 140141695795200, 140141695799295, +STORE, 140141695799296, 140141704187903, +STORE, 140141611868160, 140141628653567, +SNULL, 140141611872255, 140141628653567, +STORE, 140141611868160, 140141611872255, +STORE, 140141611872256, 140141628653567, +SNULL, 140141645443072, 140141662224383, +STORE, 140141662224384, 140141670617087, +STORE, 140141645443072, 140141662224383, +SNULL, 140141662228479, 140141670617087, +STORE, 140141662224384, 140141662228479, +STORE, 140141662228480, 140141670617087, +STORE, 140141603475456, 140141611868159, +SNULL, 140141603479551, 140141611868159, +STORE, 140141603475456, 140141603479551, +STORE, 140141603479552, 140141611868159, +STORE, 140141595082752, 140141603475455, +SNULL, 140141645443072, 140141653831679, +STORE, 140141653831680, 140141662224383, +STORE, 140141645443072, 140141653831679, +SNULL, 140141653835775, 140141662224383, +STORE, 140141653831680, 140141653835775, +STORE, 140141653835776, 140141662224383, +STORE, 140141586690048, 140141603475455, +SNULL, 140141611872256, 140141620260863, +STORE, 140141620260864, 140141628653567, +STORE, 140141611872256, 140141620260863, +SNULL, 140141620264959, 140141628653567, +STORE, 140141620260864, 140141620264959, +STORE, 140141620264960, 140141628653567, +SNULL, 140141586690048, 140141595082751, +STORE, 140141595082752, 140141603475455, +STORE, 140141586690048, 140141595082751, +SNULL, 140141595086847, 140141603475455, +STORE, 140141595082752, 140141595086847, +STORE, 140141595086848, 140141603475455, +STORE, 140141578297344, 140141595082751, +SNULL, 140141578301439, 140141595082751, +STORE, 140141578297344, 140141578301439, +STORE, 140141578301440, 140141595082751, +SNULL, 140141578301440, 140141586690047, +STORE, 140141586690048, 140141595082751, +STORE, 140141578301440, 140141586690047, +SNULL, 140141586694143, 140141595082751, +STORE, 140141586690048, 140141586694143, +STORE, 140141586694144, 140141595082751, +STORE, 140143370027008, 140143370055679, +STORE, 140143309254656, 140143311446015, +SNULL, 140143309254656, 140143309344767, +STORE, 140143309344768, 140143311446015, +STORE, 140143309254656, 140143309344767, +SNULL, 140143311437823, 140143311446015, +STORE, 140143309344768, 140143311437823, +STORE, 140143311437824, 140143311446015, +ERASE, 140143311437824, 140143311446015, +STORE, 140143311437824, 140143311446015, +SNULL, 140143311441919, 140143311446015, +STORE, 140143311437824, 140143311441919, +STORE, 140143311441920, 140143311446015, +ERASE, 140143370027008, 140143370055679, +ERASE, 140142912180224, 140142912184319, +ERASE, 140142912184320, 140142920572927, +ERASE, 140142945751040, 140142945755135, +ERASE, 140142945755136, 140142954143743, +ERASE, 140142090121216, 140142090125311, +ERASE, 140142090125312, 140142098513919, +ERASE, 140142794747904, 140142794751999, +ERASE, 140142794752000, 140142803140607, +ERASE, 140141913939968, 140141913944063, +ERASE, 140141913944064, 140141922332671, +ERASE, 140141746151424, 140141746155519, +ERASE, 140141746155520, 140141754544127, +ERASE, 140142954143744, 140142954147839, +ERASE, 140142954147840, 140142962536447, +ERASE, 140142081728512, 140142081732607, +ERASE, 140142081732608, 140142090121215, +ERASE, 
140141905547264, 140141905551359, +ERASE, 140141905551360, 140141913939967, +ERASE, 140141729366016, 140141729370111, +ERASE, 140141729370112, 140141737758719, +ERASE, 140142920572928, 140142920577023, +ERASE, 140142920577024, 140142928965631, +ERASE, 140142039764992, 140142039769087, +ERASE, 140142039769088, 140142048157695, +ERASE, 140141679009792, 140141679013887, +ERASE, 140141679013888, 140141687402495, +ERASE, 140142551490560, 140142551494655, +ERASE, 140142551494656, 140142559883263, +ERASE, 140141947510784, 140141947514879, +ERASE, 140141947514880, 140141955903487, +ERASE, 140141771329536, 140141771333631, +ERASE, 140141771333632, 140141779722239, +ERASE, 140142928965632, 140142928969727, +ERASE, 140142928969728, 140142937358335, +ERASE, 140142073335808, 140142073339903, +ERASE, 140142073339904, 140142081728511, +ERASE, 140142543097856, 140142543101951, +ERASE, 140142543101952, 140142551490559, +ERASE, 140141955903488, 140141955907583, +ERASE, 140141955907584, 140141964296191, +ERASE, 140141704187904, 140141704191999, +ERASE, 140141704192000, 140141712580607, +ERASE, 140142786355200, 140142786359295, +ERASE, 140142786359296, 140142794747903, +ERASE, 140142056550400, 140142056554495, +ERASE, 140142056554496, 140142064943103, +ERASE, 140142828318720, 140142828322815, +ERASE, 140142828322816, 140142836711423, +ERASE, 140141788114944, 140141788119039, +ERASE, 140141788119040, 140141796507647, +ERASE, 140141695795200, 140141695799295, +ERASE, 140141695799296, 140141704187903, +ERASE, 140141578297344, 140141578301439, +ERASE, 140141578301440, 140141586690047, +ERASE, 140141611868160, 140141611872255, +ERASE, 140141611872256, 140141620260863, +ERASE, 140142811533312, 140142811537407, +ERASE, 140142811537408, 140142819926015, +ERASE, 140142064943104, 140142064947199, +ERASE, 140142064947200, 140142073335807, +ERASE, 140141628653568, 140141628657663, +ERASE, 140141628657664, 140141637046271, +ERASE, 140143046397952, 140143046402047, +ERASE, 140143046402048, 140143054790655, +ERASE, 140141796507648, 140141796511743, +ERASE, 140141796511744, 140141804900351, +ERASE, 140142803140608, 140142803144703, +ERASE, 140142803144704, 140142811533311, +ERASE, 140142509527040, 140142509531135, +ERASE, 140142509531136, 140142517919743, +ERASE, 140141821685760, 140141821689855, +ERASE, 140141821689856, 140141830078463, +ERASE, 140142777962496, 140142777966591, +ERASE, 140142777966592, 140142786355199, +ERASE, 140141804900352, 140141804904447, +ERASE, 140141804904448, 140141813293055, +ERASE, 140141930725376, 140141930729471, +ERASE, 140141930729472, 140141939118079, +ERASE, 140142937358336, 140142937362431, +ERASE, 140142937362432, 140142945751039, +ERASE, 140142559883264, 140142559887359, +ERASE, 140142559887360, 140142568275967, +ERASE, 140142534705152, 140142534709247, +ERASE, 140142534709248, 140142543097855, +ERASE, 140142048157696, 140142048161791, +ERASE, 140142048161792, 140142056550399, +ERASE, 140141754544128, 140141754548223, +ERASE, 140141754548224, 140141762936831, +ERASE, 140141939118080, 140141939122175, +ERASE, 140141939122176, 140141947510783, +ERASE, 140141653831680, 140141653835775, +ERASE, 140141653835776, 140141662224383, +ERASE, 140141712580608, 140141712584703, +ERASE, 140141712584704, 140141720973311, +ERASE, 140141645438976, 140141645443071, +ERASE, 140141645443072, 140141653831679, +ERASE, 140141687402496, 140141687406591, +ERASE, 140141687406592, 140141695795199, +ERASE, 140141662224384, 140141662228479, +ERASE, 140141662228480, 140141670617087, +ERASE, 140141922332672, 
140141922336767, +ERASE, 140141922336768, 140141930725375, +ERASE, 140141737758720, 140141737762815, +ERASE, 140141737762816, 140141746151423, +ERASE, 140141637046272, 140141637050367, +ERASE, 140141637050368, 140141645438975, +ERASE, 140142517919744, 140142517923839, +ERASE, 140142517923840, 140142526312447, +ERASE, 140143096754176, 140143096758271, +ERASE, 140143096758272, 140143105146879, +ERASE, 140141595082752, 140141595086847, +ERASE, 140141595086848, 140141603475455, +ERASE, 140141762936832, 140141762940927, +ERASE, 140141762940928, 140141771329535, +ERASE, 140143311446016, 140143311450111, +ERASE, 140143311450112, 140143319838719, +ERASE, 140142526312448, 140142526316543, +ERASE, 140142526316544, 140142534705151, +ERASE, 140142819926016, 140142819930111, +ERASE, 140142819930112, 140142828318719, +ERASE, 140143180615680, 140143180619775, +ERASE, 140143180619776, 140143189008383, +ERASE, 140142962536448, 140142962540543, +ERASE, 140142962540544, 140142970929151, +ERASE, 140143214186496, 140143214190591, +ERASE, 140143214190592, 140143222579199, +ERASE, 140143088361472, 140143088365567, +ERASE, 140143088365568, 140143096754175, +ERASE, 140141586690048, 140141586694143, +ERASE, 140141586694144, 140141595082751, +ERASE, 140143230971904, 140143230975999, +ERASE, 140143230976000, 140143239364607, +ERASE, 140141779722240, 140141779726335, +ERASE, 140141779726336, 140141788114943, +ERASE, 140141670617088, 140141670621183, +ERASE, 140141670621184, 140141679009791, +ERASE, 140141813293056, 140141813297151, +ERASE, 140141813297152, 140141821685759, +ERASE, 140143222579200, 140143222583295, +ERASE, 140143222583296, 140143230971903, +ERASE, 140143189008384, 140143189012479, +ERASE, 140143189012480, 140143197401087, +ERASE, 140143071576064, 140143071580159, +ERASE, 140143071580160, 140143079968767, +ERASE, 140141620260864, 140141620264959, +ERASE, 140141620264960, 140141628653567, +ERASE, 140141603475456, 140141603479551, +ERASE, 140141603479552, 140141611868159, +ERASE, 140141720973312, 140141720977407, +ERASE, 140141720977408, 140141729366015, +ERASE, 140143079968768, 140143079972863, +ERASE, 140143079972864, 140143088361471, +ERASE, 140143205793792, 140143205797887, +ERASE, 140143205797888, 140143214186495, + }; + unsigned long set30[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140733436743680, 140737488351231, +SNULL, 140733436747775, 140737488351231, +STORE, 140733436743680, 140733436747775, +STORE, 140733436612608, 140733436747775, +STORE, 94630728904704, 94630731157503, +SNULL, 94630729035775, 94630731157503, +STORE, 94630728904704, 94630729035775, +STORE, 94630729035776, 94630731157503, +ERASE, 94630729035776, 94630731157503, +STORE, 94630731128832, 94630731137023, +STORE, 94630731137024, 94630731157503, +STORE, 140165750841344, 140165753094143, +SNULL, 140165750984703, 140165753094143, +STORE, 140165750841344, 140165750984703, +STORE, 140165750984704, 140165753094143, +ERASE, 140165750984704, 140165753094143, +STORE, 140165753081856, 140165753090047, +STORE, 140165753090048, 140165753094143, +STORE, 140733436887040, 140733436891135, +STORE, 140733436874752, 140733436887039, +STORE, 140165753053184, 140165753081855, +STORE, 140165753044992, 140165753053183, +STORE, 140165748625408, 140165750841343, +SNULL, 140165748625408, 140165748723711, +STORE, 140165748723712, 140165750841343, +STORE, 140165748625408, 140165748723711, +SNULL, 140165750816767, 140165750841343, +STORE, 140165748723712, 140165750816767, +STORE, 140165750816768, 140165750841343, +SNULL, 140165750816768, 
140165750824959, +STORE, 140165750824960, 140165750841343, +STORE, 140165750816768, 140165750824959, +ERASE, 140165750816768, 140165750824959, +STORE, 140165750816768, 140165750824959, +ERASE, 140165750824960, 140165750841343, +STORE, 140165750824960, 140165750841343, +STORE, 140165744828416, 140165748625407, +SNULL, 140165744828416, 140165746487295, +STORE, 140165746487296, 140165748625407, +STORE, 140165744828416, 140165746487295, +SNULL, 140165748584447, 140165748625407, +STORE, 140165746487296, 140165748584447, +STORE, 140165748584448, 140165748625407, +SNULL, 140165748584448, 140165748609023, +STORE, 140165748609024, 140165748625407, +STORE, 140165748584448, 140165748609023, +ERASE, 140165748584448, 140165748609023, +STORE, 140165748584448, 140165748609023, +ERASE, 140165748609024, 140165748625407, +STORE, 140165748609024, 140165748625407, +STORE, 140165753036800, 140165753053183, +SNULL, 140165748600831, 140165748609023, +STORE, 140165748584448, 140165748600831, +STORE, 140165748600832, 140165748609023, +SNULL, 140165750820863, 140165750824959, +STORE, 140165750816768, 140165750820863, +STORE, 140165750820864, 140165750824959, +SNULL, 94630731132927, 94630731137023, +STORE, 94630731128832, 94630731132927, +STORE, 94630731132928, 94630731137023, +SNULL, 140165753085951, 140165753090047, +STORE, 140165753081856, 140165753085951, +STORE, 140165753085952, 140165753090047, +ERASE, 140165753053184, 140165753081855, +STORE, 94630743547904, 94630743683071, +STORE, 140165736435712, 140165744828415, +SNULL, 140165736439807, 140165744828415, +STORE, 140165736435712, 140165736439807, +STORE, 140165736439808, 140165744828415, +STORE, 140165728043008, 140165736435711, +STORE, 140165593825280, 140165728043007, +SNULL, 140165593825280, 140165653725183, +STORE, 140165653725184, 140165728043007, +STORE, 140165593825280, 140165653725183, +ERASE, 140165593825280, 140165653725183, +SNULL, 140165720834047, 140165728043007, +STORE, 140165653725184, 140165720834047, +STORE, 140165720834048, 140165728043007, +ERASE, 140165720834048, 140165728043007, +SNULL, 140165653860351, 140165720834047, +STORE, 140165653725184, 140165653860351, +STORE, 140165653860352, 140165720834047, +SNULL, 140165728047103, 140165736435711, +STORE, 140165728043008, 140165728047103, +STORE, 140165728047104, 140165736435711, +STORE, 140165645332480, 140165653725183, +SNULL, 140165645336575, 140165653725183, +STORE, 140165645332480, 140165645336575, +STORE, 140165645336576, 140165653725183, +STORE, 140165636939776, 140165645332479, +SNULL, 140165636943871, 140165645332479, +STORE, 140165636939776, 140165636943871, +STORE, 140165636943872, 140165645332479, +STORE, 140165628547072, 140165636939775, +SNULL, 140165628551167, 140165636939775, +STORE, 140165628547072, 140165628551167, +STORE, 140165628551168, 140165636939775, +STORE, 140165620154368, 140165628547071, +STORE, 140165611761664, 140165628547071, +STORE, 140165603368960, 140165628547071, +STORE, 140165469151232, 140165603368959, +SNULL, 140165469151232, 140165519507455, +STORE, 140165519507456, 140165603368959, +STORE, 140165469151232, 140165519507455, +ERASE, 140165469151232, 140165519507455, +SNULL, 140165586616319, 140165603368959, +STORE, 140165519507456, 140165586616319, +STORE, 140165586616320, 140165603368959, +ERASE, 140165586616320, 140165603368959, +STORE, 140165594976256, 140165628547071, +STORE, 140165385289728, 140165586616319, +SNULL, 140165452398591, 140165586616319, +STORE, 140165385289728, 140165452398591, +STORE, 140165452398592, 140165586616319, +SNULL, 
140165452398592, 140165519507455, +STORE, 140165519507456, 140165586616319, +STORE, 140165452398592, 140165519507455, +ERASE, 140165452398592, 140165519507455, +STORE, 140165251072000, 140165452398591, +SNULL, 140165318180863, 140165452398591, +STORE, 140165251072000, 140165318180863, +STORE, 140165318180864, 140165452398591, +SNULL, 140165318180864, 140165385289727, +STORE, 140165385289728, 140165452398591, +STORE, 140165318180864, 140165385289727, +ERASE, 140165318180864, 140165385289727, +SNULL, 140165519642623, 140165586616319, +STORE, 140165519507456, 140165519642623, +STORE, 140165519642624, 140165586616319, +SNULL, 140165594976256, 140165611761663, +STORE, 140165611761664, 140165628547071, +STORE, 140165594976256, 140165611761663, +SNULL, 140165611765759, 140165628547071, +STORE, 140165611761664, 140165611765759, +STORE, 140165611765760, 140165628547071, +STORE, 140165385289728, 140165519507455, +SNULL, 140165385424895, 140165519507455, +STORE, 140165385289728, 140165385424895, +STORE, 140165385424896, 140165519507455, +SNULL, 140165594976256, 140165603368959, +STORE, 140165603368960, 140165611761663, +STORE, 140165594976256, 140165603368959, +SNULL, 140165603373055, 140165611761663, +STORE, 140165603368960, 140165603373055, +STORE, 140165603373056, 140165611761663, +SNULL, 140165251207167, 140165318180863, +STORE, 140165251072000, 140165251207167, +STORE, 140165251207168, 140165318180863, +STORE, 140165376897024, 140165385289727, +SNULL, 140165376901119, 140165385289727, +STORE, 140165376897024, 140165376901119, +STORE, 140165376901120, 140165385289727, +SNULL, 140165385424896, 140165452398591, +STORE, 140165452398592, 140165519507455, +STORE, 140165385424896, 140165452398591, +SNULL, 140165452533759, 140165519507455, +STORE, 140165452398592, 140165452533759, +STORE, 140165452533760, 140165519507455, +STORE, 140165368504320, 140165376897023, +SNULL, 140165594980351, 140165603368959, +STORE, 140165594976256, 140165594980351, +STORE, 140165594980352, 140165603368959, +SNULL, 140165368508415, 140165376897023, +STORE, 140165368504320, 140165368508415, +STORE, 140165368508416, 140165376897023, +SNULL, 140165611765760, 140165620154367, +STORE, 140165620154368, 140165628547071, +STORE, 140165611765760, 140165620154367, +SNULL, 140165620158463, 140165628547071, +STORE, 140165620154368, 140165620158463, +STORE, 140165620158464, 140165628547071, +STORE, 140165360111616, 140165368504319, +STORE, 140165351718912, 140165368504319, +STORE, 140165343326208, 140165368504319, +SNULL, 140165343326208, 140165351718911, +STORE, 140165351718912, 140165368504319, +STORE, 140165343326208, 140165351718911, +SNULL, 140165351723007, 140165368504319, +STORE, 140165351718912, 140165351723007, +STORE, 140165351723008, 140165368504319, +SNULL, 140165343330303, 140165351718911, +STORE, 140165343326208, 140165343330303, +STORE, 140165343330304, 140165351718911, +SNULL, 140165351723008, 140165360111615, +STORE, 140165360111616, 140165368504319, +STORE, 140165351723008, 140165360111615, +SNULL, 140165360115711, 140165368504319, +STORE, 140165360111616, 140165360115711, +STORE, 140165360115712, 140165368504319, +STORE, 140165334933504, 140165343326207, +SNULL, 140165334937599, 140165343326207, +STORE, 140165334933504, 140165334937599, +STORE, 140165334937600, 140165343326207, +STORE, 140165326540800, 140165334933503, +STORE, 140165242679296, 140165251071999, +SNULL, 140165242683391, 140165251071999, +STORE, 140165242679296, 140165242683391, +STORE, 140165242683392, 140165251071999, +STORE, 140165234286592, 
140165242679295, +STORE, 140165225893888, 140165242679295, +SNULL, 140165225897983, 140165242679295, +STORE, 140165225893888, 140165225897983, +STORE, 140165225897984, 140165242679295, +SNULL, 140165225897984, 140165234286591, +STORE, 140165234286592, 140165242679295, +STORE, 140165225897984, 140165234286591, +SNULL, 140165234290687, 140165242679295, +STORE, 140165234286592, 140165234290687, +STORE, 140165234290688, 140165242679295, +SNULL, 140165326544895, 140165334933503, +STORE, 140165326540800, 140165326544895, +STORE, 140165326544896, 140165334933503, +STORE, 140165217501184, 140165225893887, +STORE, 140165209108480, 140165225893887, +SNULL, 140165209108480, 140165217501183, +STORE, 140165217501184, 140165225893887, +STORE, 140165209108480, 140165217501183, +SNULL, 140165217505279, 140165225893887, +STORE, 140165217501184, 140165217505279, +STORE, 140165217505280, 140165225893887, +SNULL, 140165209112575, 140165217501183, +STORE, 140165209108480, 140165209112575, +STORE, 140165209112576, 140165217501183, +STORE, 140165200715776, 140165209108479, +STORE, 140165066498048, 140165200715775, +SNULL, 140165066498048, 140165116854271, +STORE, 140165116854272, 140165200715775, +STORE, 140165066498048, 140165116854271, +ERASE, 140165066498048, 140165116854271, +SNULL, 140165183963135, 140165200715775, +STORE, 140165116854272, 140165183963135, +STORE, 140165183963136, 140165200715775, +ERASE, 140165183963136, 140165200715775, +SNULL, 140165116989439, 140165183963135, +STORE, 140165116854272, 140165116989439, +STORE, 140165116989440, 140165183963135, +STORE, 140165192323072, 140165209108479, +STORE, 140165108461568, 140165116854271, +STORE, 140164974243840, 140165108461567, +STORE, 140164965851136, 140164974243839, +SNULL, 140164974243840, 140164982636543, +STORE, 140164982636544, 140165108461567, +STORE, 140164974243840, 140164982636543, +ERASE, 140164974243840, 140164982636543, +STORE, 140164965851136, 140164982636543, +STORE, 140164957458432, 140164982636543, +STORE, 140164949065728, 140164982636543, +STORE, 140164940673024, 140164982636543, +STORE, 140164806455296, 140164940673023, +STORE, 140164798062592, 140164806455295, +STORE, 140164789669888, 140164806455295, +STORE, 140164655452160, 140164789669887, +STORE, 140164647059456, 140164655452159, +STORE, 140164638666752, 140164655452159, +SNULL, 140164655452160, 140164714201087, +STORE, 140164714201088, 140164789669887, +STORE, 140164655452160, 140164714201087, +ERASE, 140164655452160, 140164714201087, +STORE, 140164705808384, 140164714201087, +STORE, 140164697415680, 140164714201087, +STORE, 140164504449024, 140164638666751, +SNULL, 140164504449024, 140164512874495, +STORE, 140164512874496, 140164638666751, +STORE, 140164504449024, 140164512874495, +ERASE, 140164504449024, 140164512874495, +STORE, 140164689022976, 140164714201087, +STORE, 140164680630272, 140164714201087, +SNULL, 140164680634367, 140164714201087, +STORE, 140164680630272, 140164680634367, +STORE, 140164680634368, 140164714201087, +STORE, 140164378656768, 140164638666751, +SNULL, 140165192323072, 140165200715775, +STORE, 140165200715776, 140165209108479, +STORE, 140165192323072, 140165200715775, +SNULL, 140165200719871, 140165209108479, +STORE, 140165200715776, 140165200719871, +STORE, 140165200719872, 140165209108479, +SNULL, 140165049745407, 140165108461567, +STORE, 140164982636544, 140165049745407, +STORE, 140165049745408, 140165108461567, +ERASE, 140165049745408, 140165108461567, +SNULL, 140164982771711, 140165049745407, +STORE, 140164982636544, 140164982771711, +STORE, 
140164982771712, 140165049745407, +STORE, 140164244439040, 140164638666751, +SNULL, 140164311547903, 140164638666751, +STORE, 140164244439040, 140164311547903, +STORE, 140164311547904, 140164638666751, +SNULL, 140164311547904, 140164378656767, +STORE, 140164378656768, 140164638666751, +STORE, 140164311547904, 140164378656767, +ERASE, 140164311547904, 140164378656767, +SNULL, 140164806455296, 140164848418815, +STORE, 140164848418816, 140164940673023, +STORE, 140164806455296, 140164848418815, +ERASE, 140164806455296, 140164848418815, +SNULL, 140164915527679, 140164940673023, +STORE, 140164848418816, 140164915527679, +STORE, 140164915527680, 140164940673023, +ERASE, 140164915527680, 140164940673023, +STORE, 140164110221312, 140164311547903, +SNULL, 140164177330175, 140164311547903, +STORE, 140164110221312, 140164177330175, +STORE, 140164177330176, 140164311547903, +SNULL, 140164177330176, 140164244439039, +STORE, 140164244439040, 140164311547903, +STORE, 140164177330176, 140164244439039, +ERASE, 140164177330176, 140164244439039, +SNULL, 140164781309951, 140164789669887, +STORE, 140164714201088, 140164781309951, +STORE, 140164781309952, 140164789669887, +ERASE, 140164781309952, 140164789669887, +STORE, 140163976003584, 140164177330175, +SNULL, 140164043112447, 140164177330175, +STORE, 140163976003584, 140164043112447, +STORE, 140164043112448, 140164177330175, +SNULL, 140164043112448, 140164110221311, +STORE, 140164110221312, 140164177330175, +STORE, 140164043112448, 140164110221311, +ERASE, 140164043112448, 140164110221311, +SNULL, 140164579983359, 140164638666751, +STORE, 140164378656768, 140164579983359, +STORE, 140164579983360, 140164638666751, +ERASE, 140164579983360, 140164638666751, +STORE, 140163841785856, 140164043112447, +SNULL, 140163908894719, 140164043112447, +STORE, 140163841785856, 140163908894719, +STORE, 140163908894720, 140164043112447, +SNULL, 140163908894720, 140163976003583, +STORE, 140163976003584, 140164043112447, +STORE, 140163908894720, 140163976003583, +ERASE, 140163908894720, 140163976003583, +SNULL, 140164940673024, 140164965851135, +STORE, 140164965851136, 140164982636543, +STORE, 140164940673024, 140164965851135, +SNULL, 140164965855231, 140164982636543, +STORE, 140164965851136, 140164965855231, +STORE, 140164965855232, 140164982636543, +SNULL, 140164965855232, 140164974243839, +STORE, 140164974243840, 140164982636543, +STORE, 140164965855232, 140164974243839, +SNULL, 140164974247935, 140164982636543, +STORE, 140164974243840, 140164974247935, +STORE, 140164974247936, 140164982636543, +SNULL, 140164445765631, 140164579983359, +STORE, 140164378656768, 140164445765631, +STORE, 140164445765632, 140164579983359, +SNULL, 140164445765632, 140164512874495, +STORE, 140164512874496, 140164579983359, +STORE, 140164445765632, 140164512874495, +ERASE, 140164445765632, 140164512874495, +SNULL, 140164378791935, 140164445765631, +STORE, 140164378656768, 140164378791935, +STORE, 140164378791936, 140164445765631, +SNULL, 140164789673983, 140164806455295, +STORE, 140164789669888, 140164789673983, +STORE, 140164789673984, 140164806455295, +SNULL, 140164789673984, 140164798062591, +STORE, 140164798062592, 140164806455295, +STORE, 140164789673984, 140164798062591, +SNULL, 140164798066687, 140164806455295, +STORE, 140164798062592, 140164798066687, +STORE, 140164798066688, 140164806455295, +SNULL, 140164638670847, 140164655452159, +STORE, 140164638666752, 140164638670847, +STORE, 140164638670848, 140164655452159, +STORE, 140165100068864, 140165116854271, +STORE, 140165091676160, 
140165116854271, +STORE, 140165083283456, 140165116854271, +SNULL, 140164244574207, 140164311547903, +STORE, 140164244439040, 140164244574207, +STORE, 140164244574208, 140164311547903, +SNULL, 140164848553983, 140164915527679, +STORE, 140164848418816, 140164848553983, +STORE, 140164848553984, 140164915527679, +SNULL, 140164110356479, 140164177330175, +STORE, 140164110221312, 140164110356479, +STORE, 140164110356480, 140164177330175, +SNULL, 140164714336255, 140164781309951, +STORE, 140164714201088, 140164714336255, +STORE, 140164714336256, 140164781309951, +SNULL, 140163976138751, 140164043112447, +STORE, 140163976003584, 140163976138751, +STORE, 140163976138752, 140164043112447, +SNULL, 140164513009663, 140164579983359, +STORE, 140164512874496, 140164513009663, +STORE, 140164513009664, 140164579983359, +SNULL, 140163841921023, 140163908894719, +STORE, 140163841785856, 140163841921023, +STORE, 140163841921024, 140163908894719, +SNULL, 140165083283456, 140165100068863, +STORE, 140165100068864, 140165116854271, +STORE, 140165083283456, 140165100068863, +SNULL, 140165100072959, 140165116854271, +STORE, 140165100068864, 140165100072959, +STORE, 140165100072960, 140165116854271, +SNULL, 140165100072960, 140165108461567, +STORE, 140165108461568, 140165116854271, +STORE, 140165100072960, 140165108461567, +SNULL, 140165108465663, 140165116854271, +STORE, 140165108461568, 140165108465663, +STORE, 140165108465664, 140165116854271, +STORE, 140165074890752, 140165100068863, +SNULL, 140165074894847, 140165100068863, +STORE, 140165074890752, 140165074894847, +STORE, 140165074894848, 140165100068863, +STORE, 140165066498048, 140165074890751, +STORE, 140165058105344, 140165074890751, +STORE, 140164932280320, 140164965851135, +SNULL, 140165192327167, 140165200715775, +STORE, 140165192323072, 140165192327167, +STORE, 140165192327168, 140165200715775, +STORE, 140164923887616, 140164965851135, +SNULL, 140164923891711, 140164965851135, +STORE, 140164923887616, 140164923891711, +STORE, 140164923891712, 140164965851135, +SNULL, 140164680634368, 140164705808383, +STORE, 140164705808384, 140164714201087, +STORE, 140164680634368, 140164705808383, +SNULL, 140164705812479, 140164714201087, +STORE, 140164705808384, 140164705812479, +STORE, 140164705812480, 140164714201087, +SNULL, 140164680634368, 140164697415679, +STORE, 140164697415680, 140164705808383, +STORE, 140164680634368, 140164697415679, +SNULL, 140164697419775, 140164705808383, +STORE, 140164697415680, 140164697419775, +STORE, 140164697419776, 140164705808383, +STORE, 140164840026112, 140164848418815, +STORE, 140164831633408, 140164848418815, +STORE, 140164823240704, 140164848418815, +SNULL, 140165074894848, 140165083283455, +STORE, 140165083283456, 140165100068863, +STORE, 140165074894848, 140165083283455, +SNULL, 140165083287551, 140165100068863, +STORE, 140165083283456, 140165083287551, +STORE, 140165083287552, 140165100068863, +SNULL, 140165083287552, 140165091676159, +STORE, 140165091676160, 140165100068863, +STORE, 140165083287552, 140165091676159, +SNULL, 140165091680255, 140165100068863, +STORE, 140165091676160, 140165091680255, +STORE, 140165091680256, 140165100068863, +SNULL, 140164638670848, 140164647059455, +STORE, 140164647059456, 140164655452159, +STORE, 140164638670848, 140164647059455, +SNULL, 140164647063551, 140164655452159, +STORE, 140164647059456, 140164647063551, +STORE, 140164647063552, 140164655452159, +SNULL, 140164923891712, 140164940673023, +STORE, 140164940673024, 140164965851135, +STORE, 140164923891712, 140164940673023, +SNULL, 
140164940677119, 140164965851135, +STORE, 140164940673024, 140164940677119, +STORE, 140164940677120, 140164965851135, +SNULL, 140164940677120, 140164949065727, +STORE, 140164949065728, 140164965851135, +STORE, 140164940677120, 140164949065727, +SNULL, 140164949069823, 140164965851135, +STORE, 140164949065728, 140164949069823, +STORE, 140164949069824, 140164965851135, +SNULL, 140164949069824, 140164957458431, +STORE, 140164957458432, 140164965851135, +STORE, 140164949069824, 140164957458431, +SNULL, 140164957462527, 140164965851135, +STORE, 140164957458432, 140164957462527, +STORE, 140164957462528, 140164965851135, +SNULL, 140164680634368, 140164689022975, +STORE, 140164689022976, 140164697415679, +STORE, 140164680634368, 140164689022975, +SNULL, 140164689027071, 140164697415679, +STORE, 140164689022976, 140164689027071, +STORE, 140164689027072, 140164697415679, +STORE, 140164814848000, 140164848418815, +SNULL, 140165058105344, 140165066498047, +STORE, 140165066498048, 140165074890751, +STORE, 140165058105344, 140165066498047, +SNULL, 140165066502143, 140165074890751, +STORE, 140165066498048, 140165066502143, +STORE, 140165066502144, 140165074890751, +SNULL, 140165058109439, 140165066498047, +STORE, 140165058105344, 140165058109439, +STORE, 140165058109440, 140165066498047, +STORE, 140164798066688, 140164814847999, +SNULL, 140164798066688, 140164806455295, +STORE, 140164806455296, 140164814847999, +STORE, 140164798066688, 140164806455295, +SNULL, 140164806459391, 140164814847999, +STORE, 140164806455296, 140164806459391, +STORE, 140164806459392, 140164814847999, +SNULL, 140164923891712, 140164932280319, +STORE, 140164932280320, 140164940673023, +STORE, 140164923891712, 140164932280319, +SNULL, 140164932284415, 140164940673023, +STORE, 140164932280320, 140164932284415, +STORE, 140164932284416, 140164940673023, +STORE, 140164672237568, 140164680630271, +STORE, 140164663844864, 140164680630271, +STORE, 140164647063552, 140164680630271, +SNULL, 140164647063552, 140164655452159, +STORE, 140164655452160, 140164680630271, +STORE, 140164647063552, 140164655452159, +SNULL, 140164655456255, 140164680630271, +STORE, 140164655452160, 140164655456255, +STORE, 140164655456256, 140164680630271, +STORE, 140164630274048, 140164638666751, +SNULL, 140164814852095, 140164848418815, +STORE, 140164814848000, 140164814852095, +STORE, 140164814852096, 140164848418815, +SNULL, 140164814852096, 140164831633407, +STORE, 140164831633408, 140164848418815, +STORE, 140164814852096, 140164831633407, +SNULL, 140164831637503, 140164848418815, +STORE, 140164831633408, 140164831637503, +STORE, 140164831637504, 140164848418815, +STORE, 140164621881344, 140164638666751, +SNULL, 140164831637504, 140164840026111, +STORE, 140164840026112, 140164848418815, +STORE, 140164831637504, 140164840026111, +SNULL, 140164840030207, 140164848418815, +STORE, 140164840026112, 140164840030207, +STORE, 140164840030208, 140164848418815, +STORE, 140164613488640, 140164638666751, +SNULL, 140164613492735, 140164638666751, +STORE, 140164613488640, 140164613492735, +STORE, 140164613492736, 140164638666751, +STORE, 140164605095936, 140164613488639, +SNULL, 140164605100031, 140164613488639, +STORE, 140164605095936, 140164605100031, +STORE, 140164605100032, 140164613488639, +STORE, 140164596703232, 140164605095935, +STORE, 140164588310528, 140164605095935, +SNULL, 140164588314623, 140164605095935, +STORE, 140164588310528, 140164588314623, +STORE, 140164588314624, 140164605095935, +STORE, 140164504481792, 140164512874495, +STORE, 140164496089088, 
140164512874495, +SNULL, 140164496089088, 140164504481791, +STORE, 140164504481792, 140164512874495, +STORE, 140164496089088, 140164504481791, +SNULL, 140164504485887, 140164512874495, +STORE, 140164504481792, 140164504485887, +STORE, 140164504485888, 140164512874495, +SNULL, 140164613492736, 140164630274047, +STORE, 140164630274048, 140164638666751, +STORE, 140164613492736, 140164630274047, +SNULL, 140164630278143, 140164638666751, +STORE, 140164630274048, 140164630278143, +STORE, 140164630278144, 140164638666751, +STORE, 140164487696384, 140164504481791, +STORE, 140164479303680, 140164504481791, +SNULL, 140164814852096, 140164823240703, +STORE, 140164823240704, 140164831633407, +STORE, 140164814852096, 140164823240703, +SNULL, 140164823244799, 140164831633407, +STORE, 140164823240704, 140164823244799, +STORE, 140164823244800, 140164831633407, +STORE, 140164470910976, 140164504481791, +SNULL, 140164470910976, 140164496089087, +STORE, 140164496089088, 140164504481791, +STORE, 140164470910976, 140164496089087, +SNULL, 140164496093183, 140164504481791, +STORE, 140164496089088, 140164496093183, +STORE, 140164496093184, 140164504481791, +SNULL, 140164655456256, 140164672237567, +STORE, 140164672237568, 140164680630271, +STORE, 140164655456256, 140164672237567, +SNULL, 140164672241663, 140164680630271, +STORE, 140164672237568, 140164672241663, +STORE, 140164672241664, 140164680630271, +STORE, 140164462518272, 140164496089087, +STORE, 140164454125568, 140164496089087, +SNULL, 140164655456256, 140164663844863, +STORE, 140164663844864, 140164672237567, +STORE, 140164655456256, 140164663844863, +SNULL, 140164663848959, 140164672237567, +STORE, 140164663844864, 140164663848959, +STORE, 140164663848960, 140164672237567, +STORE, 140164370264064, 140164378656767, +STORE, 140164361871360, 140164378656767, +STORE, 140164353478656, 140164378656767, +STORE, 140164345085952, 140164378656767, +SNULL, 140164345085952, 140164353478655, +STORE, 140164353478656, 140164378656767, +STORE, 140164345085952, 140164353478655, +SNULL, 140164353482751, 140164378656767, +STORE, 140164353478656, 140164353482751, +STORE, 140164353482752, 140164378656767, +SNULL, 140164454125568, 140164487696383, +STORE, 140164487696384, 140164496089087, +STORE, 140164454125568, 140164487696383, +SNULL, 140164487700479, 140164496089087, +STORE, 140164487696384, 140164487700479, +STORE, 140164487700480, 140164496089087, +STORE, 140164336693248, 140164353478655, +SNULL, 140164336697343, 140164353478655, +STORE, 140164336693248, 140164336697343, +STORE, 140164336697344, 140164353478655, +STORE, 140164328300544, 140164336693247, +SNULL, 140164454125568, 140164479303679, +STORE, 140164479303680, 140164487696383, +STORE, 140164454125568, 140164479303679, +SNULL, 140164479307775, 140164487696383, +STORE, 140164479303680, 140164479307775, +STORE, 140164479307776, 140164487696383, +STORE, 140164319907840, 140164336693247, +STORE, 140164236046336, 140164244439039, +SNULL, 140164588314624, 140164596703231, +STORE, 140164596703232, 140164605095935, +STORE, 140164588314624, 140164596703231, +SNULL, 140164596707327, 140164605095935, +STORE, 140164596703232, 140164596707327, +STORE, 140164596707328, 140164605095935, +SNULL, 140164454125568, 140164462518271, +STORE, 140164462518272, 140164479303679, +STORE, 140164454125568, 140164462518271, +SNULL, 140164462522367, 140164479303679, +STORE, 140164462518272, 140164462522367, +STORE, 140164462522368, 140164479303679, +STORE, 140164227653632, 140164244439039, +SNULL, 140164227657727, 140164244439039, +STORE, 
140164227653632, 140164227657727, +STORE, 140164227657728, 140164244439039, +SNULL, 140164462522368, 140164470910975, +STORE, 140164470910976, 140164479303679, +STORE, 140164462522368, 140164470910975, +SNULL, 140164470915071, 140164479303679, +STORE, 140164470910976, 140164470915071, +STORE, 140164470915072, 140164479303679, +SNULL, 140164613492736, 140164621881343, +STORE, 140164621881344, 140164630274047, +STORE, 140164613492736, 140164621881343, +SNULL, 140164621885439, 140164630274047, +STORE, 140164621881344, 140164621885439, +STORE, 140164621885440, 140164630274047, +SNULL, 140164353482752, 140164370264063, +STORE, 140164370264064, 140164378656767, +STORE, 140164353482752, 140164370264063, +SNULL, 140164370268159, 140164378656767, +STORE, 140164370264064, 140164370268159, +STORE, 140164370268160, 140164378656767, +STORE, 140164219260928, 140164227653631, +SNULL, 140164319911935, 140164336693247, +STORE, 140164319907840, 140164319911935, +STORE, 140164319911936, 140164336693247, +SNULL, 140164336697344, 140164345085951, +STORE, 140164345085952, 140164353478655, +STORE, 140164336697344, 140164345085951, +SNULL, 140164345090047, 140164353478655, +STORE, 140164345085952, 140164345090047, +STORE, 140164345090048, 140164353478655, +SNULL, 140164319911936, 140164328300543, +STORE, 140164328300544, 140164336693247, +STORE, 140164319911936, 140164328300543, +SNULL, 140164328304639, 140164336693247, +STORE, 140164328300544, 140164328304639, +STORE, 140164328304640, 140164336693247, +SNULL, 140164454129663, 140164462518271, +STORE, 140164454125568, 140164454129663, +STORE, 140164454129664, 140164462518271, +STORE, 140164210868224, 140164227653631, +STORE, 140164202475520, 140164227653631, +STORE, 140164194082816, 140164227653631, +SNULL, 140164194086911, 140164227653631, +STORE, 140164194082816, 140164194086911, +STORE, 140164194086912, 140164227653631, +SNULL, 140164353482752, 140164361871359, +STORE, 140164361871360, 140164370264063, +STORE, 140164353482752, 140164361871359, +SNULL, 140164361875455, 140164370264063, +STORE, 140164361871360, 140164361875455, +STORE, 140164361875456, 140164370264063, +SNULL, 140164227657728, 140164236046335, +STORE, 140164236046336, 140164244439039, +STORE, 140164227657728, 140164236046335, +SNULL, 140164236050431, 140164244439039, +STORE, 140164236046336, 140164236050431, +STORE, 140164236050432, 140164244439039, +STORE, 140164185690112, 140164194082815, +SNULL, 140164194086912, 140164219260927, +STORE, 140164219260928, 140164227653631, +STORE, 140164194086912, 140164219260927, +SNULL, 140164219265023, 140164227653631, +STORE, 140164219260928, 140164219265023, +STORE, 140164219265024, 140164227653631, +STORE, 140164101828608, 140164110221311, +STORE, 140164093435904, 140164110221311, +STORE, 140164085043200, 140164110221311, +SNULL, 140164085047295, 140164110221311, +STORE, 140164085043200, 140164085047295, +STORE, 140164085047296, 140164110221311, +STORE, 140164076650496, 140164085043199, +SNULL, 140164185694207, 140164194082815, +STORE, 140164185690112, 140164185694207, +STORE, 140164185694208, 140164194082815, +SNULL, 140164085047296, 140164101828607, +STORE, 140164101828608, 140164110221311, +STORE, 140164085047296, 140164101828607, +SNULL, 140164101832703, 140164110221311, +STORE, 140164101828608, 140164101832703, +STORE, 140164101832704, 140164110221311, +SNULL, 140164085047296, 140164093435903, +STORE, 140164093435904, 140164101828607, +STORE, 140164085047296, 140164093435903, +SNULL, 140164093439999, 140164101828607, +STORE, 140164093435904, 
140164093439999, +STORE, 140164093440000, 140164101828607, +SNULL, 140164194086912, 140164202475519, +STORE, 140164202475520, 140164219260927, +STORE, 140164194086912, 140164202475519, +SNULL, 140164202479615, 140164219260927, +STORE, 140164202475520, 140164202479615, +STORE, 140164202479616, 140164219260927, +SNULL, 140164202479616, 140164210868223, +STORE, 140164210868224, 140164219260927, +STORE, 140164202479616, 140164210868223, +SNULL, 140164210872319, 140164219260927, +STORE, 140164210868224, 140164210872319, +STORE, 140164210872320, 140164219260927, +SNULL, 140164076654591, 140164085043199, +STORE, 140164076650496, 140164076654591, +STORE, 140164076654592, 140164085043199, +STORE, 140164068257792, 140164076650495, +SNULL, 140164068261887, 140164076650495, +STORE, 140164068257792, 140164068261887, +STORE, 140164068261888, 140164076650495, +STORE, 140165753053184, 140165753081855, +STORE, 140165725851648, 140165728043007, +SNULL, 140165725851648, 140165725941759, +STORE, 140165725941760, 140165728043007, +STORE, 140165725851648, 140165725941759, +SNULL, 140165728034815, 140165728043007, +STORE, 140165725941760, 140165728034815, +STORE, 140165728034816, 140165728043007, +ERASE, 140165728034816, 140165728043007, +STORE, 140165728034816, 140165728043007, +SNULL, 140165728038911, 140165728043007, +STORE, 140165728034816, 140165728038911, +STORE, 140165728038912, 140165728043007, +ERASE, 140165753053184, 140165753081855, +ERASE, 140164638666752, 140164638670847, +ERASE, 140164638670848, 140164647059455, +ERASE, 140165091676160, 140165091680255, +ERASE, 140165091680256, 140165100068863, +ERASE, 140164613488640, 140164613492735, +ERASE, 140164613492736, 140164621881343, +ERASE, 140164319907840, 140164319911935, +ERASE, 140164319911936, 140164328300543, +ERASE, 140165620154368, 140165620158463, +ERASE, 140165620158464, 140165628547071, +ERASE, 140164798062592, 140164798066687, +ERASE, 140164798066688, 140164806455295, +ERASE, 140164789669888, 140164789673983, +ERASE, 140164789673984, 140164798062591, +ERASE, 140164965851136, 140164965855231, +ERASE, 140164965855232, 140164974243839, +ERASE, 140165074890752, 140165074894847, +ERASE, 140165074894848, 140165083283455, +ERASE, 140164672237568, 140164672241663, +ERASE, 140164672241664, 140164680630271, +ERASE, 140164454125568, 140164454129663, +ERASE, 140164454129664, 140164462518271, +ERASE, 140165200715776, 140165200719871, +ERASE, 140165200719872, 140165209108479, +ERASE, 140164932280320, 140164932284415, +ERASE, 140164932284416, 140164940673023, +ERASE, 140164663844864, 140164663848959, +ERASE, 140164663848960, 140164672237567, +ERASE, 140164697415680, 140164697419775, +ERASE, 140164697419776, 140164705808383, +ERASE, 140164831633408, 140164831637503, +ERASE, 140164831637504, 140164840026111, +ERASE, 140165192323072, 140165192327167, +ERASE, 140165192327168, 140165200715775, +ERASE, 140165108461568, 140165108465663, +ERASE, 140165108465664, 140165116854271, +ERASE, 140164840026112, 140164840030207, +ERASE, 140164840030208, 140164848418815, +ERASE, 140164647059456, 140164647063551, +ERASE, 140164647063552, 140164655452159, +ERASE, 140165083283456, 140165083287551, +ERASE, 140165083287552, 140165091676159, +ERASE, 140164923887616, 140164923891711, +ERASE, 140164923891712, 140164932280319, +ERASE, 140164823240704, 140164823244799, +ERASE, 140164823244800, 140164831633407, +ERASE, 140164227653632, 140164227657727, +ERASE, 140164227657728, 140164236046335, +ERASE, 140164957458432, 140164957462527, +ERASE, 140164957462528, 140164965851135, +ERASE, 
140164680630272, 140164680634367, +ERASE, 140164680634368, 140164689022975, +ERASE, 140164974243840, 140164974247935, +ERASE, 140164974247936, 140164982636543, +ERASE, 140165066498048, 140165066502143, +ERASE, 140165066502144, 140165074890751, +ERASE, 140164621881344, 140164621885439, +ERASE, 140164621885440, 140164630274047, +ERASE, 140164949065728, 140164949069823, +ERASE, 140164949069824, 140164957458431, +ERASE, 140164588310528, 140164588314623, +ERASE, 140164588314624, 140164596703231, +ERASE, 140164806455296, 140164806459391, +ERASE, 140164806459392, 140164814847999, +ERASE, 140164940673024, 140164940677119, +ERASE, 140164940677120, 140164949065727, +ERASE, 140164596703232, 140164596707327, +ERASE, 140164596707328, 140164605095935, +ERASE, 140164605095936, 140164605100031, +ERASE, 140164605100032, 140164613488639, +ERASE, 140164655452160, 140164655456255, +ERASE, 140164655456256, 140164663844863, +ERASE, 140164705808384, 140164705812479, +ERASE, 140164705812480, 140164714201087, +ERASE, 140164689022976, 140164689027071, +ERASE, 140164689027072, 140164697415679, +ERASE, 140164630274048, 140164630278143, +ERASE, 140164630278144, 140164638666751, +ERASE, 140164479303680, 140164479307775, +ERASE, 140164479307776, 140164487696383, +ERASE, 140164236046336, 140164236050431, +ERASE, 140164236050432, 140164244439039, +ERASE, 140164085043200, 140164085047295, +ERASE, 140164085047296, 140164093435903, +ERASE, 140164345085952, 140164345090047, +ERASE, 140164345090048, 140164353478655, +ERASE, 140164101828608, 140164101832703, +ERASE, 140164101832704, 140164110221311, +ERASE, 140164370264064, 140164370268159, +ERASE, 140164370268160, 140164378656767, +ERASE, 140164336693248, 140164336697343, +ERASE, 140164336697344, 140164345085951, +ERASE, 140164194082816, 140164194086911, +ERASE, 140164194086912, 140164202475519, +ERASE, 140164353478656, 140164353482751, +ERASE, 140164353482752, 140164361871359, +ERASE, 140164210868224, 140164210872319, +ERASE, 140164210872320, 140164219260927, +ERASE, 140164814848000, 140164814852095, +ERASE, 140164814852096, 140164823240703, +ERASE, 140164504481792, 140164504485887, +ERASE, 140164504485888, 140164512874495, +ERASE, 140165100068864, 140165100072959, +ERASE, 140165100072960, 140165108461567, +ERASE, 140164361871360, 140164361875455, +ERASE, 140164361875456, 140164370264063, +ERASE, 140164470910976, 140164470915071, +ERASE, 140164470915072, 140164479303679, +ERASE, 140164076650496, 140164076654591, +ERASE, 140164076654592, 140164085043199, +ERASE, 140164202475520, 140164202479615, +ERASE, 140164202479616, 140164210868223, +ERASE, 140164462518272, 140164462522367, +ERASE, 140164462522368, 140164470910975, +ERASE, 140165351718912, 140165351723007, +ERASE, 140165351723008, 140165360111615, +ERASE, 140164328300544, 140164328304639, +ERASE, 140164328304640, 140164336693247, +ERASE, 140164093435904, 140164093439999, +ERASE, 140164093440000, 140164101828607, +ERASE, 140165603368960, 140165603373055, +ERASE, 140165603373056, 140165611761663, +ERASE, 140165368504320, 140165368508415, +ERASE, 140165368508416, 140165376897023, +ERASE, 140165334933504, 140165334937599, +ERASE, 140165334937600, 140165343326207, +ERASE, 140165594976256, 140165594980351, +ERASE, 140165594980352, 140165603368959, +ERASE, 140164487696384, 140164487700479, +ERASE, 140164487700480, 140164496089087, +ERASE, 140164219260928, 140164219265023, +ERASE, 140164219265024, 140164227653631, +ERASE, 140164185690112, 140164185694207, +ERASE, 140164185694208, 140164194082815, +ERASE, 140164068257792, 
140164068261887, +ERASE, 140164068261888, 140164076650495, +ERASE, 140165225893888, 140165225897983, +ERASE, 140165225897984, 140165234286591, +ERASE, 140165058105344, 140165058109439, + }; + unsigned long set31[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140730890784768, 140737488351231, +SNULL, 140730890788863, 140737488351231, +STORE, 140730890784768, 140730890788863, +STORE, 140730890653696, 140730890788863, +STORE, 94577123659776, 94577125912575, +SNULL, 94577123790847, 94577125912575, +STORE, 94577123659776, 94577123790847, +STORE, 94577123790848, 94577125912575, +ERASE, 94577123790848, 94577125912575, +STORE, 94577125883904, 94577125892095, +STORE, 94577125892096, 94577125912575, +STORE, 140624060407808, 140624062660607, +SNULL, 140624060551167, 140624062660607, +STORE, 140624060407808, 140624060551167, +STORE, 140624060551168, 140624062660607, +ERASE, 140624060551168, 140624062660607, +STORE, 140624062648320, 140624062656511, +STORE, 140624062656512, 140624062660607, +STORE, 140730892140544, 140730892144639, +STORE, 140730892128256, 140730892140543, +STORE, 140624062619648, 140624062648319, +STORE, 140624062611456, 140624062619647, +STORE, 140624058191872, 140624060407807, +SNULL, 140624058191872, 140624058290175, +STORE, 140624058290176, 140624060407807, +STORE, 140624058191872, 140624058290175, +SNULL, 140624060383231, 140624060407807, +STORE, 140624058290176, 140624060383231, +STORE, 140624060383232, 140624060407807, +SNULL, 140624060383232, 140624060391423, +STORE, 140624060391424, 140624060407807, +STORE, 140624060383232, 140624060391423, +ERASE, 140624060383232, 140624060391423, +STORE, 140624060383232, 140624060391423, +ERASE, 140624060391424, 140624060407807, +STORE, 140624060391424, 140624060407807, +STORE, 140624054394880, 140624058191871, +SNULL, 140624054394880, 140624056053759, +STORE, 140624056053760, 140624058191871, +STORE, 140624054394880, 140624056053759, +SNULL, 140624058150911, 140624058191871, +STORE, 140624056053760, 140624058150911, +STORE, 140624058150912, 140624058191871, +SNULL, 140624058150912, 140624058175487, +STORE, 140624058175488, 140624058191871, +STORE, 140624058150912, 140624058175487, +ERASE, 140624058150912, 140624058175487, +STORE, 140624058150912, 140624058175487, +ERASE, 140624058175488, 140624058191871, +STORE, 140624058175488, 140624058191871, +STORE, 140624062603264, 140624062619647, +SNULL, 140624058167295, 140624058175487, +STORE, 140624058150912, 140624058167295, +STORE, 140624058167296, 140624058175487, +SNULL, 140624060387327, 140624060391423, +STORE, 140624060383232, 140624060387327, +STORE, 140624060387328, 140624060391423, +SNULL, 94577125887999, 94577125892095, +STORE, 94577125883904, 94577125887999, +STORE, 94577125888000, 94577125892095, +SNULL, 140624062652415, 140624062656511, +STORE, 140624062648320, 140624062652415, +STORE, 140624062652416, 140624062656511, +ERASE, 140624062619648, 140624062648319, +STORE, 94577157709824, 94577157844991, +STORE, 140624046002176, 140624054394879, +SNULL, 140624046006271, 140624054394879, +STORE, 140624046002176, 140624046006271, +STORE, 140624046006272, 140624054394879, +STORE, 140624037609472, 140624046002175, +STORE, 140623903391744, 140624037609471, +SNULL, 140623903391744, 140623940157439, +STORE, 140623940157440, 140624037609471, +STORE, 140623903391744, 140623940157439, +ERASE, 140623903391744, 140623940157439, +SNULL, 140624007266303, 140624037609471, +STORE, 140623940157440, 140624007266303, +STORE, 140624007266304, 140624037609471, +ERASE, 140624007266304, 
140624037609471, +SNULL, 140623940292607, 140624007266303, +STORE, 140623940157440, 140623940292607, +STORE, 140623940292608, 140624007266303, +SNULL, 140624037613567, 140624046002175, +STORE, 140624037609472, 140624037613567, +STORE, 140624037613568, 140624046002175, +STORE, 140624029216768, 140624037609471, +SNULL, 140624029220863, 140624037609471, +STORE, 140624029216768, 140624029220863, +STORE, 140624029220864, 140624037609471, +STORE, 140624020824064, 140624029216767, +SNULL, 140624020828159, 140624029216767, +STORE, 140624020824064, 140624020828159, +STORE, 140624020828160, 140624029216767, +STORE, 140624012431360, 140624020824063, +SNULL, 140624012435455, 140624020824063, +STORE, 140624012431360, 140624012435455, +STORE, 140624012435456, 140624020824063, +STORE, 140623931764736, 140623940157439, +STORE, 140623797547008, 140623931764735, +SNULL, 140623797547008, 140623805939711, +STORE, 140623805939712, 140623931764735, +STORE, 140623797547008, 140623805939711, +ERASE, 140623797547008, 140623805939711, +SNULL, 140623873048575, 140623931764735, +STORE, 140623805939712, 140623873048575, +STORE, 140623873048576, 140623931764735, +ERASE, 140623873048576, 140623931764735, +STORE, 140623923372032, 140623940157439, +STORE, 140623914979328, 140623940157439, +STORE, 140623906586624, 140623940157439, +STORE, 140623671721984, 140623873048575, +SNULL, 140623738830847, 140623873048575, +STORE, 140623671721984, 140623738830847, +STORE, 140623738830848, 140623873048575, +SNULL, 140623738830848, 140623805939711, +STORE, 140623805939712, 140623873048575, +STORE, 140623738830848, 140623805939711, +ERASE, 140623738830848, 140623805939711, +SNULL, 140623806074879, 140623873048575, +STORE, 140623805939712, 140623806074879, +STORE, 140623806074880, 140623873048575, +SNULL, 140623906586624, 140623931764735, +STORE, 140623931764736, 140623940157439, +STORE, 140623906586624, 140623931764735, +SNULL, 140623931768831, 140623940157439, +STORE, 140623931764736, 140623931768831, +STORE, 140623931768832, 140623940157439, +STORE, 140623537504256, 140623738830847, +SNULL, 140623537504256, 140623671721983, +STORE, 140623671721984, 140623738830847, +STORE, 140623537504256, 140623671721983, +SNULL, 140623671857151, 140623738830847, +STORE, 140623671721984, 140623671857151, +STORE, 140623671857152, 140623738830847, +SNULL, 140623604613119, 140623671721983, +STORE, 140623537504256, 140623604613119, +STORE, 140623604613120, 140623671721983, +ERASE, 140623604613120, 140623671721983, +SNULL, 140623537639423, 140623604613119, +STORE, 140623537504256, 140623537639423, +STORE, 140623537639424, 140623604613119, +STORE, 140623537639424, 140623671721983, +SNULL, 140623537639424, 140623604613119, +STORE, 140623604613120, 140623671721983, +STORE, 140623537639424, 140623604613119, +SNULL, 140623604748287, 140623671721983, +STORE, 140623604613120, 140623604748287, +STORE, 140623604748288, 140623671721983, +STORE, 140623898193920, 140623931764735, +SNULL, 140623898193920, 140623923372031, +STORE, 140623923372032, 140623931764735, +STORE, 140623898193920, 140623923372031, +SNULL, 140623923376127, 140623931764735, +STORE, 140623923372032, 140623923376127, +STORE, 140623923376128, 140623931764735, +STORE, 140623889801216, 140623923372031, +SNULL, 140623889801216, 140623898193919, +STORE, 140623898193920, 140623923372031, +STORE, 140623889801216, 140623898193919, +SNULL, 140623898198015, 140623923372031, +STORE, 140623898193920, 140623898198015, +STORE, 140623898198016, 140623923372031, +SNULL, 140623889805311, 140623898193919, +STORE, 
140623889801216, 140623889805311, +STORE, 140623889805312, 140623898193919, +SNULL, 140623898198016, 140623906586623, +STORE, 140623906586624, 140623923372031, +STORE, 140623898198016, 140623906586623, +SNULL, 140623906590719, 140623923372031, +STORE, 140623906586624, 140623906590719, +STORE, 140623906590720, 140623923372031, +STORE, 140623881408512, 140623889801215, +SNULL, 140623906590720, 140623914979327, +STORE, 140623914979328, 140623923372031, +STORE, 140623906590720, 140623914979327, +SNULL, 140623914983423, 140623923372031, +STORE, 140623914979328, 140623914983423, +STORE, 140623914983424, 140623923372031, +SNULL, 140623881412607, 140623889801215, +STORE, 140623881408512, 140623881412607, +STORE, 140623881412608, 140623889801215, +STORE, 140623797547008, 140623805939711, +STORE, 140623789154304, 140623805939711, +STORE, 140623780761600, 140623805939711, +SNULL, 140623780761600, 140623789154303, +STORE, 140623789154304, 140623805939711, +STORE, 140623780761600, 140623789154303, +SNULL, 140623789158399, 140623805939711, +STORE, 140623789154304, 140623789158399, +STORE, 140623789158400, 140623805939711, +STORE, 140623772368896, 140623789154303, +STORE, 140623763976192, 140623789154303, +SNULL, 140623763976192, 140623780761599, +STORE, 140623780761600, 140623789154303, +STORE, 140623763976192, 140623780761599, +SNULL, 140623780765695, 140623789154303, +STORE, 140623780761600, 140623780765695, +STORE, 140623780765696, 140623789154303, +SNULL, 140623789158400, 140623797547007, +STORE, 140623797547008, 140623805939711, +STORE, 140623789158400, 140623797547007, +SNULL, 140623797551103, 140623805939711, +STORE, 140623797547008, 140623797551103, +STORE, 140623797551104, 140623805939711, +SNULL, 140623763976192, 140623772368895, +STORE, 140623772368896, 140623780761599, +STORE, 140623763976192, 140623772368895, +SNULL, 140623772372991, 140623780761599, +STORE, 140623772368896, 140623772372991, +STORE, 140623772372992, 140623780761599, +SNULL, 140623763980287, 140623772368895, +STORE, 140623763976192, 140623763980287, +STORE, 140623763980288, 140623772368895, +STORE, 140623755583488, 140623763976191, +STORE, 140623747190784, 140623763976191, +SNULL, 140623747190784, 140623755583487, +STORE, 140623755583488, 140623763976191, +STORE, 140623747190784, 140623755583487, +SNULL, 140623755587583, 140623763976191, +STORE, 140623755583488, 140623755587583, +STORE, 140623755587584, 140623763976191, +STORE, 140623529111552, 140623537504255, +SNULL, 140623747194879, 140623755583487, +STORE, 140623747190784, 140623747194879, +STORE, 140623747194880, 140623755583487, +SNULL, 140623529115647, 140623537504255, +STORE, 140623529111552, 140623529115647, +STORE, 140623529115648, 140623537504255, +STORE, 140623520718848, 140623529111551, +SNULL, 140623520722943, 140623529111551, +STORE, 140623520718848, 140623520722943, +STORE, 140623520722944, 140623529111551, +STORE, 140623512326144, 140623520718847, +STORE, 140623503933440, 140623520718847, +STORE, 140623495540736, 140623520718847, +STORE, 140623361323008, 140623495540735, +STORE, 140623227105280, 140623495540735, +STORE, 140623218712576, 140623227105279, +STORE, 140623084494848, 140623218712575, +STORE, 140623076102144, 140623084494847, +STORE, 140622941884416, 140623076102143, +SNULL, 140622941884416, 140623000633343, +STORE, 140623000633344, 140623076102143, +STORE, 140622941884416, 140623000633343, +ERASE, 140622941884416, 140623000633343, +STORE, 140622992240640, 140623000633343, +STORE, 140622983847936, 140623000633343, +STORE, 140622849630208, 
140622983847935, +STORE, 140622841237504, 140622849630207, +SNULL, 140622849630208, 140622866415615, +STORE, 140622866415616, 140622983847935, +STORE, 140622849630208, 140622866415615, +ERASE, 140622849630208, 140622866415615, +STORE, 140622858022912, 140622866415615, +SNULL, 140622933524479, 140622983847935, +STORE, 140622866415616, 140622933524479, +STORE, 140622933524480, 140622983847935, +ERASE, 140622933524480, 140622983847935, +STORE, 140622975455232, 140623000633343, +STORE, 140622707019776, 140622841237503, +STORE, 140622967062528, 140623000633343, +STORE, 140622572802048, 140622841237503, +STORE, 140622958669824, 140623000633343, +STORE, 140622438584320, 140622841237503, +STORE, 140622950277120, 140623000633343, +SNULL, 140622858027007, 140622866415615, +STORE, 140622858022912, 140622858027007, +STORE, 140622858027008, 140622866415615, +STORE, 140622941884416, 140623000633343, +STORE, 140622841237504, 140622858022911, +SNULL, 140622841237504, 140622849630207, +STORE, 140622849630208, 140622858022911, +STORE, 140622841237504, 140622849630207, +SNULL, 140622849634303, 140622858022911, +STORE, 140622849630208, 140622849634303, +STORE, 140622849634304, 140622858022911, +STORE, 140622430191616, 140622438584319, +SNULL, 140622430195711, 140622438584319, +STORE, 140622430191616, 140622430195711, +STORE, 140622430195712, 140622438584319, +SNULL, 140623361323007, 140623495540735, +STORE, 140623227105280, 140623361323007, +STORE, 140623361323008, 140623495540735, +SNULL, 140623361323008, 140623403286527, +STORE, 140623403286528, 140623495540735, +STORE, 140623361323008, 140623403286527, +ERASE, 140623361323008, 140623403286527, +SNULL, 140623470395391, 140623495540735, +STORE, 140623403286528, 140623470395391, +STORE, 140623470395392, 140623495540735, +ERASE, 140623470395392, 140623495540735, +SNULL, 140623227105280, 140623269068799, +STORE, 140623269068800, 140623361323007, +STORE, 140623227105280, 140623269068799, +ERASE, 140623227105280, 140623269068799, +SNULL, 140623084494848, 140623134851071, +STORE, 140623134851072, 140623218712575, +STORE, 140623084494848, 140623134851071, +ERASE, 140623084494848, 140623134851071, +SNULL, 140623201959935, 140623218712575, +STORE, 140623134851072, 140623201959935, +STORE, 140623201959936, 140623218712575, +ERASE, 140623201959936, 140623218712575, +SNULL, 140623067742207, 140623076102143, +STORE, 140623000633344, 140623067742207, +STORE, 140623067742208, 140623076102143, +ERASE, 140623067742208, 140623076102143, +STORE, 140622295973888, 140622430191615, +SNULL, 140622295973888, 140622329544703, +STORE, 140622329544704, 140622430191615, +STORE, 140622295973888, 140622329544703, +ERASE, 140622295973888, 140622329544703, +SNULL, 140622866550783, 140622933524479, +STORE, 140622866415616, 140622866550783, +STORE, 140622866550784, 140622933524479, +SNULL, 140622707019775, 140622841237503, +STORE, 140622438584320, 140622707019775, +STORE, 140622707019776, 140622841237503, +SNULL, 140622707019776, 140622732197887, +STORE, 140622732197888, 140622841237503, +STORE, 140622707019776, 140622732197887, +ERASE, 140622707019776, 140622732197887, +SNULL, 140622799306751, 140622841237503, +STORE, 140622732197888, 140622799306751, +STORE, 140622799306752, 140622841237503, +ERASE, 140622799306752, 140622841237503, +SNULL, 140622572802047, 140622707019775, +STORE, 140622438584320, 140622572802047, +STORE, 140622572802048, 140622707019775, +SNULL, 140622572802048, 140622597980159, +STORE, 140622597980160, 140622707019775, +STORE, 140622572802048, 140622597980159, +ERASE, 
140622572802048, 140622597980159, +SNULL, 140622438584320, 140622463762431, +STORE, 140622463762432, 140622572802047, +STORE, 140622438584320, 140622463762431, +ERASE, 140622438584320, 140622463762431, +SNULL, 140622530871295, 140622572802047, +STORE, 140622463762432, 140622530871295, +STORE, 140622530871296, 140622572802047, +ERASE, 140622530871296, 140622572802047, +STORE, 140622195326976, 140622430191615, +SNULL, 140622262435839, 140622430191615, +STORE, 140622195326976, 140622262435839, +STORE, 140622262435840, 140622430191615, +SNULL, 140622262435840, 140622329544703, +STORE, 140622329544704, 140622430191615, +STORE, 140622262435840, 140622329544703, +ERASE, 140622262435840, 140622329544703, +SNULL, 140622841241599, 140622849630207, +STORE, 140622841237504, 140622841241599, +STORE, 140622841241600, 140622849630207, +STORE, 140623487148032, 140623520718847, +STORE, 140623478755328, 140623520718847, +SNULL, 140622941884416, 140622983847935, +STORE, 140622983847936, 140623000633343, +STORE, 140622941884416, 140622983847935, +SNULL, 140622983852031, 140623000633343, +STORE, 140622983847936, 140622983852031, +STORE, 140622983852032, 140623000633343, +STORE, 140623394893824, 140623403286527, +SNULL, 140623394897919, 140623403286527, +STORE, 140623394893824, 140623394897919, +STORE, 140623394897920, 140623403286527, +SNULL, 140623403421695, 140623470395391, +STORE, 140623403286528, 140623403421695, +STORE, 140623403421696, 140623470395391, +SNULL, 140623478755328, 140623503933439, +STORE, 140623503933440, 140623520718847, +STORE, 140623478755328, 140623503933439, +SNULL, 140623503937535, 140623520718847, +STORE, 140623503933440, 140623503937535, +STORE, 140623503937536, 140623520718847, +SNULL, 140623336177663, 140623361323007, +STORE, 140623269068800, 140623336177663, +STORE, 140623336177664, 140623361323007, +ERASE, 140623336177664, 140623361323007, +SNULL, 140623269203967, 140623336177663, +STORE, 140623269068800, 140623269203967, +STORE, 140623269203968, 140623336177663, +SNULL, 140623134986239, 140623201959935, +STORE, 140623134851072, 140623134986239, +STORE, 140623134986240, 140623201959935, +SNULL, 140623000768511, 140623067742207, +STORE, 140623000633344, 140623000768511, +STORE, 140623000768512, 140623067742207, +SNULL, 140622396653567, 140622430191615, +STORE, 140622329544704, 140622396653567, +STORE, 140622396653568, 140622430191615, +ERASE, 140622396653568, 140622430191615, +SNULL, 140622732333055, 140622799306751, +STORE, 140622732197888, 140622732333055, +STORE, 140622732333056, 140622799306751, +SNULL, 140622941884416, 140622975455231, +STORE, 140622975455232, 140622983847935, +STORE, 140622941884416, 140622975455231, +SNULL, 140622975459327, 140622983847935, +STORE, 140622975455232, 140622975459327, +STORE, 140622975459328, 140622983847935, +SNULL, 140622665089023, 140622707019775, +STORE, 140622597980160, 140622665089023, +STORE, 140622665089024, 140622707019775, +ERASE, 140622665089024, 140622707019775, +SNULL, 140622598115327, 140622665089023, +STORE, 140622597980160, 140622598115327, +STORE, 140622598115328, 140622665089023, +SNULL, 140622463897599, 140622530871295, +STORE, 140622463762432, 140622463897599, +STORE, 140622463897600, 140622530871295, +SNULL, 140622195462143, 140622262435839, +STORE, 140622195326976, 140622195462143, +STORE, 140622195462144, 140622262435839, +STORE, 140623386501120, 140623394893823, +SNULL, 140622941884416, 140622950277119, +STORE, 140622950277120, 140622975455231, +STORE, 140622941884416, 140622950277119, +SNULL, 140622950281215, 
140622975455231, +STORE, 140622950277120, 140622950281215, +STORE, 140622950281216, 140622975455231, +SNULL, 140622941888511, 140622950277119, +STORE, 140622941884416, 140622941888511, +STORE, 140622941888512, 140622950277119, +STORE, 140623378108416, 140623394893823, +SNULL, 140623478755328, 140623495540735, +STORE, 140623495540736, 140623503933439, +STORE, 140623478755328, 140623495540735, +SNULL, 140623495544831, 140623503933439, +STORE, 140623495540736, 140623495544831, +STORE, 140623495544832, 140623503933439, +SNULL, 140623478755328, 140623487148031, +STORE, 140623487148032, 140623495540735, +STORE, 140623478755328, 140623487148031, +SNULL, 140623487152127, 140623495540735, +STORE, 140623487148032, 140623487152127, +STORE, 140623487152128, 140623495540735, +SNULL, 140623218716671, 140623227105279, +STORE, 140623218712576, 140623218716671, +STORE, 140623218716672, 140623227105279, +SNULL, 140623076106239, 140623084494847, +STORE, 140623076102144, 140623076106239, +STORE, 140623076106240, 140623084494847, +SNULL, 140622329679871, 140622396653567, +STORE, 140622329544704, 140622329679871, +STORE, 140622329679872, 140622396653567, +SNULL, 140622950281216, 140622958669823, +STORE, 140622958669824, 140622975455231, +STORE, 140622950281216, 140622958669823, +SNULL, 140622958673919, 140622975455231, +STORE, 140622958669824, 140622958673919, +STORE, 140622958673920, 140622975455231, +SNULL, 140623503937536, 140623512326143, +STORE, 140623512326144, 140623520718847, +STORE, 140623503937536, 140623512326143, +SNULL, 140623512330239, 140623520718847, +STORE, 140623512326144, 140623512330239, +STORE, 140623512330240, 140623520718847, +SNULL, 140623378108416, 140623386501119, +STORE, 140623386501120, 140623394893823, +STORE, 140623378108416, 140623386501119, +SNULL, 140623386505215, 140623394893823, +STORE, 140623386501120, 140623386505215, +STORE, 140623386505216, 140623394893823, +STORE, 140623369715712, 140623386501119, +STORE, 140623361323008, 140623386501119, +STORE, 140623352930304, 140623386501119, +SNULL, 140623352930304, 140623361323007, +STORE, 140623361323008, 140623386501119, +STORE, 140623352930304, 140623361323007, +SNULL, 140623361327103, 140623386501119, +STORE, 140623361323008, 140623361327103, +STORE, 140623361327104, 140623386501119, +SNULL, 140623478759423, 140623487148031, +STORE, 140623478755328, 140623478759423, +STORE, 140623478759424, 140623487148031, +STORE, 140623344537600, 140623361323007, +STORE, 140623260676096, 140623269068799, +SNULL, 140622958673920, 140622967062527, +STORE, 140622967062528, 140622975455231, +STORE, 140622958673920, 140622967062527, +SNULL, 140622967066623, 140622975455231, +STORE, 140622967062528, 140622967066623, +STORE, 140622967066624, 140622975455231, +STORE, 140623252283392, 140623269068799, +STORE, 140623243890688, 140623269068799, +SNULL, 140622983852032, 140622992240639, +STORE, 140622992240640, 140623000633343, +STORE, 140622983852032, 140622992240639, +SNULL, 140622992244735, 140623000633343, +STORE, 140622992240640, 140622992244735, +STORE, 140622992244736, 140623000633343, +STORE, 140623235497984, 140623269068799, +STORE, 140623218716672, 140623235497983, +STORE, 140623210319872, 140623218712575, +STORE, 140623126458368, 140623134851071, +SNULL, 140623210323967, 140623218712575, +STORE, 140623210319872, 140623210323967, +STORE, 140623210323968, 140623218712575, +SNULL, 140623218716672, 140623227105279, +STORE, 140623227105280, 140623235497983, +STORE, 140623218716672, 140623227105279, +SNULL, 140623227109375, 140623235497983, +STORE, 
140623227105280, 140623227109375, +STORE, 140623227109376, 140623235497983, +STORE, 140623118065664, 140623134851071, +STORE, 140623109672960, 140623134851071, +SNULL, 140623109677055, 140623134851071, +STORE, 140623109672960, 140623109677055, +STORE, 140623109677056, 140623134851071, +STORE, 140623101280256, 140623109672959, +STORE, 140623092887552, 140623109672959, +SNULL, 140623092887552, 140623101280255, +STORE, 140623101280256, 140623109672959, +STORE, 140623092887552, 140623101280255, +SNULL, 140623101284351, 140623109672959, +STORE, 140623101280256, 140623101284351, +STORE, 140623101284352, 140623109672959, +SNULL, 140623361327104, 140623378108415, +STORE, 140623378108416, 140623386501119, +STORE, 140623361327104, 140623378108415, +SNULL, 140623378112511, 140623386501119, +STORE, 140623378108416, 140623378112511, +STORE, 140623378112512, 140623386501119, +SNULL, 140623235497984, 140623243890687, +STORE, 140623243890688, 140623269068799, +STORE, 140623235497984, 140623243890687, +SNULL, 140623243894783, 140623269068799, +STORE, 140623243890688, 140623243894783, +STORE, 140623243894784, 140623269068799, +SNULL, 140623361327104, 140623369715711, +STORE, 140623369715712, 140623378108415, +STORE, 140623361327104, 140623369715711, +SNULL, 140623369719807, 140623378108415, +STORE, 140623369715712, 140623369719807, +STORE, 140623369719808, 140623378108415, +SNULL, 140623243894784, 140623252283391, +STORE, 140623252283392, 140623269068799, +STORE, 140623243894784, 140623252283391, +SNULL, 140623252287487, 140623269068799, +STORE, 140623252283392, 140623252287487, +STORE, 140623252287488, 140623269068799, +SNULL, 140623235502079, 140623243890687, +STORE, 140623235497984, 140623235502079, +STORE, 140623235502080, 140623243890687, +SNULL, 140623344541695, 140623361323007, +STORE, 140623344537600, 140623344541695, +STORE, 140623344541696, 140623361323007, +STORE, 140623076106240, 140623092887551, +SNULL, 140623076106240, 140623084494847, +STORE, 140623084494848, 140623092887551, +STORE, 140623076106240, 140623084494847, +SNULL, 140623084498943, 140623092887551, +STORE, 140623084494848, 140623084498943, +STORE, 140623084498944, 140623092887551, +SNULL, 140623344541696, 140623352930303, +STORE, 140623352930304, 140623361323007, +STORE, 140623344541696, 140623352930303, +SNULL, 140623352934399, 140623361323007, +STORE, 140623352930304, 140623352934399, +STORE, 140623352934400, 140623361323007, +SNULL, 140623109677056, 140623118065663, +STORE, 140623118065664, 140623134851071, +STORE, 140623109677056, 140623118065663, +SNULL, 140623118069759, 140623134851071, +STORE, 140623118065664, 140623118069759, +STORE, 140623118069760, 140623134851071, +STORE, 140622832844800, 140622841237503, +STORE, 140622824452096, 140622841237503, +SNULL, 140622824452096, 140622832844799, +STORE, 140622832844800, 140622841237503, +STORE, 140622824452096, 140622832844799, +SNULL, 140622832848895, 140622841237503, +STORE, 140622832844800, 140622832848895, +STORE, 140622832848896, 140622841237503, +STORE, 140622816059392, 140622832844799, +SNULL, 140623092891647, 140623101280255, +STORE, 140623092887552, 140623092891647, +STORE, 140623092891648, 140623101280255, +SNULL, 140623118069760, 140623126458367, +STORE, 140623126458368, 140623134851071, +STORE, 140623118069760, 140623126458367, +SNULL, 140623126462463, 140623134851071, +STORE, 140623126458368, 140623126462463, +STORE, 140623126462464, 140623134851071, +SNULL, 140623252287488, 140623260676095, +STORE, 140623260676096, 140623269068799, +STORE, 140623252287488, 
140623260676095, +SNULL, 140623260680191, 140623269068799, +STORE, 140623260676096, 140623260680191, +STORE, 140623260680192, 140623269068799, +STORE, 140622807666688, 140622832844799, +STORE, 140622723805184, 140622732197887, +STORE, 140622715412480, 140622732197887, +STORE, 140622707019776, 140622732197887, +SNULL, 140622707023871, 140622732197887, +STORE, 140622707019776, 140622707023871, +STORE, 140622707023872, 140622732197887, +STORE, 140622698627072, 140622707019775, +STORE, 140622690234368, 140622707019775, +SNULL, 140622690238463, 140622707019775, +STORE, 140622690234368, 140622690238463, +STORE, 140622690238464, 140622707019775, +SNULL, 140622807666688, 140622816059391, +STORE, 140622816059392, 140622832844799, +STORE, 140622807666688, 140622816059391, +SNULL, 140622816063487, 140622832844799, +STORE, 140622816059392, 140622816063487, +STORE, 140622816063488, 140622832844799, +STORE, 140622681841664, 140622690234367, +STORE, 140622673448960, 140622690234367, +SNULL, 140622673453055, 140622690234367, +STORE, 140622673448960, 140622673453055, +STORE, 140622673453056, 140622690234367, +STORE, 140622589587456, 140622597980159, +SNULL, 140622807670783, 140622816059391, +STORE, 140622807666688, 140622807670783, +STORE, 140622807670784, 140622816059391, +STORE, 140622581194752, 140622597980159, +SNULL, 140622581198847, 140622597980159, +STORE, 140622581194752, 140622581198847, +STORE, 140622581198848, 140622597980159, +SNULL, 140622816063488, 140622824452095, +STORE, 140622824452096, 140622832844799, +STORE, 140622816063488, 140622824452095, +SNULL, 140622824456191, 140622832844799, +STORE, 140622824452096, 140622824456191, +STORE, 140622824456192, 140622832844799, +STORE, 140622572802048, 140622581194751, +SNULL, 140622572806143, 140622581194751, +STORE, 140622572802048, 140622572806143, +STORE, 140622572806144, 140622581194751, +STORE, 140622564409344, 140622572802047, +STORE, 140622556016640, 140622572802047, +SNULL, 140622556016640, 140622564409343, +STORE, 140622564409344, 140622572802047, +STORE, 140622556016640, 140622564409343, +SNULL, 140622564413439, 140622572802047, +STORE, 140622564409344, 140622564413439, +STORE, 140622564413440, 140622572802047, +SNULL, 140622690238464, 140622698627071, +STORE, 140622698627072, 140622707019775, +STORE, 140622690238464, 140622698627071, +SNULL, 140622698631167, 140622707019775, +STORE, 140622698627072, 140622698631167, +STORE, 140622698631168, 140622707019775, +SNULL, 140622707023872, 140622723805183, +STORE, 140622723805184, 140622732197887, +STORE, 140622707023872, 140622723805183, +SNULL, 140622723809279, 140622732197887, +STORE, 140622723805184, 140622723809279, +STORE, 140622723809280, 140622732197887, +SNULL, 140622707023872, 140622715412479, +STORE, 140622715412480, 140622723805183, +STORE, 140622707023872, 140622715412479, +SNULL, 140622715416575, 140622723805183, +STORE, 140622715412480, 140622715416575, +STORE, 140622715416576, 140622723805183, +STORE, 140622547623936, 140622564409343, +SNULL, 140622547628031, 140622564409343, +STORE, 140622547623936, 140622547628031, +STORE, 140622547628032, 140622564409343, +STORE, 140622539231232, 140622547623935, +SNULL, 140622539235327, 140622547623935, +STORE, 140622539231232, 140622539235327, +STORE, 140622539235328, 140622547623935, +SNULL, 140622581198848, 140622589587455, +STORE, 140622589587456, 140622597980159, +STORE, 140622581198848, 140622589587455, +SNULL, 140622589591551, 140622597980159, +STORE, 140622589587456, 140622589591551, +STORE, 140622589591552, 140622597980159, +STORE, 
140622455369728, 140622463762431, +SNULL, 140622455373823, 140622463762431, +STORE, 140622455369728, 140622455373823, +STORE, 140622455373824, 140622463762431, +STORE, 140622446977024, 140622455369727, +SNULL, 140622446981119, 140622455369727, +STORE, 140622446977024, 140622446981119, +STORE, 140622446981120, 140622455369727, +SNULL, 140622547628032, 140622556016639, +STORE, 140622556016640, 140622564409343, +STORE, 140622547628032, 140622556016639, +SNULL, 140622556020735, 140622564409343, +STORE, 140622556016640, 140622556020735, +STORE, 140622556020736, 140622564409343, +STORE, 140622430195712, 140622446977023, +STORE, 140622421798912, 140622430191615, +SNULL, 140622430195712, 140622438584319, +STORE, 140622438584320, 140622446977023, +STORE, 140622430195712, 140622438584319, +SNULL, 140622438588415, 140622446977023, +STORE, 140622438584320, 140622438588415, +STORE, 140622438588416, 140622446977023, +STORE, 140622413406208, 140622430191615, +STORE, 140622405013504, 140622430191615, +SNULL, 140622405013504, 140622413406207, +STORE, 140622413406208, 140622430191615, +STORE, 140622405013504, 140622413406207, +SNULL, 140622413410303, 140622430191615, +STORE, 140622413406208, 140622413410303, +STORE, 140622413410304, 140622430191615, +SNULL, 140622673453056, 140622681841663, +STORE, 140622681841664, 140622690234367, +STORE, 140622673453056, 140622681841663, +SNULL, 140622681845759, 140622690234367, +STORE, 140622681841664, 140622681845759, +STORE, 140622681845760, 140622690234367, +STORE, 140622321152000, 140622329544703, +SNULL, 140622413410304, 140622421798911, +STORE, 140622421798912, 140622430191615, +STORE, 140622413410304, 140622421798911, +SNULL, 140622421803007, 140622430191615, +STORE, 140622421798912, 140622421803007, +STORE, 140622421803008, 140622430191615, +STORE, 140622312759296, 140622329544703, +SNULL, 140622312763391, 140622329544703, +STORE, 140622312759296, 140622312763391, +STORE, 140622312763392, 140622329544703, +SNULL, 140622405017599, 140622413406207, +STORE, 140622405013504, 140622405017599, +STORE, 140622405017600, 140622413406207, +STORE, 140622304366592, 140622312759295, +SNULL, 140622304370687, 140622312759295, +STORE, 140622304366592, 140622304370687, +STORE, 140622304370688, 140622312759295, +SNULL, 140622312763392, 140622321151999, +STORE, 140622321152000, 140622329544703, +STORE, 140622312763392, 140622321151999, +SNULL, 140622321156095, 140622329544703, +STORE, 140622321152000, 140622321156095, +STORE, 140622321156096, 140622329544703, +STORE, 140624062619648, 140624062648319, +STORE, 140624010240000, 140624012431359, +SNULL, 140624010240000, 140624010330111, +STORE, 140624010330112, 140624012431359, +STORE, 140624010240000, 140624010330111, +SNULL, 140624012423167, 140624012431359, +STORE, 140624010330112, 140624012423167, +STORE, 140624012423168, 140624012431359, +ERASE, 140624012423168, 140624012431359, +STORE, 140624012423168, 140624012431359, +SNULL, 140624012427263, 140624012431359, +STORE, 140624012423168, 140624012427263, +STORE, 140624012427264, 140624012431359, +ERASE, 140624062619648, 140624062648319, +ERASE, 140622849630208, 140622849634303, +ERASE, 140622849634304, 140622858022911, +ERASE, 140623394893824, 140623394897919, +ERASE, 140623394897920, 140623403286527, +ERASE, 140623361323008, 140623361327103, +ERASE, 140623361327104, 140623369715711, +ERASE, 140623084494848, 140623084498943, +ERASE, 140623084498944, 140623092887551, +ERASE, 140623931764736, 140623931768831, +ERASE, 140623931768832, 140623940157439, +ERASE, 140622841237504, 
140622841241599, +ERASE, 140622841241600, 140622849630207, +ERASE, 140623487148032, 140623487152127, +ERASE, 140623487152128, 140623495540735, +ERASE, 140623109672960, 140623109677055, +ERASE, 140623109677056, 140623118065663, +ERASE, 140622983847936, 140622983852031, +ERASE, 140622983852032, 140622992240639, +ERASE, 140623352930304, 140623352934399, +ERASE, 140623352934400, 140623361323007, +ERASE, 140622564409344, 140622564413439, +ERASE, 140622564413440, 140622572802047, +ERASE, 140622430191616, 140622430195711, +ERASE, 140622430195712, 140622438584319, +ERASE, 140622958669824, 140622958673919, +ERASE, 140622958673920, 140622967062527, +ERASE, 140622992240640, 140622992244735, +ERASE, 140622992244736, 140623000633343, +ERASE, 140623227105280, 140623227109375, +ERASE, 140623227109376, 140623235497983, +ERASE, 140622321152000, 140622321156095, +ERASE, 140622321156096, 140622329544703, +ERASE, 140622858022912, 140622858027007, +ERASE, 140622858027008, 140622866415615, +ERASE, 140622975455232, 140622975459327, +ERASE, 140622975459328, 140622983847935, +ERASE, 140623378108416, 140623378112511, +ERASE, 140623378112512, 140623386501119, +ERASE, 140623495540736, 140623495544831, +ERASE, 140623495544832, 140623503933439, +ERASE, 140623118065664, 140623118069759, +ERASE, 140623118069760, 140623126458367, +ERASE, 140622572802048, 140622572806143, +ERASE, 140622572806144, 140622581194751, +ERASE, 140622421798912, 140622421803007, +ERASE, 140622421803008, 140622430191615, +ERASE, 140622967062528, 140622967066623, +ERASE, 140622967066624, 140622975455231, +ERASE, 140623252283392, 140623252287487, +ERASE, 140623252287488, 140623260676095, +ERASE, 140622673448960, 140622673453055, +ERASE, 140622673453056, 140622681841663, +ERASE, 140623076102144, 140623076106239, +ERASE, 140623076106240, 140623084494847, +ERASE, 140623101280256, 140623101284351, +ERASE, 140623101284352, 140623109672959, +ERASE, 140622715412480, 140622715416575, +ERASE, 140622715416576, 140622723805183, +ERASE, 140622405013504, 140622405017599, +ERASE, 140622405017600, 140622413406207, +ERASE, 140623478755328, 140623478759423, +ERASE, 140623478759424, 140623487148031, +ERASE, 140623906586624, 140623906590719, +ERASE, 140623906590720, 140623914979327, +ERASE, 140622950277120, 140622950281215, +ERASE, 140622950281216, 140622958669823, + }; + unsigned long set32[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140731244212224, 140737488351231, +SNULL, 140731244216319, 140737488351231, +STORE, 140731244212224, 140731244216319, +STORE, 140731244081152, 140731244216319, +STORE, 94427773984768, 94427776237567, +SNULL, 94427774115839, 94427776237567, +STORE, 94427773984768, 94427774115839, +STORE, 94427774115840, 94427776237567, +ERASE, 94427774115840, 94427776237567, +STORE, 94427776208896, 94427776217087, +STORE, 94427776217088, 94427776237567, +STORE, 140401464893440, 140401467146239, +SNULL, 140401465036799, 140401467146239, +STORE, 140401464893440, 140401465036799, +STORE, 140401465036800, 140401467146239, +ERASE, 140401465036800, 140401467146239, +STORE, 140401467133952, 140401467142143, +STORE, 140401467142144, 140401467146239, +STORE, 140731244507136, 140731244511231, +STORE, 140731244494848, 140731244507135, +STORE, 140401467105280, 140401467133951, +STORE, 140401467097088, 140401467105279, +STORE, 140401462677504, 140401464893439, +SNULL, 140401462677504, 140401462775807, +STORE, 140401462775808, 140401464893439, +STORE, 140401462677504, 140401462775807, +SNULL, 140401464868863, 140401464893439, +STORE, 140401462775808, 
140401464868863, +STORE, 140401464868864, 140401464893439, +SNULL, 140401464868864, 140401464877055, +STORE, 140401464877056, 140401464893439, +STORE, 140401464868864, 140401464877055, +ERASE, 140401464868864, 140401464877055, +STORE, 140401464868864, 140401464877055, +ERASE, 140401464877056, 140401464893439, +STORE, 140401464877056, 140401464893439, +STORE, 140401458880512, 140401462677503, +SNULL, 140401458880512, 140401460539391, +STORE, 140401460539392, 140401462677503, +STORE, 140401458880512, 140401460539391, +SNULL, 140401462636543, 140401462677503, +STORE, 140401460539392, 140401462636543, +STORE, 140401462636544, 140401462677503, +SNULL, 140401462636544, 140401462661119, +STORE, 140401462661120, 140401462677503, +STORE, 140401462636544, 140401462661119, +ERASE, 140401462636544, 140401462661119, +STORE, 140401462636544, 140401462661119, +ERASE, 140401462661120, 140401462677503, +STORE, 140401462661120, 140401462677503, +STORE, 140401467088896, 140401467105279, +SNULL, 140401462652927, 140401462661119, +STORE, 140401462636544, 140401462652927, +STORE, 140401462652928, 140401462661119, +SNULL, 140401464872959, 140401464877055, +STORE, 140401464868864, 140401464872959, +STORE, 140401464872960, 140401464877055, +SNULL, 94427776212991, 94427776217087, +STORE, 94427776208896, 94427776212991, +STORE, 94427776212992, 94427776217087, +SNULL, 140401467138047, 140401467142143, +STORE, 140401467133952, 140401467138047, +STORE, 140401467138048, 140401467142143, +ERASE, 140401467105280, 140401467133951, +STORE, 94427784683520, 94427784818687, +STORE, 140401450487808, 140401458880511, +SNULL, 140401450491903, 140401458880511, +STORE, 140401450487808, 140401450491903, +STORE, 140401450491904, 140401458880511, +STORE, 140401442095104, 140401450487807, +STORE, 140401307877376, 140401442095103, +SNULL, 140401307877376, 140401340055551, +STORE, 140401340055552, 140401442095103, +STORE, 140401307877376, 140401340055551, +ERASE, 140401307877376, 140401340055551, +SNULL, 140401407164415, 140401442095103, +STORE, 140401340055552, 140401407164415, +STORE, 140401407164416, 140401442095103, +ERASE, 140401407164416, 140401442095103, +SNULL, 140401340190719, 140401407164415, +STORE, 140401340055552, 140401340190719, +STORE, 140401340190720, 140401407164415, +SNULL, 140401442099199, 140401450487807, +STORE, 140401442095104, 140401442099199, +STORE, 140401442099200, 140401450487807, +STORE, 140401433702400, 140401442095103, +SNULL, 140401433706495, 140401442095103, +STORE, 140401433702400, 140401433706495, +STORE, 140401433706496, 140401442095103, +STORE, 140401425309696, 140401433702399, +SNULL, 140401425313791, 140401433702399, +STORE, 140401425309696, 140401425313791, +STORE, 140401425313792, 140401433702399, +STORE, 140401416916992, 140401425309695, +SNULL, 140401416921087, 140401425309695, +STORE, 140401416916992, 140401416921087, +STORE, 140401416921088, 140401425309695, +STORE, 140401408524288, 140401416916991, +STORE, 140401205837824, 140401340055551, +SNULL, 140401272946687, 140401340055551, +STORE, 140401205837824, 140401272946687, +STORE, 140401272946688, 140401340055551, +ERASE, 140401272946688, 140401340055551, +SNULL, 140401205972991, 140401272946687, +STORE, 140401205837824, 140401205972991, +STORE, 140401205972992, 140401272946687, +STORE, 140401331662848, 140401340055551, +STORE, 140401323270144, 140401340055551, +STORE, 140401138728960, 140401205837823, +STORE, 140401314877440, 140401340055551, +SNULL, 140401408528383, 140401416916991, +STORE, 140401408524288, 140401408528383, +STORE, 
140401408528384, 140401416916991, +SNULL, 140401138864127, 140401205837823, +STORE, 140401138728960, 140401138864127, +STORE, 140401138864128, 140401205837823, +STORE, 140401004511232, 140401138728959, +SNULL, 140401071620095, 140401138728959, +STORE, 140401004511232, 140401071620095, +STORE, 140401071620096, 140401138728959, +ERASE, 140401071620096, 140401138728959, +STORE, 140400870293504, 140401071620095, +SNULL, 140400937402367, 140401071620095, +STORE, 140400870293504, 140400937402367, +STORE, 140400937402368, 140401071620095, +SNULL, 140400937402368, 140401004511231, +STORE, 140401004511232, 140401071620095, +STORE, 140400937402368, 140401004511231, +ERASE, 140400937402368, 140401004511231, +STORE, 140401306484736, 140401340055551, +SNULL, 140401306484736, 140401323270143, +STORE, 140401323270144, 140401340055551, +STORE, 140401306484736, 140401323270143, +SNULL, 140401323274239, 140401340055551, +STORE, 140401323270144, 140401323274239, +STORE, 140401323274240, 140401340055551, +SNULL, 140401004646399, 140401071620095, +STORE, 140401004511232, 140401004646399, +STORE, 140401004646400, 140401071620095, +SNULL, 140400870428671, 140400937402367, +STORE, 140400870293504, 140400870428671, +STORE, 140400870428672, 140400937402367, +SNULL, 140401306488831, 140401323270143, +STORE, 140401306484736, 140401306488831, +STORE, 140401306488832, 140401323270143, +STORE, 140401298092032, 140401306484735, +SNULL, 140401306488832, 140401314877439, +STORE, 140401314877440, 140401323270143, +STORE, 140401306488832, 140401314877439, +SNULL, 140401314881535, 140401323270143, +STORE, 140401314877440, 140401314881535, +STORE, 140401314881536, 140401323270143, +SNULL, 140401323274240, 140401331662847, +STORE, 140401331662848, 140401340055551, +STORE, 140401323274240, 140401331662847, +SNULL, 140401331666943, 140401340055551, +STORE, 140401331662848, 140401331666943, +STORE, 140401331666944, 140401340055551, +SNULL, 140401298096127, 140401306484735, +STORE, 140401298092032, 140401298096127, +STORE, 140401298096128, 140401306484735, +STORE, 140401289699328, 140401298092031, +STORE, 140401281306624, 140401298092031, +STORE, 140401130336256, 140401138728959, +SNULL, 140401281306624, 140401289699327, +STORE, 140401289699328, 140401298092031, +STORE, 140401281306624, 140401289699327, +SNULL, 140401289703423, 140401298092031, +STORE, 140401289699328, 140401289703423, +STORE, 140401289703424, 140401298092031, +STORE, 140401121943552, 140401138728959, +STORE, 140401113550848, 140401138728959, +SNULL, 140401281310719, 140401289699327, +STORE, 140401281306624, 140401281310719, +STORE, 140401281310720, 140401289699327, +SNULL, 140401113550848, 140401121943551, +STORE, 140401121943552, 140401138728959, +STORE, 140401113550848, 140401121943551, +SNULL, 140401121947647, 140401138728959, +STORE, 140401121943552, 140401121947647, +STORE, 140401121947648, 140401138728959, +STORE, 140401105158144, 140401121943551, +SNULL, 140401121947648, 140401130336255, +STORE, 140401130336256, 140401138728959, +STORE, 140401121947648, 140401130336255, +SNULL, 140401130340351, 140401138728959, +STORE, 140401130336256, 140401130340351, +STORE, 140401130340352, 140401138728959, +STORE, 140401096765440, 140401121943551, +SNULL, 140401096765440, 140401113550847, +STORE, 140401113550848, 140401121943551, +STORE, 140401096765440, 140401113550847, +SNULL, 140401113554943, 140401121943551, +STORE, 140401113550848, 140401113554943, +STORE, 140401113554944, 140401121943551, +STORE, 140401088372736, 140401113550847, +SNULL, 140401088372736, 
140401096765439, +STORE, 140401096765440, 140401113550847, +STORE, 140401088372736, 140401096765439, +SNULL, 140401096769535, 140401113550847, +STORE, 140401096765440, 140401096769535, +STORE, 140401096769536, 140401113550847, +SNULL, 140401096769536, 140401105158143, +STORE, 140401105158144, 140401113550847, +STORE, 140401096769536, 140401105158143, +SNULL, 140401105162239, 140401113550847, +STORE, 140401105158144, 140401105162239, +STORE, 140401105162240, 140401113550847, +SNULL, 140401088376831, 140401096765439, +STORE, 140401088372736, 140401088376831, +STORE, 140401088376832, 140401096765439, +STORE, 140401079980032, 140401088372735, +STORE, 140400996118528, 140401004511231, +SNULL, 140401079984127, 140401088372735, +STORE, 140401079980032, 140401079984127, +STORE, 140401079984128, 140401088372735, +SNULL, 140400996122623, 140401004511231, +STORE, 140400996118528, 140400996122623, +STORE, 140400996122624, 140401004511231, +STORE, 140400987725824, 140400996118527, +STORE, 140400979333120, 140400996118527, +STORE, 140400803184640, 140400870293503, +SNULL, 140400803319807, 140400870293503, +STORE, 140400803184640, 140400803319807, +STORE, 140400803319808, 140400870293503, +SNULL, 140400979333120, 140400987725823, +STORE, 140400987725824, 140400996118527, +STORE, 140400979333120, 140400987725823, +SNULL, 140400987729919, 140400996118527, +STORE, 140400987725824, 140400987729919, +STORE, 140400987729920, 140400996118527, +STORE, 140400970940416, 140400987725823, +STORE, 140400962547712, 140400987725823, +STORE, 140400668966912, 140400803184639, +STORE, 140400954155008, 140400987725823, +STORE, 140400945762304, 140400987725823, +STORE, 140400660574208, 140400668966911, +STORE, 140400593465344, 140400660574207, +STORE, 140400585072640, 140400593465343, +STORE, 140400450854912, 140400585072639, +STORE, 140400442462208, 140400450854911, +STORE, 140400434069504, 140400450854911, +STORE, 140400299851776, 140400434069503, +STORE, 140400291459072, 140400299851775, +SNULL, 140400299851776, 140400333422591, +STORE, 140400333422592, 140400434069503, +STORE, 140400299851776, 140400333422591, +ERASE, 140400299851776, 140400333422591, +STORE, 140400325029888, 140400333422591, +STORE, 140400157241344, 140400291459071, +STORE, 140400316637184, 140400333422591, +STORE, 140400308244480, 140400333422591, +STORE, 140400023023616, 140400291459071, +STORE, 140400291459072, 140400333422591, +SNULL, 140400023023616, 140400064987135, +STORE, 140400064987136, 140400291459071, +STORE, 140400023023616, 140400064987135, +ERASE, 140400023023616, 140400064987135, +STORE, 140400056594432, 140400064987135, +SNULL, 140400056598527, 140400064987135, +STORE, 140400056594432, 140400056598527, +STORE, 140400056598528, 140400064987135, +STORE, 140399989485568, 140400056594431, +SNULL, 140400291459072, 140400316637183, +STORE, 140400316637184, 140400333422591, +STORE, 140400291459072, 140400316637183, +SNULL, 140400316641279, 140400333422591, +STORE, 140400316637184, 140400316641279, +STORE, 140400316641280, 140400333422591, +STORE, 140399855267840, 140400056594431, +SNULL, 140399855267840, 140399863660543, +STORE, 140399863660544, 140400056594431, +STORE, 140399855267840, 140399863660543, +ERASE, 140399855267840, 140399863660543, +SNULL, 140400736075775, 140400803184639, +STORE, 140400668966912, 140400736075775, +STORE, 140400736075776, 140400803184639, +ERASE, 140400736075776, 140400803184639, +SNULL, 140400669102079, 140400736075775, +STORE, 140400668966912, 140400669102079, +STORE, 140400669102080, 140400736075775, +STORE, 
140400669102080, 140400803184639, +SNULL, 140400669102080, 140400736075775, +STORE, 140400736075776, 140400803184639, +STORE, 140400669102080, 140400736075775, +SNULL, 140400736210943, 140400803184639, +STORE, 140400736075776, 140400736210943, +STORE, 140400736210944, 140400803184639, +ERASE, 140400593465344, 140400660574207, +SNULL, 140400450854912, 140400467640319, +STORE, 140400467640320, 140400585072639, +STORE, 140400450854912, 140400467640319, +ERASE, 140400450854912, 140400467640319, +STORE, 140399729442816, 140400056594431, +SNULL, 140400400531455, 140400434069503, +STORE, 140400333422592, 140400400531455, +STORE, 140400400531456, 140400434069503, +ERASE, 140400400531456, 140400434069503, +SNULL, 140400333557759, 140400400531455, +STORE, 140400333422592, 140400333557759, +STORE, 140400333557760, 140400400531455, +SNULL, 140400157241343, 140400291459071, +STORE, 140400064987136, 140400157241343, +STORE, 140400157241344, 140400291459071, +SNULL, 140400157241344, 140400199204863, +STORE, 140400199204864, 140400291459071, +STORE, 140400157241344, 140400199204863, +ERASE, 140400157241344, 140400199204863, +SNULL, 140400266313727, 140400291459071, +STORE, 140400199204864, 140400266313727, +STORE, 140400266313728, 140400291459071, +ERASE, 140400266313728, 140400291459071, +SNULL, 140400132095999, 140400157241343, +STORE, 140400064987136, 140400132095999, +STORE, 140400132096000, 140400157241343, +ERASE, 140400132096000, 140400157241343, +SNULL, 140400065122303, 140400132095999, +STORE, 140400064987136, 140400065122303, +STORE, 140400065122304, 140400132095999, +SNULL, 140400945762304, 140400954155007, +STORE, 140400954155008, 140400987725823, +STORE, 140400945762304, 140400954155007, +SNULL, 140400954159103, 140400987725823, +STORE, 140400954155008, 140400954159103, +STORE, 140400954159104, 140400987725823, +SNULL, 140400434069504, 140400442462207, +STORE, 140400442462208, 140400450854911, +STORE, 140400434069504, 140400442462207, +SNULL, 140400442466303, 140400450854911, +STORE, 140400442462208, 140400442466303, +STORE, 140400442466304, 140400450854911, +SNULL, 140400291463167, 140400316637183, +STORE, 140400291459072, 140400291463167, +STORE, 140400291463168, 140400316637183, +STORE, 140400652181504, 140400668966911, +STORE, 140400643788800, 140400668966911, +SNULL, 140400291463168, 140400299851775, +STORE, 140400299851776, 140400316637183, +STORE, 140400291463168, 140400299851775, +SNULL, 140400299855871, 140400316637183, +STORE, 140400299851776, 140400299855871, +STORE, 140400299855872, 140400316637183, +STORE, 140400635396096, 140400668966911, +SNULL, 140400635396096, 140400643788799, +STORE, 140400643788800, 140400668966911, +STORE, 140400635396096, 140400643788799, +SNULL, 140400643792895, 140400668966911, +STORE, 140400643788800, 140400643792895, +STORE, 140400643792896, 140400668966911, +SNULL, 140399989485567, 140400056594431, +STORE, 140399729442816, 140399989485567, +STORE, 140399989485568, 140400056594431, +ERASE, 140399989485568, 140400056594431, +SNULL, 140399930769407, 140399989485567, +STORE, 140399729442816, 140399930769407, +STORE, 140399930769408, 140399989485567, +ERASE, 140399930769408, 140399989485567, +SNULL, 140400945766399, 140400954155007, +STORE, 140400945762304, 140400945766399, +STORE, 140400945766400, 140400954155007, +SNULL, 140400534749183, 140400585072639, +STORE, 140400467640320, 140400534749183, +STORE, 140400534749184, 140400585072639, +ERASE, 140400534749184, 140400585072639, +SNULL, 140399796551679, 140399930769407, +STORE, 140399729442816, 
140399796551679, +STORE, 140399796551680, 140399930769407, +SNULL, 140399796551680, 140399863660543, +STORE, 140399863660544, 140399930769407, +STORE, 140399796551680, 140399863660543, +ERASE, 140399796551680, 140399863660543, +SNULL, 140400199340031, 140400266313727, +STORE, 140400199204864, 140400199340031, +STORE, 140400199340032, 140400266313727, +STORE, 140400627003392, 140400643788799, +SNULL, 140400316641280, 140400325029887, +STORE, 140400325029888, 140400333422591, +STORE, 140400316641280, 140400325029887, +SNULL, 140400325033983, 140400333422591, +STORE, 140400325029888, 140400325033983, +STORE, 140400325033984, 140400333422591, +SNULL, 140400627003392, 140400635396095, +STORE, 140400635396096, 140400643788799, +STORE, 140400627003392, 140400635396095, +SNULL, 140400635400191, 140400643788799, +STORE, 140400635396096, 140400635400191, +STORE, 140400635400192, 140400643788799, +SNULL, 140400434073599, 140400442462207, +STORE, 140400434069504, 140400434073599, +STORE, 140400434073600, 140400442462207, +STORE, 140400618610688, 140400635396095, +STORE, 140400610217984, 140400635396095, +SNULL, 140400954159104, 140400962547711, +STORE, 140400962547712, 140400987725823, +STORE, 140400954159104, 140400962547711, +SNULL, 140400962551807, 140400987725823, +STORE, 140400962547712, 140400962551807, +STORE, 140400962551808, 140400987725823, +SNULL, 140400299855872, 140400308244479, +STORE, 140400308244480, 140400316637183, +STORE, 140400299855872, 140400308244479, +SNULL, 140400308248575, 140400316637183, +STORE, 140400308244480, 140400308248575, +STORE, 140400308248576, 140400316637183, +STORE, 140400601825280, 140400635396095, +SNULL, 140400601829375, 140400635396095, +STORE, 140400601825280, 140400601829375, +STORE, 140400601829376, 140400635396095, +STORE, 140400576679936, 140400593465343, +SNULL, 140400576684031, 140400593465343, +STORE, 140400576679936, 140400576684031, +STORE, 140400576684032, 140400593465343, +SNULL, 140400643792896, 140400652181503, +STORE, 140400652181504, 140400668966911, +STORE, 140400643792896, 140400652181503, +SNULL, 140400652185599, 140400668966911, +STORE, 140400652181504, 140400652185599, +STORE, 140400652185600, 140400668966911, +STORE, 140399595225088, 140399796551679, +SNULL, 140399662333951, 140399796551679, +STORE, 140399595225088, 140399662333951, +STORE, 140399662333952, 140399796551679, +SNULL, 140399662333952, 140399729442815, +STORE, 140399729442816, 140399796551679, +STORE, 140399662333952, 140399729442815, +ERASE, 140399662333952, 140399729442815, +SNULL, 140399863795711, 140399930769407, +STORE, 140399863660544, 140399863795711, +STORE, 140399863795712, 140399930769407, +STORE, 140400568287232, 140400576679935, +SNULL, 140400568291327, 140400576679935, +STORE, 140400568287232, 140400568291327, +STORE, 140400568291328, 140400576679935, +SNULL, 140400467775487, 140400534749183, +STORE, 140400467640320, 140400467775487, +STORE, 140400467775488, 140400534749183, +SNULL, 140399729577983, 140399796551679, +STORE, 140399729442816, 140399729577983, +STORE, 140399729577984, 140399796551679, +SNULL, 140400601829376, 140400627003391, +STORE, 140400627003392, 140400635396095, +STORE, 140400601829376, 140400627003391, +SNULL, 140400627007487, 140400635396095, +STORE, 140400627003392, 140400627007487, +STORE, 140400627007488, 140400635396095, +STORE, 140400559894528, 140400568287231, +STORE, 140400551501824, 140400568287231, +STORE, 140400543109120, 140400568287231, +STORE, 140400459247616, 140400467640319, +STORE, 140400442466304, 140400467640319, +SNULL, 
140399595360255, 140399662333951, +STORE, 140399595225088, 140399595360255, +STORE, 140399595360256, 140399662333951, +SNULL, 140400962551808, 140400970940415, +STORE, 140400970940416, 140400987725823, +STORE, 140400962551808, 140400970940415, +SNULL, 140400970944511, 140400987725823, +STORE, 140400970940416, 140400970944511, +STORE, 140400970944512, 140400987725823, +SNULL, 140400652185600, 140400660574207, +STORE, 140400660574208, 140400668966911, +STORE, 140400652185600, 140400660574207, +SNULL, 140400660578303, 140400668966911, +STORE, 140400660574208, 140400660578303, +STORE, 140400660578304, 140400668966911, +SNULL, 140400576684032, 140400585072639, +STORE, 140400585072640, 140400593465343, +STORE, 140400576684032, 140400585072639, +SNULL, 140400585076735, 140400593465343, +STORE, 140400585072640, 140400585076735, +STORE, 140400585076736, 140400593465343, +STORE, 140400425676800, 140400434069503, +STORE, 140400417284096, 140400434069503, +STORE, 140400408891392, 140400434069503, +SNULL, 140400408891392, 140400417284095, +STORE, 140400417284096, 140400434069503, +STORE, 140400408891392, 140400417284095, +SNULL, 140400417288191, 140400434069503, +STORE, 140400417284096, 140400417288191, +STORE, 140400417288192, 140400434069503, +STORE, 140400283066368, 140400291459071, +SNULL, 140400601829376, 140400618610687, +STORE, 140400618610688, 140400627003391, +STORE, 140400601829376, 140400618610687, +SNULL, 140400618614783, 140400627003391, +STORE, 140400618610688, 140400618614783, +STORE, 140400618614784, 140400627003391, +SNULL, 140400601829376, 140400610217983, +STORE, 140400610217984, 140400618610687, +STORE, 140400601829376, 140400610217983, +SNULL, 140400610222079, 140400618610687, +STORE, 140400610217984, 140400610222079, +STORE, 140400610222080, 140400618610687, +STORE, 140400274673664, 140400291459071, +STORE, 140400190812160, 140400199204863, +STORE, 140400182419456, 140400199204863, +SNULL, 140400442466304, 140400450854911, +STORE, 140400450854912, 140400467640319, +STORE, 140400442466304, 140400450854911, +SNULL, 140400450859007, 140400467640319, +STORE, 140400450854912, 140400450859007, +STORE, 140400450859008, 140400467640319, +SNULL, 140400543109120, 140400559894527, +STORE, 140400559894528, 140400568287231, +STORE, 140400543109120, 140400559894527, +SNULL, 140400559898623, 140400568287231, +STORE, 140400559894528, 140400559898623, +STORE, 140400559898624, 140400568287231, +SNULL, 140400450859008, 140400459247615, +STORE, 140400459247616, 140400467640319, +STORE, 140400450859008, 140400459247615, +SNULL, 140400459251711, 140400467640319, +STORE, 140400459247616, 140400459251711, +STORE, 140400459251712, 140400467640319, +SNULL, 140400543113215, 140400559894527, +STORE, 140400543109120, 140400543113215, +STORE, 140400543113216, 140400559894527, +SNULL, 140400970944512, 140400979333119, +STORE, 140400979333120, 140400987725823, +STORE, 140400970944512, 140400979333119, +SNULL, 140400979337215, 140400987725823, +STORE, 140400979333120, 140400979337215, +STORE, 140400979337216, 140400987725823, +STORE, 140400174026752, 140400199204863, +SNULL, 140400174030847, 140400199204863, +STORE, 140400174026752, 140400174030847, +STORE, 140400174030848, 140400199204863, +SNULL, 140400274673664, 140400283066367, +STORE, 140400283066368, 140400291459071, +STORE, 140400274673664, 140400283066367, +SNULL, 140400283070463, 140400291459071, +STORE, 140400283066368, 140400283070463, +STORE, 140400283070464, 140400291459071, +STORE, 140400165634048, 140400174026751, +SNULL, 140400165638143, 
140400174026751, +STORE, 140400165634048, 140400165638143, +STORE, 140400165638144, 140400174026751, +SNULL, 140400174030848, 140400182419455, +STORE, 140400182419456, 140400199204863, +STORE, 140400174030848, 140400182419455, +SNULL, 140400182423551, 140400199204863, +STORE, 140400182419456, 140400182423551, +STORE, 140400182423552, 140400199204863, +SNULL, 140400182423552, 140400190812159, +STORE, 140400190812160, 140400199204863, +STORE, 140400182423552, 140400190812159, +SNULL, 140400190816255, 140400199204863, +STORE, 140400190812160, 140400190816255, +STORE, 140400190816256, 140400199204863, +STORE, 140400157241344, 140400165634047, +SNULL, 140400157245439, 140400165634047, +STORE, 140400157241344, 140400157245439, +STORE, 140400157245440, 140400165634047, +SNULL, 140400408895487, 140400417284095, +STORE, 140400408891392, 140400408895487, +STORE, 140400408895488, 140400417284095, +SNULL, 140400417288192, 140400425676799, +STORE, 140400425676800, 140400434069503, +STORE, 140400417288192, 140400425676799, +SNULL, 140400425680895, 140400434069503, +STORE, 140400425676800, 140400425680895, +STORE, 140400425680896, 140400434069503, +STORE, 140400148848640, 140400157241343, +SNULL, 140400148852735, 140400157241343, +STORE, 140400148848640, 140400148852735, +STORE, 140400148852736, 140400157241343, +SNULL, 140400543113216, 140400551501823, +STORE, 140400551501824, 140400559894527, +STORE, 140400543113216, 140400551501823, +SNULL, 140400551505919, 140400559894527, +STORE, 140400551501824, 140400551505919, +STORE, 140400551505920, 140400559894527, +STORE, 140400140455936, 140400148848639, +STORE, 140400048201728, 140400056594431, +SNULL, 140400140460031, 140400148848639, +STORE, 140400140455936, 140400140460031, +STORE, 140400140460032, 140400148848639, +STORE, 140400039809024, 140400056594431, +SNULL, 140400039813119, 140400056594431, +STORE, 140400039809024, 140400039813119, +STORE, 140400039813120, 140400056594431, +STORE, 140400031416320, 140400039809023, +STORE, 140400023023616, 140400039809023, +SNULL, 140400274677759, 140400283066367, +STORE, 140400274673664, 140400274677759, +STORE, 140400274677760, 140400283066367, +STORE, 140400014630912, 140400039809023, +STORE, 140400006238208, 140400039809023, +STORE, 140399997845504, 140400039809023, +SNULL, 140399997849599, 140400039809023, +STORE, 140399997845504, 140399997849599, +STORE, 140399997849600, 140400039809023, +STORE, 140399989452800, 140399997845503, +SNULL, 140399989456895, 140399997845503, +STORE, 140399989452800, 140399989456895, +STORE, 140399989456896, 140399997845503, +STORE, 140399981060096, 140399989452799, +SNULL, 140399981064191, 140399989452799, +STORE, 140399981060096, 140399981064191, +STORE, 140399981064192, 140399989452799, +STORE, 140399972667392, 140399981060095, +STORE, 140399964274688, 140399981060095, +SNULL, 140399964278783, 140399981060095, +STORE, 140399964274688, 140399964278783, +STORE, 140399964278784, 140399981060095, +SNULL, 140400039813120, 140400048201727, +STORE, 140400048201728, 140400056594431, +STORE, 140400039813120, 140400048201727, +SNULL, 140400048205823, 140400056594431, +STORE, 140400048201728, 140400048205823, +STORE, 140400048205824, 140400056594431, +SNULL, 140399997849600, 140400031416319, +STORE, 140400031416320, 140400039809023, +STORE, 140399997849600, 140400031416319, +SNULL, 140400031420415, 140400039809023, +STORE, 140400031416320, 140400031420415, +STORE, 140400031420416, 140400039809023, +STORE, 140399955881984, 140399964274687, +SNULL, 140399955886079, 140399964274687, +STORE, 
140399955881984, 140399955886079, +STORE, 140399955886080, 140399964274687, +STORE, 140399947489280, 140399955881983, +STORE, 140399939096576, 140399955881983, +STORE, 140399855267840, 140399863660543, +SNULL, 140399939100671, 140399955881983, +STORE, 140399939096576, 140399939100671, +STORE, 140399939100672, 140399955881983, +SNULL, 140399997849600, 140400014630911, +STORE, 140400014630912, 140400031416319, +STORE, 140399997849600, 140400014630911, +SNULL, 140400014635007, 140400031416319, +STORE, 140400014630912, 140400014635007, +STORE, 140400014635008, 140400031416319, +SNULL, 140400014635008, 140400023023615, +STORE, 140400023023616, 140400031416319, +STORE, 140400014635008, 140400023023615, +SNULL, 140400023027711, 140400031416319, +STORE, 140400023023616, 140400023027711, +STORE, 140400023027712, 140400031416319, +SNULL, 140399997849600, 140400006238207, +STORE, 140400006238208, 140400014630911, +STORE, 140399997849600, 140400006238207, +SNULL, 140400006242303, 140400014630911, +STORE, 140400006238208, 140400006242303, +STORE, 140400006242304, 140400014630911, +STORE, 140399846875136, 140399863660543, +STORE, 140399838482432, 140399863660543, +SNULL, 140399838486527, 140399863660543, +STORE, 140399838482432, 140399838486527, +STORE, 140399838486528, 140399863660543, +SNULL, 140399939100672, 140399947489279, +STORE, 140399947489280, 140399955881983, +STORE, 140399939100672, 140399947489279, +SNULL, 140399947493375, 140399955881983, +STORE, 140399947489280, 140399947493375, +STORE, 140399947493376, 140399955881983, +SNULL, 140399964278784, 140399972667391, +STORE, 140399972667392, 140399981060095, +STORE, 140399964278784, 140399972667391, +SNULL, 140399972671487, 140399981060095, +STORE, 140399972667392, 140399972671487, +STORE, 140399972671488, 140399981060095, +SNULL, 140399838486528, 140399855267839, +STORE, 140399855267840, 140399863660543, +STORE, 140399838486528, 140399855267839, +SNULL, 140399855271935, 140399863660543, +STORE, 140399855267840, 140399855271935, +STORE, 140399855271936, 140399863660543, +STORE, 140399830089728, 140399838482431, +SNULL, 140399830093823, 140399838482431, +STORE, 140399830089728, 140399830093823, +STORE, 140399830093824, 140399838482431, +STORE, 140399821697024, 140399830089727, +SNULL, 140399821701119, 140399830089727, +STORE, 140399821697024, 140399821701119, +STORE, 140399821701120, 140399830089727, +SNULL, 140399838486528, 140399846875135, +STORE, 140399846875136, 140399855267839, +STORE, 140399838486528, 140399846875135, +SNULL, 140399846879231, 140399855267839, +STORE, 140399846875136, 140399846879231, +STORE, 140399846879232, 140399855267839, +STORE, 140399813304320, 140399821697023, +STORE, 140399804911616, 140399821697023, +SNULL, 140399804915711, 140399821697023, +STORE, 140399804911616, 140399804915711, +STORE, 140399804915712, 140399821697023, +STORE, 140399721050112, 140399729442815, +SNULL, 140399804915712, 140399813304319, +STORE, 140399813304320, 140399821697023, +STORE, 140399804915712, 140399813304319, +SNULL, 140399813308415, 140399821697023, +STORE, 140399813304320, 140399813308415, +STORE, 140399813308416, 140399821697023, +SNULL, 140399721054207, 140399729442815, +STORE, 140399721050112, 140399721054207, +STORE, 140399721054208, 140399729442815, +STORE, 140401467105280, 140401467133951, +STORE, 140401279115264, 140401281306623, +SNULL, 140401279115264, 140401279205375, +STORE, 140401279205376, 140401281306623, +STORE, 140401279115264, 140401279205375, +SNULL, 140401281298431, 140401281306623, +STORE, 140401279205376, 
140401281298431, +STORE, 140401281298432, 140401281306623, +ERASE, 140401281298432, 140401281306623, +STORE, 140401281298432, 140401281306623, +SNULL, 140401281302527, 140401281306623, +STORE, 140401281298432, 140401281302527, +STORE, 140401281302528, 140401281306623, +ERASE, 140401467105280, 140401467133951, +ERASE, 140400056594432, 140400056598527, +ERASE, 140400056598528, 140400064987135, +ERASE, 140400635396096, 140400635400191, +ERASE, 140400635400192, 140400643788799, +ERASE, 140400408891392, 140400408895487, +ERASE, 140400408895488, 140400417284095, +ERASE, 140400299851776, 140400299855871, +ERASE, 140400299855872, 140400308244479, +ERASE, 140400627003392, 140400627007487, +ERASE, 140400627007488, 140400635396095, +ERASE, 140400954155008, 140400954159103, +ERASE, 140400954159104, 140400962547711, +ERASE, 140400291459072, 140400291463167, +ERASE, 140400291463168, 140400299851775, +ERASE, 140400643788800, 140400643792895, +ERASE, 140400643792896, 140400652181503, +ERASE, 140400325029888, 140400325033983, +ERASE, 140400325033984, 140400333422591, +ERASE, 140400610217984, 140400610222079, +ERASE, 140400610222080, 140400618610687, +ERASE, 140400190812160, 140400190816255, +ERASE, 140400190816256, 140400199204863, +ERASE, 140399964274688, 140399964278783, +ERASE, 140399964278784, 140399972667391, +ERASE, 140400945762304, 140400945766399, +ERASE, 140400945766400, 140400954155007, +ERASE, 140400568287232, 140400568291327, +ERASE, 140400568291328, 140400576679935, +ERASE, 140399972667392, 140399972671487, +ERASE, 140399972671488, 140399981060095, +ERASE, 140400962547712, 140400962551807, +ERASE, 140400962551808, 140400970940415, +ERASE, 140400987725824, 140400987729919, +ERASE, 140400987729920, 140400996118527, +ERASE, 140400652181504, 140400652185599, +ERASE, 140400652185600, 140400660574207, +ERASE, 140400450854912, 140400450859007, +ERASE, 140400450859008, 140400459247615, +ERASE, 140400031416320, 140400031420415, +ERASE, 140400031420416, 140400039809023, +ERASE, 140400308244480, 140400308248575, +ERASE, 140400308248576, 140400316637183, +ERASE, 140400434069504, 140400434073599, +ERASE, 140400434073600, 140400442462207, +ERASE, 140400543109120, 140400543113215, +ERASE, 140400543113216, 140400551501823, +ERASE, 140400023023616, 140400023027711, +ERASE, 140400023027712, 140400031416319, +ERASE, 140399813304320, 140399813308415, +ERASE, 140399813308416, 140399821697023, +ERASE, 140400316637184, 140400316641279, +ERASE, 140400316641280, 140400325029887, +ERASE, 140400585072640, 140400585076735, +ERASE, 140400585076736, 140400593465343, +ERASE, 140400148848640, 140400148852735, +ERASE, 140400148852736, 140400157241343, +ERASE, 140399955881984, 140399955886079, +ERASE, 140399955886080, 140399964274687, +ERASE, 140399821697024, 140399821701119, +ERASE, 140399821701120, 140399830089727, +ERASE, 140400601825280, 140400601829375, +ERASE, 140400601829376, 140400610217983, +ERASE, 140400979333120, 140400979337215, +ERASE, 140400979337216, 140400987725823, +ERASE, 140399997845504, 140399997849599, +ERASE, 140399997849600, 140400006238207, +ERASE, 140400459247616, 140400459251711, +ERASE, 140400459251712, 140400467640319, +ERASE, 140400551501824, 140400551505919, +ERASE, 140400551505920, 140400559894527, +ERASE, 140399939096576, 140399939100671, +ERASE, 140399939100672, 140399947489279, +ERASE, 140400442462208, 140400442466303, +ERASE, 140400442466304, 140400450854911, +ERASE, 140400576679936, 140400576684031, +ERASE, 140400576684032, 140400585072639, +ERASE, 140400559894528, 140400559898623, +ERASE, 
140400559898624, 140400568287231, +ERASE, 140400417284096, 140400417288191, +ERASE, 140400417288192, 140400425676799, +ERASE, 140400283066368, 140400283070463, +ERASE, 140400283070464, 140400291459071, + }; + unsigned long set33[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140734562918400, 140737488351231, +SNULL, 140734562922495, 140737488351231, +STORE, 140734562918400, 140734562922495, +STORE, 140734562787328, 140734562922495, +STORE, 94133878984704, 94133881237503, +SNULL, 94133879115775, 94133881237503, +STORE, 94133878984704, 94133879115775, +STORE, 94133879115776, 94133881237503, +ERASE, 94133879115776, 94133881237503, +STORE, 94133881208832, 94133881217023, +STORE, 94133881217024, 94133881237503, +STORE, 140583654043648, 140583656296447, +SNULL, 140583654187007, 140583656296447, +STORE, 140583654043648, 140583654187007, +STORE, 140583654187008, 140583656296447, +ERASE, 140583654187008, 140583656296447, +STORE, 140583656284160, 140583656292351, +STORE, 140583656292352, 140583656296447, +STORE, 140734564319232, 140734564323327, +STORE, 140734564306944, 140734564319231, +STORE, 140583656255488, 140583656284159, +STORE, 140583656247296, 140583656255487, +STORE, 140583651827712, 140583654043647, +SNULL, 140583651827712, 140583651926015, +STORE, 140583651926016, 140583654043647, +STORE, 140583651827712, 140583651926015, +SNULL, 140583654019071, 140583654043647, +STORE, 140583651926016, 140583654019071, +STORE, 140583654019072, 140583654043647, +SNULL, 140583654019072, 140583654027263, +STORE, 140583654027264, 140583654043647, +STORE, 140583654019072, 140583654027263, +ERASE, 140583654019072, 140583654027263, +STORE, 140583654019072, 140583654027263, +ERASE, 140583654027264, 140583654043647, +STORE, 140583654027264, 140583654043647, +STORE, 140583648030720, 140583651827711, +SNULL, 140583648030720, 140583649689599, +STORE, 140583649689600, 140583651827711, +STORE, 140583648030720, 140583649689599, +SNULL, 140583651786751, 140583651827711, +STORE, 140583649689600, 140583651786751, +STORE, 140583651786752, 140583651827711, +SNULL, 140583651786752, 140583651811327, +STORE, 140583651811328, 140583651827711, +STORE, 140583651786752, 140583651811327, +ERASE, 140583651786752, 140583651811327, +STORE, 140583651786752, 140583651811327, +ERASE, 140583651811328, 140583651827711, +STORE, 140583651811328, 140583651827711, +STORE, 140583656239104, 140583656255487, +SNULL, 140583651803135, 140583651811327, +STORE, 140583651786752, 140583651803135, +STORE, 140583651803136, 140583651811327, +SNULL, 140583654023167, 140583654027263, +STORE, 140583654019072, 140583654023167, +STORE, 140583654023168, 140583654027263, +SNULL, 94133881212927, 94133881217023, +STORE, 94133881208832, 94133881212927, +STORE, 94133881212928, 94133881217023, +SNULL, 140583656288255, 140583656292351, +STORE, 140583656284160, 140583656288255, +STORE, 140583656288256, 140583656292351, +ERASE, 140583656255488, 140583656284159, +STORE, 94133881733120, 94133881868287, +STORE, 140583639638016, 140583648030719, +SNULL, 140583639642111, 140583648030719, +STORE, 140583639638016, 140583639642111, +STORE, 140583639642112, 140583648030719, +STORE, 140583631245312, 140583639638015, +STORE, 140583497027584, 140583631245311, +SNULL, 140583497027584, 140583540621311, +STORE, 140583540621312, 140583631245311, +STORE, 140583497027584, 140583540621311, +ERASE, 140583497027584, 140583540621311, +SNULL, 140583607730175, 140583631245311, +STORE, 140583540621312, 140583607730175, +STORE, 140583607730176, 140583631245311, +ERASE, 
140583607730176, 140583631245311, +SNULL, 140583540756479, 140583607730175, +STORE, 140583540621312, 140583540756479, +STORE, 140583540756480, 140583607730175, +SNULL, 140583631249407, 140583639638015, +STORE, 140583631245312, 140583631249407, +STORE, 140583631249408, 140583639638015, +STORE, 140583622852608, 140583631245311, +SNULL, 140583622856703, 140583631245311, +STORE, 140583622852608, 140583622856703, +STORE, 140583622856704, 140583631245311, +STORE, 140583614459904, 140583622852607, +SNULL, 140583614463999, 140583622852607, +STORE, 140583614459904, 140583614463999, +STORE, 140583614464000, 140583622852607, +STORE, 140583532228608, 140583540621311, +SNULL, 140583532232703, 140583540621311, +STORE, 140583532228608, 140583532232703, +STORE, 140583532232704, 140583540621311, +STORE, 140583523835904, 140583532228607, +STORE, 140583515443200, 140583532228607, +STORE, 140583507050496, 140583532228607, +STORE, 140583372832768, 140583507050495, +STORE, 140583364440064, 140583372832767, +STORE, 140583230222336, 140583364440063, +STORE, 140583096004608, 140583364440063, +SNULL, 140583230222335, 140583364440063, +STORE, 140583096004608, 140583230222335, +STORE, 140583230222336, 140583364440063, +SNULL, 140583230222336, 140583272185855, +STORE, 140583272185856, 140583364440063, +STORE, 140583230222336, 140583272185855, +ERASE, 140583230222336, 140583272185855, +STORE, 140582961786880, 140583230222335, +SNULL, 140583372832768, 140583406403583, +STORE, 140583406403584, 140583507050495, +STORE, 140583372832768, 140583406403583, +ERASE, 140583372832768, 140583406403583, +SNULL, 140583473512447, 140583507050495, +STORE, 140583406403584, 140583473512447, +STORE, 140583473512448, 140583507050495, +ERASE, 140583473512448, 140583507050495, +SNULL, 140583096004607, 140583230222335, +STORE, 140582961786880, 140583096004607, +STORE, 140583096004608, 140583230222335, +SNULL, 140583096004608, 140583137968127, +STORE, 140583137968128, 140583230222335, +STORE, 140583096004608, 140583137968127, +ERASE, 140583096004608, 140583137968127, +SNULL, 140583339294719, 140583364440063, +STORE, 140583272185856, 140583339294719, +STORE, 140583339294720, 140583364440063, +ERASE, 140583339294720, 140583364440063, +SNULL, 140583272321023, 140583339294719, +STORE, 140583272185856, 140583272321023, +STORE, 140583272321024, 140583339294719, +SNULL, 140582961786880, 140583003750399, +STORE, 140583003750400, 140583096004607, +STORE, 140582961786880, 140583003750399, +ERASE, 140582961786880, 140583003750399, + }; + + unsigned long set34[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140731327180800, 140737488351231, +SNULL, 140731327184895, 140737488351231, +STORE, 140731327180800, 140731327184895, +STORE, 140731327049728, 140731327184895, +STORE, 94632924487680, 94632926740479, +SNULL, 94632924618751, 94632926740479, +STORE, 94632924487680, 94632924618751, +STORE, 94632924618752, 94632926740479, +ERASE, 94632924618752, 94632926740479, +STORE, 94632926711808, 94632926719999, +STORE, 94632926720000, 94632926740479, +STORE, 140012544888832, 140012547141631, +SNULL, 140012545032191, 140012547141631, +STORE, 140012544888832, 140012545032191, +STORE, 140012545032192, 140012547141631, +ERASE, 140012545032192, 140012547141631, +STORE, 140012547129344, 140012547137535, +STORE, 140012547137536, 140012547141631, +STORE, 140731327725568, 140731327729663, +STORE, 140731327713280, 140731327725567, +STORE, 140012547100672, 140012547129343, +STORE, 140012547092480, 140012547100671, +STORE, 140012542672896, 140012544888831, +SNULL, 
140012542672896, 140012542771199, +STORE, 140012542771200, 140012544888831, +STORE, 140012542672896, 140012542771199, +SNULL, 140012544864255, 140012544888831, +STORE, 140012542771200, 140012544864255, +STORE, 140012544864256, 140012544888831, +SNULL, 140012544864256, 140012544872447, +STORE, 140012544872448, 140012544888831, +STORE, 140012544864256, 140012544872447, +ERASE, 140012544864256, 140012544872447, +STORE, 140012544864256, 140012544872447, +ERASE, 140012544872448, 140012544888831, +STORE, 140012544872448, 140012544888831, +STORE, 140012538875904, 140012542672895, +SNULL, 140012538875904, 140012540534783, +STORE, 140012540534784, 140012542672895, +STORE, 140012538875904, 140012540534783, +SNULL, 140012542631935, 140012542672895, +STORE, 140012540534784, 140012542631935, +STORE, 140012542631936, 140012542672895, +SNULL, 140012542631936, 140012542656511, +STORE, 140012542656512, 140012542672895, +STORE, 140012542631936, 140012542656511, +ERASE, 140012542631936, 140012542656511, +STORE, 140012542631936, 140012542656511, +ERASE, 140012542656512, 140012542672895, +STORE, 140012542656512, 140012542672895, +STORE, 140012547084288, 140012547100671, +SNULL, 140012542648319, 140012542656511, +STORE, 140012542631936, 140012542648319, +STORE, 140012542648320, 140012542656511, +SNULL, 140012544868351, 140012544872447, +STORE, 140012544864256, 140012544868351, +STORE, 140012544868352, 140012544872447, +SNULL, 94632926715903, 94632926719999, +STORE, 94632926711808, 94632926715903, +STORE, 94632926715904, 94632926719999, +SNULL, 140012547133439, 140012547137535, +STORE, 140012547129344, 140012547133439, +STORE, 140012547133440, 140012547137535, +ERASE, 140012547100672, 140012547129343, +STORE, 94632939606016, 94632939741183, +STORE, 140012530483200, 140012538875903, +SNULL, 140012530487295, 140012538875903, +STORE, 140012530483200, 140012530487295, +STORE, 140012530487296, 140012538875903, +STORE, 140012522090496, 140012530483199, +STORE, 140012387872768, 140012522090495, +SNULL, 140012387872768, 140012444188671, +STORE, 140012444188672, 140012522090495, +STORE, 140012387872768, 140012444188671, +ERASE, 140012387872768, 140012444188671, +SNULL, 140012511297535, 140012522090495, +STORE, 140012444188672, 140012511297535, +STORE, 140012511297536, 140012522090495, +ERASE, 140012511297536, 140012522090495, +SNULL, 140012444323839, 140012511297535, +STORE, 140012444188672, 140012444323839, +STORE, 140012444323840, 140012511297535, +SNULL, 140012522094591, 140012530483199, +STORE, 140012522090496, 140012522094591, +STORE, 140012522094592, 140012530483199, +STORE, 140012513697792, 140012522090495, +SNULL, 140012513701887, 140012522090495, +STORE, 140012513697792, 140012513701887, +STORE, 140012513701888, 140012522090495, +STORE, 140012435795968, 140012444188671, +SNULL, 140012435800063, 140012444188671, +STORE, 140012435795968, 140012435800063, +STORE, 140012435800064, 140012444188671, +STORE, 140012427403264, 140012435795967, +SNULL, 140012427407359, 140012435795967, +STORE, 140012427403264, 140012427407359, +STORE, 140012427407360, 140012435795967, +STORE, 140012419010560, 140012427403263, +STORE, 140012410617856, 140012427403263, +STORE, 140012276400128, 140012410617855, +STORE, 140012268007424, 140012276400127, +STORE, 140012133789696, 140012268007423, +SNULL, 140012133789696, 140012175753215, +STORE, 140012175753216, 140012268007423, +STORE, 140012133789696, 140012175753215, +ERASE, 140012133789696, 140012175753215, +STORE, 140012041535488, 140012268007423, +SNULL, 140012108644351, 140012268007423, 
+STORE, 140012041535488, 140012108644351, +STORE, 140012108644352, 140012268007423, +SNULL, 140012108644352, 140012175753215, +STORE, 140012175753216, 140012268007423, +STORE, 140012108644352, 140012175753215, +ERASE, 140012108644352, 140012175753215, +SNULL, 140012276400128, 140012309970943, +STORE, 140012309970944, 140012410617855, +STORE, 140012276400128, 140012309970943, +ERASE, 140012276400128, 140012309970943, +STORE, 140012301578240, 140012309970943, +STORE, 140012041535488, 140012268007423, +SNULL, 140012242862079, 140012268007423, +STORE, 140012041535488, 140012242862079, +STORE, 140012242862080, 140012268007423, +ERASE, 140012242862080, 140012268007423, +SNULL, 140012041670655, 140012242862079, +STORE, 140012041535488, 140012041670655, +STORE, 140012041670656, 140012242862079, +SNULL, 140012041670656, 140012108644351, +STORE, 140012108644352, 140012242862079, +STORE, 140012041670656, 140012108644351, +SNULL, 140012108779519, 140012242862079, +STORE, 140012108644352, 140012108779519, +STORE, 140012108779520, 140012242862079, +SNULL, 140012377079807, 140012410617855, +STORE, 140012309970944, 140012377079807, +STORE, 140012377079808, 140012410617855, +ERASE, 140012377079808, 140012410617855, +SNULL, 140012310106111, 140012377079807, +STORE, 140012309970944, 140012310106111, +STORE, 140012310106112, 140012377079807, +SNULL, 140012410621951, 140012427403263, +STORE, 140012410617856, 140012410621951, +STORE, 140012410621952, 140012427403263, +SNULL, 140012108779520, 140012175753215, +STORE, 140012175753216, 140012242862079, +STORE, 140012108779520, 140012175753215, +SNULL, 140012175888383, 140012242862079, +STORE, 140012175753216, 140012175888383, +STORE, 140012175888384, 140012242862079, +SNULL, 140012301582335, 140012309970943, +STORE, 140012301578240, 140012301582335, +STORE, 140012301582336, 140012309970943, +SNULL, 140012410621952, 140012419010559, +STORE, 140012419010560, 140012427403263, +STORE, 140012410621952, 140012419010559, +SNULL, 140012419014655, 140012427403263, +STORE, 140012419010560, 140012419014655, +STORE, 140012419014656, 140012427403263, +SNULL, 140012268011519, 140012276400127, +STORE, 140012268007424, 140012268011519, +STORE, 140012268011520, 140012276400127, +STORE, 140012402225152, 140012410617855, +STORE, 140012393832448, 140012410617855, +SNULL, 140012393832448, 140012402225151, +STORE, 140012402225152, 140012410617855, +STORE, 140012393832448, 140012402225151, +SNULL, 140012402229247, 140012410617855, +STORE, 140012402225152, 140012402229247, +STORE, 140012402229248, 140012410617855, +STORE, 140012385439744, 140012402225151, +SNULL, 140012385439744, 140012393832447, +STORE, 140012393832448, 140012402225151, +STORE, 140012385439744, 140012393832447, +SNULL, 140012393836543, 140012402225151, +STORE, 140012393832448, 140012393836543, +STORE, 140012393836544, 140012402225151, +STORE, 140012293185536, 140012301578239, +STORE, 140012284792832, 140012301578239, +SNULL, 140012284792832, 140012293185535, +STORE, 140012293185536, 140012301578239, +STORE, 140012284792832, 140012293185535, +SNULL, 140012293189631, 140012301578239, +STORE, 140012293185536, 140012293189631, +STORE, 140012293189632, 140012301578239, +STORE, 140012268011520, 140012284792831, +SNULL, 140012385443839, 140012393832447, +STORE, 140012385439744, 140012385443839, +STORE, 140012385443840, 140012393832447, +STORE, 140012259614720, 140012268007423, +SNULL, 140012259618815, 140012268007423, +STORE, 140012259614720, 140012259618815, +STORE, 140012259618816, 140012268007423, +STORE, 140012251222016, 
140012259614719, +SNULL, 140012251226111, 140012259614719, +STORE, 140012251222016, 140012251226111, +STORE, 140012251226112, 140012259614719, +SNULL, 140012284796927, 140012293185535, +STORE, 140012284792832, 140012284796927, +STORE, 140012284796928, 140012293185535, +SNULL, 140012268011520, 140012276400127, +STORE, 140012276400128, 140012284792831, +STORE, 140012268011520, 140012276400127, +SNULL, 140012276404223, 140012284792831, +STORE, 140012276400128, 140012276404223, +STORE, 140012276404224, 140012284792831, +STORE, 140012033142784, 140012041535487, +SNULL, 140012033146879, 140012041535487, +STORE, 140012033142784, 140012033146879, +STORE, 140012033146880, 140012041535487, +STORE, 140012024750080, 140012033142783, +STORE, 140012016357376, 140012033142783, +SNULL, 140012016357376, 140012024750079, +STORE, 140012024750080, 140012033142783, +STORE, 140012016357376, 140012024750079, +SNULL, 140012024754175, 140012033142783, +STORE, 140012024750080, 140012024754175, +STORE, 140012024754176, 140012033142783, +SNULL, 140012016361471, 140012024750079, +STORE, 140012016357376, 140012016361471, +STORE, 140012016361472, 140012024750079, +STORE, 140012007964672, 140012016357375, +SNULL, 140012007968767, 140012016357375, +STORE, 140012007964672, 140012007968767, +STORE, 140012007968768, 140012016357375, +STORE, 140011999571968, 140012007964671, +STORE, 140011991179264, 140012007964671, +STORE, 140011856961536, 140011991179263, +STORE, 140011848568832, 140011856961535, +STORE, 140011714351104, 140011848568831, +SNULL, 140011714351104, 140011773100031, +STORE, 140011773100032, 140011848568831, +STORE, 140011714351104, 140011773100031, +ERASE, 140011714351104, 140011773100031, +STORE, 140011764707328, 140011773100031, +STORE, 140011756314624, 140011773100031, +STORE, 140011622096896, 140011756314623, +STORE, 140011613704192, 140011622096895, +STORE, 140011479486464, 140011613704191, +STORE, 140011471093760, 140011479486463, +SNULL, 140011479486464, 140011504664575, +STORE, 140011504664576, 140011613704191, +STORE, 140011479486464, 140011504664575, +ERASE, 140011479486464, 140011504664575, +STORE, 140011496271872, 140011504664575, +STORE, 140011487879168, 140011504664575, +STORE, 140011336876032, 140011471093759, +SNULL, 140011336876032, 140011370446847, +STORE, 140011370446848, 140011471093759, +STORE, 140011336876032, 140011370446847, +ERASE, 140011336876032, 140011370446847, +STORE, 140011471093760, 140011487879167, +STORE, 140011362054144, 140011370446847, +SNULL, 140011362058239, 140011370446847, +STORE, 140011362054144, 140011362058239, +STORE, 140011362058240, 140011370446847, +STORE, 140011353661440, 140011362054143, +STORE, 140011345268736, 140011362054143, +SNULL, 140011345272831, 140011362054143, +STORE, 140011345268736, 140011345272831, +STORE, 140011345272832, 140011362054143, +STORE, 140011336876032, 140011345268735, +STORE, 140011328483328, 140011345268735, +SNULL, 140011328487423, 140011345268735, +STORE, 140011328483328, 140011328487423, +STORE, 140011328487424, 140011345268735, +STORE, 140011320090624, 140011328483327, +STORE, 140011185872896, 140011320090623, +SNULL, 140011185872896, 140011236229119, +STORE, 140011236229120, 140011320090623, +STORE, 140011185872896, 140011236229119, +ERASE, 140011185872896, 140011236229119, +SNULL, 140011856961536, 140011907317759, +STORE, 140011907317760, 140011991179263, +STORE, 140011856961536, 140011907317759, +ERASE, 140011856961536, 140011907317759, +SNULL, 140011974426623, 140011991179263, +STORE, 140011907317760, 140011974426623, +STORE, 
140011974426624, 140011991179263, +ERASE, 140011974426624, 140011991179263, +SNULL, 140011840208895, 140011848568831, +STORE, 140011773100032, 140011840208895, +STORE, 140011840208896, 140011848568831, +ERASE, 140011840208896, 140011848568831, +SNULL, 140011773235199, 140011840208895, +STORE, 140011773100032, 140011773235199, +STORE, 140011773235200, 140011840208895, +STORE, 140011102011392, 140011320090623, +SNULL, 140011169120255, 140011320090623, +STORE, 140011102011392, 140011169120255, +STORE, 140011169120256, 140011320090623, +SNULL, 140011169120256, 140011236229119, +STORE, 140011236229120, 140011320090623, +STORE, 140011169120256, 140011236229119, +ERASE, 140011169120256, 140011236229119, +SNULL, 140011622096896, 140011638882303, +STORE, 140011638882304, 140011756314623, +STORE, 140011622096896, 140011638882303, +ERASE, 140011622096896, 140011638882303, +SNULL, 140011705991167, 140011756314623, +STORE, 140011638882304, 140011705991167, +STORE, 140011705991168, 140011756314623, +ERASE, 140011705991168, 140011756314623, +SNULL, 140011571773439, 140011613704191, +STORE, 140011504664576, 140011571773439, +STORE, 140011571773440, 140011613704191, +ERASE, 140011571773440, 140011613704191, +STORE, 140010967793664, 140011169120255, +SNULL, 140011034902527, 140011169120255, +STORE, 140010967793664, 140011034902527, +STORE, 140011034902528, 140011169120255, +SNULL, 140011034902528, 140011102011391, +STORE, 140011102011392, 140011169120255, +STORE, 140011034902528, 140011102011391, +ERASE, 140011034902528, 140011102011391, +STORE, 140010833575936, 140011034902527, +SNULL, 140011437555711, 140011471093759, +STORE, 140011370446848, 140011437555711, +STORE, 140011437555712, 140011471093759, +ERASE, 140011437555712, 140011471093759, +SNULL, 140011370582015, 140011437555711, +STORE, 140011370446848, 140011370582015, +STORE, 140011370582016, 140011437555711, +STORE, 140010699358208, 140011034902527, +SNULL, 140011487883263, 140011504664575, +STORE, 140011487879168, 140011487883263, +STORE, 140011487883264, 140011504664575, +SNULL, 140011345272832, 140011353661439, +STORE, 140011353661440, 140011362054143, +STORE, 140011345272832, 140011353661439, +SNULL, 140011353665535, 140011362054143, +STORE, 140011353661440, 140011353665535, +STORE, 140011353665536, 140011362054143, +SNULL, 140011328487424, 140011336876031, +STORE, 140011336876032, 140011345268735, +STORE, 140011328487424, 140011336876031, +SNULL, 140011336880127, 140011345268735, +STORE, 140011336876032, 140011336880127, +STORE, 140011336880128, 140011345268735, +SNULL, 140011303337983, 140011320090623, +STORE, 140011236229120, 140011303337983, +STORE, 140011303337984, 140011320090623, +ERASE, 140011303337984, 140011320090623, +SNULL, 140011907452927, 140011974426623, +STORE, 140011907317760, 140011907452927, +STORE, 140011907452928, 140011974426623, +SNULL, 140011102146559, 140011169120255, +STORE, 140011102011392, 140011102146559, +STORE, 140011102146560, 140011169120255, +SNULL, 140011639017471, 140011705991167, +STORE, 140011638882304, 140011639017471, +STORE, 140011639017472, 140011705991167, +SNULL, 140011504799743, 140011571773439, +STORE, 140011504664576, 140011504799743, +STORE, 140011504799744, 140011571773439, +SNULL, 140011613708287, 140011622096895, +STORE, 140011613704192, 140011613708287, +STORE, 140011613708288, 140011622096895, +SNULL, 140010699358208, 140010967793663, +STORE, 140010967793664, 140011034902527, +STORE, 140010699358208, 140010967793663, +SNULL, 140010967928831, 140011034902527, +STORE, 140010967793664, 
140010967928831, +STORE, 140010967928832, 140011034902527, +SNULL, 140010900684799, 140010967793663, +STORE, 140010699358208, 140010900684799, +STORE, 140010900684800, 140010967793663, +ERASE, 140010900684800, 140010967793663, +SNULL, 140010766467071, 140010900684799, +STORE, 140010699358208, 140010766467071, +STORE, 140010766467072, 140010900684799, +SNULL, 140010766467072, 140010833575935, +STORE, 140010833575936, 140010900684799, +STORE, 140010766467072, 140010833575935, +ERASE, 140010766467072, 140010833575935, +SNULL, 140010699493375, 140010766467071, +STORE, 140010699358208, 140010699493375, +STORE, 140010699493376, 140010766467071, +SNULL, 140011848572927, 140011856961535, +STORE, 140011848568832, 140011848572927, +STORE, 140011848572928, 140011856961535, +STORE, 140011982786560, 140012007964671, +STORE, 140011898925056, 140011907317759, +SNULL, 140011898929151, 140011907317759, +STORE, 140011898925056, 140011898929151, +STORE, 140011898929152, 140011907317759, +SNULL, 140011320094719, 140011328483327, +STORE, 140011320090624, 140011320094719, +STORE, 140011320094720, 140011328483327, +STORE, 140011890532352, 140011898925055, +STORE, 140011882139648, 140011898925055, +SNULL, 140011882143743, 140011898925055, +STORE, 140011882139648, 140011882143743, +STORE, 140011882143744, 140011898925055, +STORE, 140011873746944, 140011882139647, +SNULL, 140011873751039, 140011882139647, +STORE, 140011873746944, 140011873751039, +STORE, 140011873751040, 140011882139647, +SNULL, 140011236364287, 140011303337983, +STORE, 140011236229120, 140011236364287, +STORE, 140011236364288, 140011303337983, +SNULL, 140011756318719, 140011773100031, +STORE, 140011756314624, 140011756318719, +STORE, 140011756318720, 140011773100031, +SNULL, 140011756318720, 140011764707327, +STORE, 140011764707328, 140011773100031, +STORE, 140011756318720, 140011764707327, +SNULL, 140011764711423, 140011773100031, +STORE, 140011764707328, 140011764711423, +STORE, 140011764711424, 140011773100031, +SNULL, 140011471097855, 140011487879167, +STORE, 140011471093760, 140011471097855, +STORE, 140011471097856, 140011487879167, +SNULL, 140010833711103, 140010900684799, +STORE, 140010833575936, 140010833711103, +STORE, 140010833711104, 140010900684799, +SNULL, 140011982790655, 140012007964671, +STORE, 140011982786560, 140011982790655, +STORE, 140011982790656, 140012007964671, +STORE, 140011865354240, 140011873746943, +STORE, 140011848572928, 140011865354239, +SNULL, 140011848572928, 140011856961535, +STORE, 140011856961536, 140011865354239, +STORE, 140011848572928, 140011856961535, +SNULL, 140011856965631, 140011865354239, +STORE, 140011856961536, 140011856965631, +STORE, 140011856965632, 140011865354239, +STORE, 140011747921920, 140011756314623, +STORE, 140011739529216, 140011756314623, +SNULL, 140011471097856, 140011479486463, +STORE, 140011479486464, 140011487879167, +STORE, 140011471097856, 140011479486463, +SNULL, 140011479490559, 140011487879167, +STORE, 140011479486464, 140011479490559, +STORE, 140011479490560, 140011487879167, +STORE, 140011731136512, 140011756314623, +STORE, 140011722743808, 140011756314623, +SNULL, 140011982790656, 140011999571967, +STORE, 140011999571968, 140012007964671, +STORE, 140011982790656, 140011999571967, +SNULL, 140011999576063, 140012007964671, +STORE, 140011999571968, 140011999576063, +STORE, 140011999576064, 140012007964671, +STORE, 140011714351104, 140011756314623, +SNULL, 140011882143744, 140011890532351, +STORE, 140011890532352, 140011898925055, +STORE, 140011882143744, 140011890532351, +SNULL, 
140011890536447, 140011898925055, +STORE, 140011890532352, 140011890536447, +STORE, 140011890536448, 140011898925055, +STORE, 140011630489600, 140011638882303, +STORE, 140011613708288, 140011638882303, +STORE, 140011605311488, 140011613704191, +STORE, 140011596918784, 140011613704191, +STORE, 140011588526080, 140011613704191, +SNULL, 140011487883264, 140011496271871, +STORE, 140011496271872, 140011504664575, +STORE, 140011487883264, 140011496271871, +SNULL, 140011496275967, 140011504664575, +STORE, 140011496271872, 140011496275967, +STORE, 140011496275968, 140011504664575, +STORE, 140011580133376, 140011613704191, +SNULL, 140011580137471, 140011613704191, +STORE, 140011580133376, 140011580137471, +STORE, 140011580137472, 140011613704191, +SNULL, 140011982790656, 140011991179263, +STORE, 140011991179264, 140011999571967, +STORE, 140011982790656, 140011991179263, +SNULL, 140011991183359, 140011999571967, +STORE, 140011991179264, 140011991183359, +STORE, 140011991183360, 140011999571967, +SNULL, 140011865358335, 140011873746943, +STORE, 140011865354240, 140011865358335, +STORE, 140011865358336, 140011873746943, +STORE, 140011462701056, 140011471093759, +SNULL, 140011714351104, 140011739529215, +STORE, 140011739529216, 140011756314623, +STORE, 140011714351104, 140011739529215, +SNULL, 140011739533311, 140011756314623, +STORE, 140011739529216, 140011739533311, +STORE, 140011739533312, 140011756314623, +SNULL, 140011739533312, 140011747921919, +STORE, 140011747921920, 140011756314623, +STORE, 140011739533312, 140011747921919, +SNULL, 140011747926015, 140011756314623, +STORE, 140011747921920, 140011747926015, +STORE, 140011747926016, 140011756314623, +SNULL, 140011613708288, 140011630489599, +STORE, 140011630489600, 140011638882303, +STORE, 140011613708288, 140011630489599, +SNULL, 140011630493695, 140011638882303, +STORE, 140011630489600, 140011630493695, +STORE, 140011630493696, 140011638882303, +SNULL, 140011714351104, 140011722743807, +STORE, 140011722743808, 140011739529215, +STORE, 140011714351104, 140011722743807, +SNULL, 140011722747903, 140011739529215, +STORE, 140011722743808, 140011722747903, +STORE, 140011722747904, 140011739529215, +SNULL, 140011714355199, 140011722743807, +STORE, 140011714351104, 140011714355199, +STORE, 140011714355200, 140011722743807, +SNULL, 140011722747904, 140011731136511, +STORE, 140011731136512, 140011739529215, +STORE, 140011722747904, 140011731136511, +SNULL, 140011731140607, 140011739529215, +STORE, 140011731136512, 140011731140607, +STORE, 140011731140608, 140011739529215, +STORE, 140011454308352, 140011471093759, +STORE, 140011445915648, 140011471093759, +SNULL, 140011580137472, 140011588526079, +STORE, 140011588526080, 140011613704191, +STORE, 140011580137472, 140011588526079, +SNULL, 140011588530175, 140011613704191, +STORE, 140011588526080, 140011588530175, +STORE, 140011588530176, 140011613704191, +SNULL, 140011445915648, 140011462701055, +STORE, 140011462701056, 140011471093759, +STORE, 140011445915648, 140011462701055, +SNULL, 140011462705151, 140011471093759, +STORE, 140011462701056, 140011462705151, +STORE, 140011462705152, 140011471093759, +SNULL, 140011588530176, 140011596918783, +STORE, 140011596918784, 140011613704191, +STORE, 140011588530176, 140011596918783, +SNULL, 140011596922879, 140011613704191, +STORE, 140011596918784, 140011596922879, +STORE, 140011596922880, 140011613704191, +SNULL, 140011596922880, 140011605311487, +STORE, 140011605311488, 140011613704191, +STORE, 140011596922880, 140011605311487, +SNULL, 140011605315583, 
140011613704191, +STORE, 140011605311488, 140011605315583, +STORE, 140011605315584, 140011613704191, +SNULL, 140011613708288, 140011622096895, +STORE, 140011622096896, 140011630489599, +STORE, 140011613708288, 140011622096895, +SNULL, 140011622100991, 140011630489599, +STORE, 140011622096896, 140011622100991, +STORE, 140011622100992, 140011630489599, +STORE, 140011311697920, 140011320090623, +STORE, 140011227836416, 140011236229119, +STORE, 140011219443712, 140011236229119, +SNULL, 140011219447807, 140011236229119, +STORE, 140011219443712, 140011219447807, +STORE, 140011219447808, 140011236229119, +STORE, 140011211051008, 140011219443711, +STORE, 140011202658304, 140011219443711, +SNULL, 140011202662399, 140011219443711, +STORE, 140011202658304, 140011202662399, +STORE, 140011202662400, 140011219443711, +STORE, 140011194265600, 140011202658303, +STORE, 140011185872896, 140011202658303, +STORE, 140011177480192, 140011202658303, +STORE, 140011093618688, 140011102011391, +SNULL, 140011445915648, 140011454308351, +STORE, 140011454308352, 140011462701055, +STORE, 140011445915648, 140011454308351, +SNULL, 140011454312447, 140011462701055, +STORE, 140011454308352, 140011454312447, +STORE, 140011454312448, 140011462701055, +STORE, 140011085225984, 140011102011391, +SNULL, 140011085230079, 140011102011391, +STORE, 140011085225984, 140011085230079, +STORE, 140011085230080, 140011102011391, +SNULL, 140011177484287, 140011202658303, +STORE, 140011177480192, 140011177484287, +STORE, 140011177484288, 140011202658303, +SNULL, 140011445919743, 140011454308351, +STORE, 140011445915648, 140011445919743, +STORE, 140011445919744, 140011454308351, +SNULL, 140011177484288, 140011185872895, +STORE, 140011185872896, 140011202658303, +STORE, 140011177484288, 140011185872895, +SNULL, 140011185876991, 140011202658303, +STORE, 140011185872896, 140011185876991, +STORE, 140011185876992, 140011202658303, +STORE, 140011076833280, 140011085225983, +SNULL, 140011202662400, 140011211051007, +STORE, 140011211051008, 140011219443711, +STORE, 140011202662400, 140011211051007, +SNULL, 140011211055103, 140011219443711, +STORE, 140011211051008, 140011211055103, +STORE, 140011211055104, 140011219443711, +SNULL, 140011185876992, 140011194265599, +STORE, 140011194265600, 140011202658303, +STORE, 140011185876992, 140011194265599, +SNULL, 140011194269695, 140011202658303, +STORE, 140011194265600, 140011194269695, +STORE, 140011194269696, 140011202658303, +STORE, 140011068440576, 140011085225983, +SNULL, 140011311702015, 140011320090623, +STORE, 140011311697920, 140011311702015, +STORE, 140011311702016, 140011320090623, +STORE, 140011060047872, 140011085225983, +SNULL, 140011060051967, 140011085225983, +STORE, 140011060047872, 140011060051967, +STORE, 140011060051968, 140011085225983, +STORE, 140011051655168, 140011060047871, +STORE, 140011043262464, 140011060047871, +SNULL, 140011043266559, 140011060047871, +STORE, 140011043262464, 140011043266559, +STORE, 140011043266560, 140011060047871, +SNULL, 140011219447808, 140011227836415, +STORE, 140011227836416, 140011236229119, +STORE, 140011219447808, 140011227836415, +SNULL, 140011227840511, 140011236229119, +STORE, 140011227836416, 140011227840511, +STORE, 140011227840512, 140011236229119, +SNULL, 140011085230080, 140011093618687, +STORE, 140011093618688, 140011102011391, +STORE, 140011085230080, 140011093618687, +SNULL, 140011093622783, 140011102011391, +STORE, 140011093618688, 140011093622783, +STORE, 140011093622784, 140011102011391, +STORE, 140010959400960, 140010967793663, +STORE, 
140010951008256, 140010967793663, +SNULL, 140010951008256, 140010959400959, +STORE, 140010959400960, 140010967793663, +STORE, 140010951008256, 140010959400959, +SNULL, 140010959405055, 140010967793663, +STORE, 140010959400960, 140010959405055, +STORE, 140010959405056, 140010967793663, +STORE, 140010942615552, 140010959400959, +STORE, 140010934222848, 140010959400959, +SNULL, 140011060051968, 140011076833279, +STORE, 140011076833280, 140011085225983, +STORE, 140011060051968, 140011076833279, +SNULL, 140011076837375, 140011085225983, +STORE, 140011076833280, 140011076837375, +STORE, 140011076837376, 140011085225983, +SNULL, 140011043266560, 140011051655167, +STORE, 140011051655168, 140011060047871, +STORE, 140011043266560, 140011051655167, +SNULL, 140011051659263, 140011060047871, +STORE, 140011051655168, 140011051659263, +STORE, 140011051659264, 140011060047871, +STORE, 140010925830144, 140010959400959, +SNULL, 140011060051968, 140011068440575, +STORE, 140011068440576, 140011076833279, +STORE, 140011060051968, 140011068440575, +SNULL, 140011068444671, 140011076833279, +STORE, 140011068440576, 140011068444671, +STORE, 140011068444672, 140011076833279, +STORE, 140010917437440, 140010959400959, +STORE, 140010909044736, 140010959400959, +STORE, 140010825183232, 140010833575935, +SNULL, 140010909044736, 140010942615551, +STORE, 140010942615552, 140010959400959, +STORE, 140010909044736, 140010942615551, +SNULL, 140010942619647, 140010959400959, +STORE, 140010942615552, 140010942619647, +STORE, 140010942619648, 140010959400959, +SNULL, 140010909044736, 140010934222847, +STORE, 140010934222848, 140010942615551, +STORE, 140010909044736, 140010934222847, +SNULL, 140010934226943, 140010942615551, +STORE, 140010934222848, 140010934226943, +STORE, 140010934226944, 140010942615551, +SNULL, 140010909048831, 140010934222847, +STORE, 140010909044736, 140010909048831, +STORE, 140010909048832, 140010934222847, +STORE, 140010816790528, 140010833575935, +SNULL, 140010816794623, 140010833575935, +STORE, 140010816790528, 140010816794623, +STORE, 140010816794624, 140010833575935, +STORE, 140010808397824, 140010816790527, +SNULL, 140010942619648, 140010951008255, +STORE, 140010951008256, 140010959400959, +STORE, 140010942619648, 140010951008255, +SNULL, 140010951012351, 140010959400959, +STORE, 140010951008256, 140010951012351, +STORE, 140010951012352, 140010959400959, +STORE, 140010800005120, 140010816790527, +SNULL, 140010800009215, 140010816790527, +STORE, 140010800005120, 140010800009215, +STORE, 140010800009216, 140010816790527, +SNULL, 140010909048832, 140010925830143, +STORE, 140010925830144, 140010934222847, +STORE, 140010909048832, 140010925830143, +SNULL, 140010925834239, 140010934222847, +STORE, 140010925830144, 140010925834239, +STORE, 140010925834240, 140010934222847, +SNULL, 140010816794624, 140010825183231, +STORE, 140010825183232, 140010833575935, +STORE, 140010816794624, 140010825183231, +SNULL, 140010825187327, 140010833575935, +STORE, 140010825183232, 140010825187327, +STORE, 140010825187328, 140010833575935, +SNULL, 140010909048832, 140010917437439, +STORE, 140010917437440, 140010925830143, +STORE, 140010909048832, 140010917437439, +SNULL, 140010917441535, 140010925830143, +STORE, 140010917437440, 140010917441535, +STORE, 140010917441536, 140010925830143, +SNULL, 140010800009216, 140010808397823, +STORE, 140010808397824, 140010816790527, +STORE, 140010800009216, 140010808397823, +SNULL, 140010808401919, 140010816790527, +STORE, 140010808397824, 140010808401919, +STORE, 140010808401920, 
140010816790527, +STORE, 140010791612416, 140010800005119, +SNULL, 140010791616511, 140010800005119, +STORE, 140010791612416, 140010791616511, +STORE, 140010791616512, 140010800005119, +STORE, 140012547100672, 140012547129343, +STORE, 140012511506432, 140012513697791, +SNULL, 140012511506432, 140012511596543, +STORE, 140012511596544, 140012513697791, +STORE, 140012511506432, 140012511596543, +SNULL, 140012513689599, 140012513697791, +STORE, 140012511596544, 140012513689599, +STORE, 140012513689600, 140012513697791, +ERASE, 140012513689600, 140012513697791, +STORE, 140012513689600, 140012513697791, +SNULL, 140012513693695, 140012513697791, +STORE, 140012513689600, 140012513693695, +STORE, 140012513693696, 140012513697791, +ERASE, 140012547100672, 140012547129343, +ERASE, 140011362054144, 140011362058239, +ERASE, 140011362058240, 140011370446847, +ERASE, 140011882139648, 140011882143743, +ERASE, 140011882143744, 140011890532351, +ERASE, 140011873746944, 140011873751039, +ERASE, 140011873751040, 140011882139647, +ERASE, 140011588526080, 140011588530175, +ERASE, 140011588530176, 140011596918783, +ERASE, 140011328483328, 140011328487423, +ERASE, 140011328487424, 140011336876031, +ERASE, 140011898925056, 140011898929151, +ERASE, 140011898929152, 140011907317759, +ERASE, 140011353661440, 140011353665535, +ERASE, 140011353665536, 140011362054143, +ERASE, 140011336876032, 140011336880127, +ERASE, 140011336880128, 140011345268735, +ERASE, 140011731136512, 140011731140607, +ERASE, 140011731140608, 140011739529215, +ERASE, 140011479486464, 140011479490559, +ERASE, 140011479490560, 140011487879167, +ERASE, 140011756314624, 140011756318719, +ERASE, 140011756318720, 140011764707327, +ERASE, 140011580133376, 140011580137471, +ERASE, 140011580137472, 140011588526079, +ERASE, 140011219443712, 140011219447807, +ERASE, 140011219447808, 140011227836415, +ERASE, 140011051655168, 140011051659263, +ERASE, 140011051659264, 140011060047871, +ERASE, 140011999571968, 140011999576063, +ERASE, 140011999576064, 140012007964671, +ERASE, 140011714351104, 140011714355199, +ERASE, 140011714355200, 140011722743807, +ERASE, 140011739529216, 140011739533311, +ERASE, 140011739533312, 140011747921919, +ERASE, 140011320090624, 140011320094719, +ERASE, 140011320094720, 140011328483327, +ERASE, 140011630489600, 140011630493695, +ERASE, 140011630493696, 140011638882303, +ERASE, 140011345268736, 140011345272831, +ERASE, 140011345272832, 140011353661439, +ERASE, 140011496271872, 140011496275967, +ERASE, 140011496275968, 140011504664575, +ERASE, 140011194265600, 140011194269695, +ERASE, 140011194269696, 140011202658303, +ERASE, 140011068440576, 140011068444671, +ERASE, 140011068444672, 140011076833279, +ERASE, 140010909044736, 140010909048831, +ERASE, 140010909048832, 140010917437439, +ERASE, 140011764707328, 140011764711423, +ERASE, 140011764711424, 140011773100031, +ERASE, 140011462701056, 140011462705151, +ERASE, 140011462705152, 140011471093759, +ERASE, 140011076833280, 140011076837375, +ERASE, 140011076837376, 140011085225983, +ERASE, 140011991179264, 140011991183359, +ERASE, 140011991183360, 140011999571967, +ERASE, 140011211051008, 140011211055103, +ERASE, 140011211055104, 140011219443711, +ERASE, 140010917437440, 140010917441535, +ERASE, 140010917441536, 140010925830143, +ERASE, 140011085225984, 140011085230079, +ERASE, 140011085230080, 140011093618687, +ERASE, 140011487879168, 140011487883263, +ERASE, 140011487883264, 140011496271871, +ERASE, 140011856961536, 140011856965631, +ERASE, 140011856965632, 140011865354239, +ERASE, 
140011982786560, 140011982790655, +ERASE, 140011982790656, 140011991179263, +ERASE, 140011722743808, 140011722747903, +ERASE, 140011722747904, 140011731136511, +ERASE, 140011177480192, 140011177484287, +ERASE, 140011177484288, 140011185872895, +ERASE, 140011848568832, 140011848572927, +ERASE, 140011848572928, 140011856961535, +ERASE, 140011890532352, 140011890536447, +ERASE, 140011890536448, 140011898925055, +ERASE, 140011622096896, 140011622100991, +ERASE, 140011622100992, 140011630489599, +ERASE, 140011311697920, 140011311702015, +ERASE, 140011311702016, 140011320090623, +ERASE, 140011471093760, 140011471097855, +ERASE, 140011471097856, 140011479486463, +ERASE, 140011605311488, 140011605315583, +ERASE, 140011605315584, 140011613704191, +ERASE, 140010791612416, 140010791616511, +ERASE, 140010791616512, 140010800005119, +ERASE, 140010959400960, 140010959405055, +ERASE, 140010959405056, 140010967793663, +ERASE, 140011185872896, 140011185876991, +ERASE, 140011185876992, 140011194265599, +ERASE, 140011454308352, 140011454312447, +ERASE, 140011454312448, 140011462701055, +ERASE, 140011596918784, 140011596922879, +ERASE, 140011596922880, 140011605311487, +ERASE, 140011060047872, 140011060051967, +ERASE, 140011060051968, 140011068440575, +ERASE, 140010925830144, 140010925834239, +ERASE, 140010925834240, 140010934222847, +ERASE, 140011747921920, 140011747926015, +ERASE, 140011747926016, 140011756314623, +ERASE, 140011202658304, 140011202662399, +ERASE, 140011202662400, 140011211051007, +ERASE, 140010800005120, 140010800009215, +ERASE, 140010800009216, 140010808397823, +ERASE, 140011093618688, 140011093622783, +ERASE, 140011093622784, 140011102011391, +ERASE, 140010808397824, 140010808401919, +ERASE, 140010808401920, 140010816790527, +ERASE, 140012419010560, 140012419014655, +ERASE, 140012419014656, 140012427403263, +ERASE, 140010934222848, 140010934226943, +ERASE, 140010934226944, 140010942615551, +ERASE, 140010942615552, 140010942619647, +ERASE, 140010942619648, 140010951008255, +ERASE, 140011613704192, 140011613708287, +ERASE, 140011613708288, 140011622096895, +ERASE, 140011865354240, 140011865358335, +ERASE, 140011865358336, 140011873746943, +ERASE, 140012301578240, 140012301582335, +ERASE, 140012301582336, 140012309970943, +ERASE, 140012393832448, 140012393836543, +ERASE, 140012393836544, 140012402225151, +ERASE, 140012410617856, 140012410621951, +ERASE, 140012410621952, 140012419010559, +ERASE, 140012402225152, 140012402229247, +ERASE, 140012402229248, 140012410617855, +ERASE, 140012259614720, 140012259618815, +ERASE, 140012259618816, 140012268007423, +ERASE, 140012251222016, 140012251226111, +ERASE, 140012251226112, 140012259614719, +ERASE, 140012284792832, 140012284796927, +ERASE, 140012284796928, 140012293185535, +ERASE, 140011445915648, 140011445919743, +ERASE, 140011445919744, 140011454308351, +ERASE, 140010951008256, 140010951012351, +ERASE, 140010951012352, 140010959400959, +ERASE, 140011043262464, 140011043266559, +ERASE, 140011043266560, 140011051655167, +ERASE, 140010825183232, 140010825187327, +ERASE, 140010825187328, 140010833575935, +ERASE, 140012293185536, 140012293189631, +ERASE, 140012293189632, 140012301578239, +ERASE, 140012276400128, 140012276404223, +ERASE, 140012276404224, 140012284792831, +ERASE, 140012016357376, 140012016361471, +ERASE, 140012016361472, 140012024750079, +ERASE, 140012024750080, 140012024754175, +ERASE, 140012024754176, 140012033142783, +ERASE, 140011227836416, 140011227840511, +ERASE, 140011227840512, 140011236229119, +ERASE, 140010816790528, 
140010816794623, +ERASE, 140010816794624, 140010825183231, +ERASE, 140012268007424, 140012268011519, +ERASE, 140012268011520, 140012276400127, +ERASE, 140012385439744, 140012385443839, +ERASE, 140012385443840, 140012393832447, +ERASE, 140012522090496, 140012522094591, +ERASE, 140012522094592, 140012530483199, +ERASE, 140012033142784, 140012033146879, +ERASE, 140012033146880, 140012041535487, + }; + unsigned long set35[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140730536939520, 140737488351231, +SNULL, 140730536943615, 140737488351231, +STORE, 140730536939520, 140730536943615, +STORE, 140730536808448, 140730536943615, +STORE, 94245239877632, 94245242130431, +SNULL, 94245240008703, 94245242130431, +STORE, 94245239877632, 94245240008703, +STORE, 94245240008704, 94245242130431, +ERASE, 94245240008704, 94245242130431, +STORE, 94245242101760, 94245242109951, +STORE, 94245242109952, 94245242130431, +STORE, 140475575263232, 140475577516031, +SNULL, 140475575406591, 140475577516031, +STORE, 140475575263232, 140475575406591, +STORE, 140475575406592, 140475577516031, +ERASE, 140475575406592, 140475577516031, +STORE, 140475577503744, 140475577511935, +STORE, 140475577511936, 140475577516031, +STORE, 140730538164224, 140730538168319, +STORE, 140730538151936, 140730538164223, +STORE, 140475577475072, 140475577503743, +STORE, 140475577466880, 140475577475071, +STORE, 140475573047296, 140475575263231, +SNULL, 140475573047296, 140475573145599, +STORE, 140475573145600, 140475575263231, +STORE, 140475573047296, 140475573145599, +SNULL, 140475575238655, 140475575263231, +STORE, 140475573145600, 140475575238655, +STORE, 140475575238656, 140475575263231, +SNULL, 140475575238656, 140475575246847, +STORE, 140475575246848, 140475575263231, +STORE, 140475575238656, 140475575246847, +ERASE, 140475575238656, 140475575246847, +STORE, 140475575238656, 140475575246847, +ERASE, 140475575246848, 140475575263231, +STORE, 140475575246848, 140475575263231, +STORE, 140475569250304, 140475573047295, +SNULL, 140475569250304, 140475570909183, +STORE, 140475570909184, 140475573047295, +STORE, 140475569250304, 140475570909183, +SNULL, 140475573006335, 140475573047295, +STORE, 140475570909184, 140475573006335, +STORE, 140475573006336, 140475573047295, +SNULL, 140475573006336, 140475573030911, +STORE, 140475573030912, 140475573047295, +STORE, 140475573006336, 140475573030911, +ERASE, 140475573006336, 140475573030911, +STORE, 140475573006336, 140475573030911, +ERASE, 140475573030912, 140475573047295, +STORE, 140475573030912, 140475573047295, +STORE, 140475577458688, 140475577475071, +SNULL, 140475573022719, 140475573030911, +STORE, 140475573006336, 140475573022719, +STORE, 140475573022720, 140475573030911, +SNULL, 140475575242751, 140475575246847, +STORE, 140475575238656, 140475575242751, +STORE, 140475575242752, 140475575246847, +SNULL, 94245242105855, 94245242109951, +STORE, 94245242101760, 94245242105855, +STORE, 94245242105856, 94245242109951, +SNULL, 140475577507839, 140475577511935, +STORE, 140475577503744, 140475577507839, +STORE, 140475577507840, 140475577511935, +ERASE, 140475577475072, 140475577503743, +STORE, 94245271216128, 94245271351295, +STORE, 140475560857600, 140475569250303, +SNULL, 140475560861695, 140475569250303, +STORE, 140475560857600, 140475560861695, +STORE, 140475560861696, 140475569250303, +STORE, 140475552464896, 140475560857599, +STORE, 140475418247168, 140475552464895, +SNULL, 140475418247168, 140475428241407, +STORE, 140475428241408, 140475552464895, +STORE, 140475418247168, 
140475428241407, +ERASE, 140475418247168, 140475428241407, +SNULL, 140475495350271, 140475552464895, +STORE, 140475428241408, 140475495350271, +STORE, 140475495350272, 140475552464895, +ERASE, 140475495350272, 140475552464895, +SNULL, 140475428376575, 140475495350271, +STORE, 140475428241408, 140475428376575, +STORE, 140475428376576, 140475495350271, +SNULL, 140475552468991, 140475560857599, +STORE, 140475552464896, 140475552468991, +STORE, 140475552468992, 140475560857599, +STORE, 140475544072192, 140475552464895, +SNULL, 140475544076287, 140475552464895, +STORE, 140475544072192, 140475544076287, +STORE, 140475544076288, 140475552464895, +STORE, 140475535679488, 140475544072191, +SNULL, 140475535683583, 140475544072191, +STORE, 140475535679488, 140475535683583, +STORE, 140475535683584, 140475544072191, +STORE, 140475527286784, 140475535679487, +SNULL, 140475527290879, 140475535679487, +STORE, 140475527286784, 140475527290879, +STORE, 140475527290880, 140475535679487, +STORE, 140475518894080, 140475527286783, +STORE, 140475510501376, 140475527286783, +STORE, 140475502108672, 140475527286783, +STORE, 140475419848704, 140475428241407, +STORE, 140475285630976, 140475419848703, +SNULL, 140475285630976, 140475294023679, +STORE, 140475294023680, 140475419848703, +STORE, 140475285630976, 140475294023679, +ERASE, 140475285630976, 140475294023679, +STORE, 140475159805952, 140475419848703, +STORE, 140475025588224, 140475419848703, +SNULL, 140475092697087, 140475419848703, +STORE, 140475025588224, 140475092697087, +STORE, 140475092697088, 140475419848703, +SNULL, 140475092697088, 140475159805951, +STORE, 140475159805952, 140475419848703, +STORE, 140475092697088, 140475159805951, +ERASE, 140475092697088, 140475159805951, +STORE, 140474891370496, 140475092697087, +SNULL, 140474958479359, 140475092697087, +STORE, 140474891370496, 140474958479359, +STORE, 140474958479360, 140475092697087, +SNULL, 140474958479360, 140475025588223, +STORE, 140475025588224, 140475092697087, +STORE, 140474958479360, 140475025588223, +ERASE, 140474958479360, 140475025588223, +SNULL, 140475361132543, 140475419848703, +STORE, 140475159805952, 140475361132543, +STORE, 140475361132544, 140475419848703, +ERASE, 140475361132544, 140475419848703, +SNULL, 140475159805952, 140475294023679, +STORE, 140475294023680, 140475361132543, +STORE, 140475159805952, 140475294023679, +SNULL, 140475294158847, 140475361132543, +STORE, 140475294023680, 140475294158847, +STORE, 140475294158848, 140475361132543, +SNULL, 140475226914815, 140475294023679, +STORE, 140475159805952, 140475226914815, +STORE, 140475226914816, 140475294023679, +ERASE, 140475226914816, 140475294023679, +SNULL, 140475025723391, 140475092697087, +STORE, 140475025588224, 140475025723391, +STORE, 140475025723392, 140475092697087, +SNULL, 140475159941119, 140475226914815, +STORE, 140475159805952, 140475159941119, +STORE, 140475159941120, 140475226914815, +SNULL, 140474891505663, 140474958479359, +STORE, 140474891370496, 140474891505663, +STORE, 140474891505664, 140474958479359, +SNULL, 140475502108672, 140475518894079, +STORE, 140475518894080, 140475527286783, +STORE, 140475502108672, 140475518894079, +SNULL, 140475518898175, 140475527286783, +STORE, 140475518894080, 140475518898175, +STORE, 140475518898176, 140475527286783, +STORE, 140475411456000, 140475428241407, +SNULL, 140475502112767, 140475518894079, +STORE, 140475502108672, 140475502112767, +STORE, 140475502112768, 140475518894079, +SNULL, 140475411460095, 140475428241407, +STORE, 140475411456000, 140475411460095, +STORE, 
140475411460096, 140475428241407, +SNULL, 140475411460096, 140475419848703, +STORE, 140475419848704, 140475428241407, +STORE, 140475411460096, 140475419848703, +SNULL, 140475419852799, 140475428241407, +STORE, 140475419848704, 140475419852799, +STORE, 140475419852800, 140475428241407, +STORE, 140475403063296, 140475411455999, +SNULL, 140475502112768, 140475510501375, +STORE, 140475510501376, 140475518894079, +STORE, 140475502112768, 140475510501375, +SNULL, 140475510505471, 140475518894079, +STORE, 140475510501376, 140475510505471, +STORE, 140475510505472, 140475518894079, +SNULL, 140475403067391, 140475411455999, +STORE, 140475403063296, 140475403067391, +STORE, 140475403067392, 140475411455999, +STORE, 140475394670592, 140475403063295, +SNULL, 140475394674687, 140475403063295, +STORE, 140475394670592, 140475394674687, +STORE, 140475394674688, 140475403063295, +STORE, 140475386277888, 140475394670591, +STORE, 140475377885184, 140475394670591, +STORE, 140475369492480, 140475394670591, +SNULL, 140475369496575, 140475394670591, +STORE, 140475369492480, 140475369496575, +STORE, 140475369496576, 140475394670591, +SNULL, 140475369496576, 140475377885183, +STORE, 140475377885184, 140475394670591, +STORE, 140475369496576, 140475377885183, +SNULL, 140475377889279, 140475394670591, +STORE, 140475377885184, 140475377889279, +STORE, 140475377889280, 140475394670591, +STORE, 140475285630976, 140475294023679, +SNULL, 140475377889280, 140475386277887, +STORE, 140475386277888, 140475394670591, +STORE, 140475377889280, 140475386277887, +SNULL, 140475386281983, 140475394670591, +STORE, 140475386277888, 140475386281983, +STORE, 140475386281984, 140475394670591, +SNULL, 140475285635071, 140475294023679, +STORE, 140475285630976, 140475285635071, +STORE, 140475285635072, 140475294023679, +STORE, 140475277238272, 140475285630975, +STORE, 140475268845568, 140475285630975, +SNULL, 140475268845568, 140475277238271, +STORE, 140475277238272, 140475285630975, +STORE, 140475268845568, 140475277238271, +SNULL, 140475277242367, 140475285630975, +STORE, 140475277238272, 140475277242367, +STORE, 140475277242368, 140475285630975, +STORE, 140475260452864, 140475277238271, +SNULL, 140475260452864, 140475268845567, +STORE, 140475268845568, 140475277238271, +STORE, 140475260452864, 140475268845567, +SNULL, 140475268849663, 140475277238271, +STORE, 140475268845568, 140475268849663, +STORE, 140475268849664, 140475277238271, +SNULL, 140475260456959, 140475268845567, +STORE, 140475260452864, 140475260456959, +STORE, 140475260456960, 140475268845567, +STORE, 140475252060160, 140475260452863, +SNULL, 140475252064255, 140475260452863, +STORE, 140475252060160, 140475252064255, +STORE, 140475252064256, 140475260452863, +STORE, 140475243667456, 140475252060159, +SNULL, 140475243671551, 140475252060159, +STORE, 140475243667456, 140475243671551, +STORE, 140475243671552, 140475252060159, +STORE, 140475235274752, 140475243667455, +STORE, 140475151413248, 140475159805951, +STORE, 140474891505664, 140475025588223, +STORE, 140475143020544, 140475159805951, +SNULL, 140474891505664, 140474958479359, +STORE, 140474958479360, 140475025588223, +STORE, 140474891505664, 140474958479359, +SNULL, 140474958614527, 140475025588223, +STORE, 140474958479360, 140474958614527, +STORE, 140474958614528, 140475025588223, +STORE, 140474824261632, 140474891370495, +SNULL, 140474824396799, 140474891370495, +STORE, 140474824261632, 140474824396799, +STORE, 140474824396800, 140474891370495, +STORE, 140475134627840, 140475159805951, +STORE, 140474690043904, 
140474824261631, +STORE, 140475126235136, 140475159805951, +STORE, 140475117842432, 140475159805951, +STORE, 140474622935040, 140474824261631, +STORE, 140475109449728, 140475159805951, +STORE, 140474488717312, 140474824261631, +STORE, 140475101057024, 140475159805951, +STORE, 140474480324608, 140474488717311, +STORE, 140474413215744, 140474480324607, +STORE, 140474404823040, 140474413215743, +ERASE, 140474413215744, 140474480324607, +STORE, 140474471931904, 140474488717311, +STORE, 140474270605312, 140474404823039, +SNULL, 140475101057024, 140475126235135, +STORE, 140475126235136, 140475159805951, +STORE, 140475101057024, 140475126235135, +SNULL, 140475126239231, 140475159805951, +STORE, 140475126235136, 140475126239231, +STORE, 140475126239232, 140475159805951, +STORE, 140474463539200, 140474488717311, +STORE, 140474455146496, 140474488717311, +SNULL, 140474455150591, 140474488717311, +STORE, 140474455146496, 140474455150591, +STORE, 140474455150592, 140474488717311, +STORE, 140474446753792, 140474455146495, +SNULL, 140474446757887, 140474455146495, +STORE, 140474446753792, 140474446757887, +STORE, 140474446757888, 140474455146495, +STORE, 140474438361088, 140474446753791, +STORE, 140474429968384, 140474446753791, +SNULL, 140474429972479, 140474446753791, +STORE, 140474429968384, 140474429972479, +STORE, 140474429972480, 140474446753791, +SNULL, 140475235278847, 140475243667455, +STORE, 140475235274752, 140475235278847, +STORE, 140475235278848, 140475243667455, +SNULL, 140474757152767, 140474824261631, +STORE, 140474488717312, 140474757152767, +STORE, 140474757152768, 140474824261631, +ERASE, 140474757152768, 140474824261631, +SNULL, 140474488717312, 140474690043903, +STORE, 140474690043904, 140474757152767, +STORE, 140474488717312, 140474690043903, +SNULL, 140474690179071, 140474757152767, +STORE, 140474690043904, 140474690179071, +STORE, 140474690179072, 140474757152767, +SNULL, 140474488717312, 140474622935039, +STORE, 140474622935040, 140474690043903, +STORE, 140474488717312, 140474622935039, +SNULL, 140474623070207, 140474690043903, +STORE, 140474622935040, 140474623070207, +STORE, 140474623070208, 140474690043903, +SNULL, 140475101057024, 140475117842431, +STORE, 140475117842432, 140475126235135, +STORE, 140475101057024, 140475117842431, +SNULL, 140475117846527, 140475126235135, +STORE, 140475117842432, 140475117846527, +STORE, 140475117846528, 140475126235135, +SNULL, 140474555826175, 140474622935039, +STORE, 140474488717312, 140474555826175, +STORE, 140474555826176, 140474622935039, +ERASE, 140474555826176, 140474622935039, +STORE, 140474136387584, 140474404823039, +SNULL, 140474136387584, 140474153172991, +STORE, 140474153172992, 140474404823039, +STORE, 140474136387584, 140474153172991, +ERASE, 140474136387584, 140474153172991, +STORE, 140474018955264, 140474404823039, +STORE, 140473884737536, 140474404823039, +SNULL, 140474086064127, 140474404823039, +STORE, 140473884737536, 140474086064127, +STORE, 140474086064128, 140474404823039, +SNULL, 140474086064128, 140474153172991, +STORE, 140474153172992, 140474404823039, +STORE, 140474086064128, 140474153172991, +ERASE, 140474086064128, 140474153172991, +STORE, 140473750519808, 140474086064127, +SNULL, 140473817628671, 140474086064127, +STORE, 140473750519808, 140473817628671, +STORE, 140473817628672, 140474086064127, +SNULL, 140473817628672, 140473884737535, +STORE, 140473884737536, 140474086064127, +STORE, 140473817628672, 140473884737535, +ERASE, 140473817628672, 140473884737535, +SNULL, 140475126239232, 140475151413247, +STORE, 
140475151413248, 140475159805951, +STORE, 140475126239232, 140475151413247, +SNULL, 140475151417343, 140475159805951, +STORE, 140475151413248, 140475151417343, +STORE, 140475151417344, 140475159805951, +SNULL, 140474270605311, 140474404823039, +STORE, 140474153172992, 140474270605311, +STORE, 140474270605312, 140474404823039, +SNULL, 140474270605312, 140474287390719, +STORE, 140474287390720, 140474404823039, +STORE, 140474270605312, 140474287390719, +ERASE, 140474270605312, 140474287390719, +SNULL, 140474429972480, 140474438361087, +STORE, 140474438361088, 140474446753791, +STORE, 140474429972480, 140474438361087, +SNULL, 140474438365183, 140474446753791, +STORE, 140474438361088, 140474438365183, +STORE, 140474438365184, 140474446753791, +STORE, 140474815868928, 140474824261631, +SNULL, 140474815873023, 140474824261631, +STORE, 140474815868928, 140474815873023, +STORE, 140474815873024, 140474824261631, +SNULL, 140474220281855, 140474270605311, +STORE, 140474153172992, 140474220281855, +STORE, 140474220281856, 140474270605311, +ERASE, 140474220281856, 140474270605311, +SNULL, 140474488852479, 140474555826175, +STORE, 140474488717312, 140474488852479, +STORE, 140474488852480, 140474555826175, +SNULL, 140475101057024, 140475109449727, +STORE, 140475109449728, 140475117842431, +STORE, 140475101057024, 140475109449727, +SNULL, 140475109453823, 140475117842431, +STORE, 140475109449728, 140475109453823, +STORE, 140475109453824, 140475117842431, +SNULL, 140473951846399, 140474086064127, +STORE, 140473884737536, 140473951846399, +STORE, 140473951846400, 140474086064127, +SNULL, 140473951846400, 140474018955263, +STORE, 140474018955264, 140474086064127, +STORE, 140473951846400, 140474018955263, +ERASE, 140473951846400, 140474018955263, +SNULL, 140473884872703, 140473951846399, +STORE, 140473884737536, 140473884872703, +STORE, 140473884872704, 140473951846399, +SNULL, 140474019090431, 140474086064127, +STORE, 140474018955264, 140474019090431, +STORE, 140474019090432, 140474086064127, +SNULL, 140473750654975, 140473817628671, +STORE, 140473750519808, 140473750654975, +STORE, 140473750654976, 140473817628671, +SNULL, 140474455150592, 140474463539199, +STORE, 140474463539200, 140474488717311, +STORE, 140474455150592, 140474463539199, +SNULL, 140474463543295, 140474488717311, +STORE, 140474463539200, 140474463543295, +STORE, 140474463543296, 140474488717311, +STORE, 140474807476224, 140474815868927, +SNULL, 140474463543296, 140474471931903, +STORE, 140474471931904, 140474488717311, +STORE, 140474463543296, 140474471931903, +SNULL, 140474471935999, 140474488717311, +STORE, 140474471931904, 140474471935999, +STORE, 140474471936000, 140474488717311, +STORE, 140474799083520, 140474815868927, +STORE, 140474790690816, 140474815868927, +SNULL, 140474790690816, 140474799083519, +STORE, 140474799083520, 140474815868927, +STORE, 140474790690816, 140474799083519, +SNULL, 140474799087615, 140474815868927, +STORE, 140474799083520, 140474799087615, +STORE, 140474799087616, 140474815868927, +SNULL, 140474354499583, 140474404823039, +STORE, 140474287390720, 140474354499583, +STORE, 140474354499584, 140474404823039, +ERASE, 140474354499584, 140474404823039, +SNULL, 140474287525887, 140474354499583, +STORE, 140474287390720, 140474287525887, +STORE, 140474287525888, 140474354499583, +STORE, 140474782298112, 140474799083519, +STORE, 140474773905408, 140474799083519, +SNULL, 140474773909503, 140474799083519, +STORE, 140474773905408, 140474773909503, +STORE, 140474773909504, 140474799083519, +SNULL, 140475126239232, 
140475134627839, +STORE, 140475134627840, 140475151413247, +STORE, 140475126239232, 140475134627839, +SNULL, 140475134631935, 140475151413247, +STORE, 140475134627840, 140475134631935, +STORE, 140475134631936, 140475151413247, +STORE, 140474765512704, 140474773905407, +STORE, 140474614542336, 140474622935039, +SNULL, 140474153308159, 140474220281855, +STORE, 140474153172992, 140474153308159, +STORE, 140474153308160, 140474220281855, +SNULL, 140474404827135, 140474413215743, +STORE, 140474404823040, 140474404827135, +STORE, 140474404827136, 140474413215743, +STORE, 140474606149632, 140474622935039, +SNULL, 140474606153727, 140474622935039, +STORE, 140474606149632, 140474606153727, +STORE, 140474606153728, 140474622935039, +STORE, 140474597756928, 140474606149631, +SNULL, 140474597761023, 140474606149631, +STORE, 140474597756928, 140474597761023, +STORE, 140474597761024, 140474606149631, +SNULL, 140475134631936, 140475143020543, +STORE, 140475143020544, 140475151413247, +STORE, 140475134631936, 140475143020543, +SNULL, 140475143024639, 140475151413247, +STORE, 140475143020544, 140475143024639, +STORE, 140475143024640, 140475151413247, +STORE, 140474589364224, 140474597756927, +SNULL, 140474606153728, 140474614542335, +STORE, 140474614542336, 140474622935039, +STORE, 140474606153728, 140474614542335, +SNULL, 140474614546431, 140474622935039, +STORE, 140474614542336, 140474614546431, +STORE, 140474614546432, 140474622935039, +SNULL, 140474765516799, 140474773905407, +STORE, 140474765512704, 140474765516799, +STORE, 140474765516800, 140474773905407, +STORE, 140474580971520, 140474597756927, +SNULL, 140474773909504, 140474782298111, +STORE, 140474782298112, 140474799083519, +STORE, 140474773909504, 140474782298111, +SNULL, 140474782302207, 140474799083519, +STORE, 140474782298112, 140474782302207, +STORE, 140474782302208, 140474799083519, +SNULL, 140474471936000, 140474480324607, +STORE, 140474480324608, 140474488717311, +STORE, 140474471936000, 140474480324607, +SNULL, 140474480328703, 140474488717311, +STORE, 140474480324608, 140474480328703, +STORE, 140474480328704, 140474488717311, +STORE, 140474572578816, 140474597756927, +SNULL, 140474572582911, 140474597756927, +STORE, 140474572578816, 140474572582911, +STORE, 140474572582912, 140474597756927, +SNULL, 140474782302208, 140474790690815, +STORE, 140474790690816, 140474799083519, +STORE, 140474782302208, 140474790690815, +SNULL, 140474790694911, 140474799083519, +STORE, 140474790690816, 140474790694911, +STORE, 140474790694912, 140474799083519, +STORE, 140474564186112, 140474572578815, +STORE, 140474421575680, 140474429968383, +STORE, 140474396430336, 140474404823039, +SNULL, 140474396434431, 140474404823039, +STORE, 140474396430336, 140474396434431, +STORE, 140474396434432, 140474404823039, +STORE, 140474388037632, 140474396430335, +SNULL, 140474799087616, 140474807476223, +STORE, 140474807476224, 140474815868927, +STORE, 140474799087616, 140474807476223, +SNULL, 140474807480319, 140474815868927, +STORE, 140474807476224, 140474807480319, +STORE, 140474807480320, 140474815868927, +SNULL, 140475101061119, 140475109449727, +STORE, 140475101057024, 140475101061119, +STORE, 140475101061120, 140475109449727, +STORE, 140474379644928, 140474396430335, +SNULL, 140474572582912, 140474589364223, +STORE, 140474589364224, 140474597756927, +STORE, 140474572582912, 140474589364223, +SNULL, 140474589368319, 140474597756927, +STORE, 140474589364224, 140474589368319, +STORE, 140474589368320, 140474597756927, +STORE, 140474371252224, 140474396430335, +STORE, 
140474362859520, 140474396430335, +STORE, 140474278998016, 140474287390719, +STORE, 140474270605312, 140474287390719, +STORE, 140474262212608, 140474287390719, +SNULL, 140474262216703, 140474287390719, +STORE, 140474262212608, 140474262216703, +STORE, 140474262216704, 140474287390719, +STORE, 140474253819904, 140474262212607, +SNULL, 140474253823999, 140474262212607, +STORE, 140474253819904, 140474253823999, +STORE, 140474253824000, 140474262212607, +SNULL, 140474362859520, 140474388037631, +STORE, 140474388037632, 140474396430335, +STORE, 140474362859520, 140474388037631, +SNULL, 140474388041727, 140474396430335, +STORE, 140474388037632, 140474388041727, +STORE, 140474388041728, 140474396430335, +SNULL, 140474362859520, 140474379644927, +STORE, 140474379644928, 140474388037631, +STORE, 140474362859520, 140474379644927, +SNULL, 140474379649023, 140474388037631, +STORE, 140474379644928, 140474379649023, +STORE, 140474379649024, 140474388037631, +STORE, 140474245427200, 140474253819903, +STORE, 140474237034496, 140474253819903, +STORE, 140474228641792, 140474253819903, +STORE, 140474144780288, 140474153172991, +SNULL, 140474228645887, 140474253819903, +STORE, 140474228641792, 140474228645887, +STORE, 140474228645888, 140474253819903, +SNULL, 140474564190207, 140474572578815, +STORE, 140474564186112, 140474564190207, +STORE, 140474564190208, 140474572578815, +STORE, 140474136387584, 140474153172991, +SNULL, 140474362859520, 140474371252223, +STORE, 140474371252224, 140474379644927, +STORE, 140474362859520, 140474371252223, +SNULL, 140474371256319, 140474379644927, +STORE, 140474371252224, 140474371256319, +STORE, 140474371256320, 140474379644927, +STORE, 140474127994880, 140474153172991, +STORE, 140474119602176, 140474153172991, +SNULL, 140474421579775, 140474429968383, +STORE, 140474421575680, 140474421579775, +STORE, 140474421579776, 140474429968383, +STORE, 140474111209472, 140474153172991, +SNULL, 140474111213567, 140474153172991, +STORE, 140474111209472, 140474111213567, +STORE, 140474111213568, 140474153172991, +SNULL, 140474262216704, 140474270605311, +STORE, 140474270605312, 140474287390719, +STORE, 140474262216704, 140474270605311, +SNULL, 140474270609407, 140474287390719, +STORE, 140474270605312, 140474270609407, +STORE, 140474270609408, 140474287390719, +STORE, 140474102816768, 140474111209471, +SNULL, 140474102820863, 140474111209471, +STORE, 140474102816768, 140474102820863, +STORE, 140474102820864, 140474111209471, +SNULL, 140474270609408, 140474278998015, +STORE, 140474278998016, 140474287390719, +STORE, 140474270609408, 140474278998015, +SNULL, 140474279002111, 140474287390719, +STORE, 140474278998016, 140474279002111, +STORE, 140474279002112, 140474287390719, +STORE, 140474094424064, 140474102816767, +SNULL, 140474572582912, 140474580971519, +STORE, 140474580971520, 140474589364223, +STORE, 140474572582912, 140474580971519, +SNULL, 140474580975615, 140474589364223, +STORE, 140474580971520, 140474580975615, +STORE, 140474580975616, 140474589364223, +SNULL, 140474362863615, 140474371252223, +STORE, 140474362859520, 140474362863615, +STORE, 140474362863616, 140474371252223, +STORE, 140474010562560, 140474018955263, +SNULL, 140474228645888, 140474245427199, +STORE, 140474245427200, 140474253819903, +STORE, 140474228645888, 140474245427199, +SNULL, 140474245431295, 140474253819903, +STORE, 140474245427200, 140474245431295, +STORE, 140474245431296, 140474253819903, +SNULL, 140474111213568, 140474136387583, +STORE, 140474136387584, 140474153172991, +STORE, 140474111213568, 
140474136387583, +SNULL, 140474136391679, 140474153172991, +STORE, 140474136387584, 140474136391679, +STORE, 140474136391680, 140474153172991, +STORE, 140474002169856, 140474018955263, +STORE, 140473993777152, 140474018955263, +SNULL, 140474111213568, 140474127994879, +STORE, 140474127994880, 140474136387583, +STORE, 140474111213568, 140474127994879, +SNULL, 140474127998975, 140474136387583, +STORE, 140474127994880, 140474127998975, +STORE, 140474127998976, 140474136387583, +SNULL, 140474228645888, 140474237034495, +STORE, 140474237034496, 140474245427199, +STORE, 140474228645888, 140474237034495, +SNULL, 140474237038591, 140474245427199, +STORE, 140474237034496, 140474237038591, +STORE, 140474237038592, 140474245427199, +SNULL, 140474136391680, 140474144780287, +STORE, 140474144780288, 140474153172991, +STORE, 140474136391680, 140474144780287, +SNULL, 140474144784383, 140474153172991, +STORE, 140474144780288, 140474144784383, +STORE, 140474144784384, 140474153172991, +STORE, 140473985384448, 140474018955263, +STORE, 140473976991744, 140474018955263, +STORE, 140473968599040, 140474018955263, +SNULL, 140473968603135, 140474018955263, +STORE, 140473968599040, 140473968603135, +STORE, 140473968603136, 140474018955263, +SNULL, 140474111213568, 140474119602175, +STORE, 140474119602176, 140474127994879, +STORE, 140474111213568, 140474119602175, +SNULL, 140474119606271, 140474127994879, +STORE, 140474119602176, 140474119606271, +STORE, 140474119606272, 140474127994879, +STORE, 140473960206336, 140473968599039, +SNULL, 140474094428159, 140474102816767, +STORE, 140474094424064, 140474094428159, +STORE, 140474094428160, 140474102816767, +STORE, 140473876344832, 140473884737535, +STORE, 140473867952128, 140473884737535, +STORE, 140473859559424, 140473884737535, +SNULL, 140473859563519, 140473884737535, +STORE, 140473859559424, 140473859563519, +STORE, 140473859563520, 140473884737535, +SNULL, 140473968603136, 140473993777151, +STORE, 140473993777152, 140474018955263, +STORE, 140473968603136, 140473993777151, +SNULL, 140473993781247, 140474018955263, +STORE, 140473993777152, 140473993781247, +STORE, 140473993781248, 140474018955263, +SNULL, 140473960210431, 140473968599039, +STORE, 140473960206336, 140473960210431, +STORE, 140473960210432, 140473968599039, +SNULL, 140473993781248, 140474010562559, +STORE, 140474010562560, 140474018955263, +STORE, 140473993781248, 140474010562559, +SNULL, 140474010566655, 140474018955263, +STORE, 140474010562560, 140474010566655, +STORE, 140474010566656, 140474018955263, +SNULL, 140473968603136, 140473985384447, +STORE, 140473985384448, 140473993777151, +STORE, 140473968603136, 140473985384447, +SNULL, 140473985388543, 140473993777151, +STORE, 140473985384448, 140473985388543, +STORE, 140473985388544, 140473993777151, +SNULL, 140473993781248, 140474002169855, +STORE, 140474002169856, 140474010562559, +STORE, 140473993781248, 140474002169855, +SNULL, 140474002173951, 140474010562559, +STORE, 140474002169856, 140474002173951, +STORE, 140474002173952, 140474010562559, +STORE, 140473851166720, 140473859559423, +SNULL, 140473851170815, 140473859559423, +STORE, 140473851166720, 140473851170815, +STORE, 140473851170816, 140473859559423, +SNULL, 140473968603136, 140473976991743, +STORE, 140473976991744, 140473985384447, +STORE, 140473968603136, 140473976991743, +SNULL, 140473976995839, 140473985384447, +STORE, 140473976991744, 140473976995839, +STORE, 140473976995840, 140473985384447, +STORE, 140473842774016, 140473851166719, +SNULL, 140473859563520, 140473867952127, +STORE, 
140473867952128, 140473884737535, +STORE, 140473859563520, 140473867952127, +SNULL, 140473867956223, 140473884737535, +STORE, 140473867952128, 140473867956223, +STORE, 140473867956224, 140473884737535, +SNULL, 140473867956224, 140473876344831, +STORE, 140473876344832, 140473884737535, +STORE, 140473867956224, 140473876344831, +SNULL, 140473876348927, 140473884737535, +STORE, 140473876344832, 140473876348927, +STORE, 140473876348928, 140473884737535, +STORE, 140473834381312, 140473851166719, +SNULL, 140473834385407, 140473851166719, +STORE, 140473834381312, 140473834385407, +STORE, 140473834385408, 140473851166719, +SNULL, 140473834385408, 140473842774015, +STORE, 140473842774016, 140473851166719, +STORE, 140473834385408, 140473842774015, +SNULL, 140473842778111, 140473851166719, +STORE, 140473842774016, 140473842778111, +STORE, 140473842778112, 140473851166719, +STORE, 140473825988608, 140473834381311, +SNULL, 140473825992703, 140473834381311, +STORE, 140473825988608, 140473825992703, +STORE, 140473825992704, 140473834381311, +STORE, 140475577475072, 140475577503743, +STORE, 140475499917312, 140475502108671, +SNULL, 140475499917312, 140475500007423, +STORE, 140475500007424, 140475502108671, +STORE, 140475499917312, 140475500007423, +SNULL, 140475502100479, 140475502108671, +STORE, 140475500007424, 140475502100479, +STORE, 140475502100480, 140475502108671, +ERASE, 140475502100480, 140475502108671, +STORE, 140475502100480, 140475502108671, +SNULL, 140475502104575, 140475502108671, +STORE, 140475502100480, 140475502104575, +STORE, 140475502104576, 140475502108671, +ERASE, 140475577475072, 140475577503743, +ERASE, 140475235274752, 140475235278847, +ERASE, 140475235278848, 140475243667455, +ERASE, 140474815868928, 140474815873023, +ERASE, 140474815873024, 140474824261631, +ERASE, 140474606149632, 140474606153727, +ERASE, 140474606153728, 140474614542335, +ERASE, 140474270605312, 140474270609407, +ERASE, 140474270609408, 140474278998015, +ERASE, 140474438361088, 140474438365183, +ERASE, 140474438365184, 140474446753791, +ERASE, 140474597756928, 140474597761023, +ERASE, 140474597761024, 140474606149631, +ERASE, 140475126235136, 140475126239231, +ERASE, 140475126239232, 140475134627839, +ERASE, 140474463539200, 140474463543295, +ERASE, 140474463543296, 140474471931903, +ERASE, 140474388037632, 140474388041727, +ERASE, 140474388041728, 140474396430335, +ERASE, 140474404823040, 140474404827135, +ERASE, 140474404827136, 140474413215743, +ERASE, 140474278998016, 140474279002111, +ERASE, 140474279002112, 140474287390719, +ERASE, 140474094424064, 140474094428159, +ERASE, 140474094428160, 140474102816767, +ERASE, 140473867952128, 140473867956223, +ERASE, 140473867956224, 140473876344831, +ERASE, 140475151413248, 140475151417343, +ERASE, 140475151417344, 140475159805951, +ERASE, 140474455146496, 140474455150591, +ERASE, 140474455150592, 140474463539199, +ERASE, 140474807476224, 140474807480319, +ERASE, 140474807480320, 140474815868927, +ERASE, 140475117842432, 140475117846527, +ERASE, 140475117846528, 140475126235135, +ERASE, 140474446753792, 140474446757887, +ERASE, 140474446757888, 140474455146495, +ERASE, 140474429968384, 140474429972479, +ERASE, 140474429972480, 140474438361087, +ERASE, 140474782298112, 140474782302207, +ERASE, 140474782302208, 140474790690815, +ERASE, 140474136387584, 140474136391679, +ERASE, 140474136391680, 140474144780287, +ERASE, 140474002169856, 140474002173951, +ERASE, 140474002173952, 140474010562559, +ERASE, 140475134627840, 140475134631935, +ERASE, 140475134631936, 
140475143020543, +ERASE, 140474471931904, 140474471935999, +ERASE, 140474471936000, 140474480324607, +ERASE, 140474396430336, 140474396434431, +ERASE, 140474396434432, 140474404823039, + }; + unsigned long set36[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140723893125120, 140737488351231, +SNULL, 140723893129215, 140737488351231, +STORE, 140723893125120, 140723893129215, +STORE, 140723892994048, 140723893129215, +STORE, 94076829786112, 94076832038911, +SNULL, 94076829917183, 94076832038911, +STORE, 94076829786112, 94076829917183, +STORE, 94076829917184, 94076832038911, +ERASE, 94076829917184, 94076832038911, +STORE, 94076832010240, 94076832018431, +STORE, 94076832018432, 94076832038911, +STORE, 140122444345344, 140122446598143, +SNULL, 140122444488703, 140122446598143, +STORE, 140122444345344, 140122444488703, +STORE, 140122444488704, 140122446598143, +ERASE, 140122444488704, 140122446598143, +STORE, 140122446585856, 140122446594047, +STORE, 140122446594048, 140122446598143, +STORE, 140723893538816, 140723893542911, +STORE, 140723893526528, 140723893538815, +STORE, 140122446557184, 140122446585855, +STORE, 140122446548992, 140122446557183, +STORE, 140122442129408, 140122444345343, +SNULL, 140122442129408, 140122442227711, +STORE, 140122442227712, 140122444345343, +STORE, 140122442129408, 140122442227711, +SNULL, 140122444320767, 140122444345343, +STORE, 140122442227712, 140122444320767, +STORE, 140122444320768, 140122444345343, +SNULL, 140122444320768, 140122444328959, +STORE, 140122444328960, 140122444345343, +STORE, 140122444320768, 140122444328959, +ERASE, 140122444320768, 140122444328959, +STORE, 140122444320768, 140122444328959, +ERASE, 140122444328960, 140122444345343, +STORE, 140122444328960, 140122444345343, +STORE, 140122438332416, 140122442129407, +SNULL, 140122438332416, 140122439991295, +STORE, 140122439991296, 140122442129407, +STORE, 140122438332416, 140122439991295, +SNULL, 140122442088447, 140122442129407, +STORE, 140122439991296, 140122442088447, +STORE, 140122442088448, 140122442129407, +SNULL, 140122442088448, 140122442113023, +STORE, 140122442113024, 140122442129407, +STORE, 140122442088448, 140122442113023, +ERASE, 140122442088448, 140122442113023, +STORE, 140122442088448, 140122442113023, +ERASE, 140122442113024, 140122442129407, +STORE, 140122442113024, 140122442129407, +STORE, 140122446540800, 140122446557183, +SNULL, 140122442104831, 140122442113023, +STORE, 140122442088448, 140122442104831, +STORE, 140122442104832, 140122442113023, +SNULL, 140122444324863, 140122444328959, +STORE, 140122444320768, 140122444324863, +STORE, 140122444324864, 140122444328959, +SNULL, 94076832014335, 94076832018431, +STORE, 94076832010240, 94076832014335, +STORE, 94076832014336, 94076832018431, +SNULL, 140122446589951, 140122446594047, +STORE, 140122446585856, 140122446589951, +STORE, 140122446589952, 140122446594047, +ERASE, 140122446557184, 140122446585855, +STORE, 94076845723648, 94076845858815, +STORE, 140122429939712, 140122438332415, +SNULL, 140122429943807, 140122438332415, +STORE, 140122429939712, 140122429943807, +STORE, 140122429943808, 140122438332415, +STORE, 140122421547008, 140122429939711, +STORE, 140122287329280, 140122421547007, +SNULL, 140122287329280, 140122301399039, +STORE, 140122301399040, 140122421547007, +STORE, 140122287329280, 140122301399039, +ERASE, 140122287329280, 140122301399039, +SNULL, 140122368507903, 140122421547007, +STORE, 140122301399040, 140122368507903, +STORE, 140122368507904, 140122421547007, +ERASE, 140122368507904, 
140122421547007, +SNULL, 140122301534207, 140122368507903, +STORE, 140122301399040, 140122301534207, +STORE, 140122301534208, 140122368507903, +SNULL, 140122421551103, 140122429939711, +STORE, 140122421547008, 140122421551103, +STORE, 140122421551104, 140122429939711, +STORE, 140122413154304, 140122421547007, +SNULL, 140122413158399, 140122421547007, +STORE, 140122413154304, 140122413158399, +STORE, 140122413158400, 140122421547007, +STORE, 140122404761600, 140122413154303, +SNULL, 140122404765695, 140122413154303, +STORE, 140122404761600, 140122404765695, +STORE, 140122404765696, 140122413154303, +STORE, 140122396368896, 140122404761599, +SNULL, 140122396372991, 140122404761599, +STORE, 140122396368896, 140122396372991, +STORE, 140122396372992, 140122404761599, +STORE, 140122387976192, 140122396368895, +STORE, 140122167181312, 140122301399039, +SNULL, 140122234290175, 140122301399039, +STORE, 140122167181312, 140122234290175, +STORE, 140122234290176, 140122301399039, +ERASE, 140122234290176, 140122301399039, +SNULL, 140122167316479, 140122234290175, +STORE, 140122167181312, 140122167316479, +STORE, 140122167316480, 140122234290175, +STORE, 140122379583488, 140122396368895, +STORE, 140122371190784, 140122396368895, +STORE, 140122167316480, 140122301399039, +STORE, 140122158788608, 140122167181311, +SNULL, 140122371190784, 140122387976191, +STORE, 140122387976192, 140122396368895, +STORE, 140122371190784, 140122387976191, +SNULL, 140122387980287, 140122396368895, +STORE, 140122387976192, 140122387980287, +STORE, 140122387980288, 140122396368895, +SNULL, 140122167316480, 140122234290175, +STORE, 140122234290176, 140122301399039, +STORE, 140122167316480, 140122234290175, +SNULL, 140122234425343, 140122301399039, +STORE, 140122234290176, 140122234425343, +STORE, 140122234425344, 140122301399039, +STORE, 140122024570880, 140122158788607, +SNULL, 140122024570880, 140122032963583, +STORE, 140122032963584, 140122158788607, +STORE, 140122024570880, 140122032963583, +ERASE, 140122024570880, 140122032963583, +STORE, 140121898745856, 140122158788607, +STORE, 140121890353152, 140121898745855, +SNULL, 140122100072447, 140122158788607, +STORE, 140121898745856, 140122100072447, +STORE, 140122100072448, 140122158788607, +ERASE, 140122100072448, 140122158788607, +SNULL, 140121965854719, 140122100072447, +STORE, 140121898745856, 140121965854719, +STORE, 140121965854720, 140122100072447, +SNULL, 140121965854720, 140122032963583, +STORE, 140122032963584, 140122100072447, +STORE, 140121965854720, 140122032963583, +ERASE, 140121965854720, 140122032963583, +SNULL, 140121898881023, 140121965854719, +STORE, 140121898745856, 140121898881023, +STORE, 140121898881024, 140121965854719, +SNULL, 140121890357247, 140121898745855, +STORE, 140121890353152, 140121890357247, +STORE, 140121890357248, 140121898745855, +SNULL, 140122371190784, 140122379583487, +STORE, 140122379583488, 140122387976191, +STORE, 140122371190784, 140122379583487, +SNULL, 140122379587583, 140122387976191, +STORE, 140122379583488, 140122379587583, +STORE, 140122379587584, 140122387976191, +SNULL, 140122033098751, 140122100072447, +STORE, 140122032963584, 140122033098751, +STORE, 140122033098752, 140122100072447, +SNULL, 140122158792703, 140122167181311, +STORE, 140122158788608, 140122158792703, +STORE, 140122158792704, 140122167181311, +STORE, 140122150395904, 140122158788607, +STORE, 140122142003200, 140122158788607, +SNULL, 140122142007295, 140122158788607, +STORE, 140122142003200, 140122142007295, +STORE, 140122142007296, 140122158788607, +SNULL, 
140122371194879, 140122379583487, +STORE, 140122371190784, 140122371194879, +STORE, 140122371194880, 140122379583487, +SNULL, 140122142007296, 140122150395903, +STORE, 140122150395904, 140122158788607, +STORE, 140122142007296, 140122150395903, +SNULL, 140122150399999, 140122158788607, +STORE, 140122150395904, 140122150399999, +STORE, 140122150400000, 140122158788607, +STORE, 140122133610496, 140122142003199, +STORE, 140122125217792, 140122142003199, +STORE, 140122116825088, 140122142003199, +SNULL, 140122116829183, 140122142003199, +STORE, 140122116825088, 140122116829183, +STORE, 140122116829184, 140122142003199, +SNULL, 140122116829184, 140122133610495, +STORE, 140122133610496, 140122142003199, +STORE, 140122116829184, 140122133610495, +SNULL, 140122133614591, 140122142003199, +STORE, 140122133610496, 140122133614591, +STORE, 140122133614592, 140122142003199, +SNULL, 140122116829184, 140122125217791, +STORE, 140122125217792, 140122133610495, +STORE, 140122116829184, 140122125217791, +SNULL, 140122125221887, 140122133610495, +STORE, 140122125217792, 140122125221887, +STORE, 140122125221888, 140122133610495, +STORE, 140122108432384, 140122116825087, +SNULL, 140122108436479, 140122116825087, +STORE, 140122108432384, 140122108436479, +STORE, 140122108436480, 140122116825087, +STORE, 140122024570880, 140122032963583, +STORE, 140122016178176, 140122032963583, +SNULL, 140122016182271, 140122032963583, +STORE, 140122016178176, 140122016182271, +STORE, 140122016182272, 140122032963583, +SNULL, 140122016182272, 140122024570879, +STORE, 140122024570880, 140122032963583, +STORE, 140122016182272, 140122024570879, +SNULL, 140122024574975, 140122032963583, +STORE, 140122024570880, 140122024574975, +STORE, 140122024574976, 140122032963583, +STORE, 140122007785472, 140122016178175, +SNULL, 140122007789567, 140122016178175, +STORE, 140122007785472, 140122007789567, +STORE, 140122007789568, 140122016178175, +STORE, 140121999392768, 140122007785471, +STORE, 140121991000064, 140122007785471, +SNULL, 140121991004159, 140122007785471, +STORE, 140121991000064, 140121991004159, +STORE, 140121991004160, 140122007785471, +SNULL, 140121991004160, 140121999392767, +STORE, 140121999392768, 140122007785471, +STORE, 140121991004160, 140121999392767, +SNULL, 140121999396863, 140122007785471, +STORE, 140121999392768, 140121999396863, +STORE, 140121999396864, 140122007785471, +STORE, 140121982607360, 140121991000063, +STORE, 140121823244288, 140121890353151, +ERASE, 140121823244288, 140121890353151, +STORE, 140121756135424, 140121890353151, +SNULL, 140121756135424, 140121764528127, +STORE, 140121764528128, 140121890353151, +STORE, 140121756135424, 140121764528127, +ERASE, 140121756135424, 140121764528127, +SNULL, 140121831636991, 140121890353151, +STORE, 140121764528128, 140121831636991, +STORE, 140121831636992, 140121890353151, +ERASE, 140121831636992, 140121890353151, +STORE, 140121974214656, 140121991000063, +STORE, 140121630310400, 140121831636991, +SNULL, 140121697419263, 140121831636991, +STORE, 140121630310400, 140121697419263, +STORE, 140121697419264, 140121831636991, +SNULL, 140121697419264, 140121764528127, +STORE, 140121764528128, 140121831636991, +STORE, 140121697419264, 140121764528127, +ERASE, 140121697419264, 140121764528127, +STORE, 140121881960448, 140121890353151, +STORE, 140121630310400, 140121831636991, +STORE, 140121873567744, 140121890353151, +SNULL, 140121630310400, 140121697419263, +STORE, 140121697419264, 140121831636991, +STORE, 140121630310400, 140121697419263, +SNULL, 140121697554431, 
140121831636991, +STORE, 140121697419264, 140121697554431, +STORE, 140121697554432, 140121831636991, +STORE, 140121865175040, 140121890353151, +STORE, 140121856782336, 140121890353151, +STORE, 140121848389632, 140121890353151, +STORE, 140121839996928, 140121890353151, +STORE, 140121496092672, 140121697419263, +STORE, 140121487699968, 140121496092671, +STORE, 140121420591104, 140121487699967, +STORE, 140121412198400, 140121420591103, +ERASE, 140121420591104, 140121487699967, +STORE, 140121479307264, 140121496092671, +STORE, 140121277980672, 140121412198399, +SNULL, 140121277980672, 140121294766079, +STORE, 140121294766080, 140121412198399, +STORE, 140121277980672, 140121294766079, +ERASE, 140121277980672, 140121294766079, +STORE, 140121470914560, 140121496092671, +STORE, 140121462521856, 140121496092671, +STORE, 140121160548352, 140121412198399, +STORE, 140121454129152, 140121496092671, +SNULL, 140121227657215, 140121412198399, +STORE, 140121160548352, 140121227657215, +STORE, 140121227657216, 140121412198399, +SNULL, 140121227657216, 140121294766079, +STORE, 140121294766080, 140121412198399, +STORE, 140121227657216, 140121294766079, +ERASE, 140121227657216, 140121294766079, +STORE, 140121445736448, 140121496092671, +STORE, 140121437343744, 140121496092671, +SNULL, 140121437343744, 140121445736447, +STORE, 140121445736448, 140121496092671, +STORE, 140121437343744, 140121445736447, +SNULL, 140121445740543, 140121496092671, +STORE, 140121445736448, 140121445740543, +STORE, 140121445740544, 140121496092671, +SNULL, 140121697554432, 140121764528127, +STORE, 140121764528128, 140121831636991, +STORE, 140121697554432, 140121764528127, +SNULL, 140121764663295, 140121831636991, +STORE, 140121764528128, 140121764663295, +STORE, 140121764663296, 140121831636991, +SNULL, 140121496092672, 140121630310399, +STORE, 140121630310400, 140121697419263, +STORE, 140121496092672, 140121630310399, +SNULL, 140121630445567, 140121697419263, +STORE, 140121630310400, 140121630445567, +STORE, 140121630445568, 140121697419263, +SNULL, 140121445740544, 140121454129151, +STORE, 140121454129152, 140121496092671, +STORE, 140121445740544, 140121454129151, +SNULL, 140121454133247, 140121496092671, +STORE, 140121454129152, 140121454133247, +STORE, 140121454133248, 140121496092671, +STORE, 140121026330624, 140121227657215, +SNULL, 140121093439487, 140121227657215, +STORE, 140121026330624, 140121093439487, +STORE, 140121093439488, 140121227657215, +SNULL, 140121093439488, 140121160548351, +STORE, 140121160548352, 140121227657215, +STORE, 140121093439488, 140121160548351, +ERASE, 140121093439488, 140121160548351, +SNULL, 140121563201535, 140121630310399, +STORE, 140121496092672, 140121563201535, +STORE, 140121563201536, 140121630310399, +ERASE, 140121563201536, 140121630310399, +STORE, 140120892112896, 140121093439487, +SNULL, 140120959221759, 140121093439487, +STORE, 140120892112896, 140120959221759, +STORE, 140120959221760, 140121093439487, +SNULL, 140120959221760, 140121026330623, +STORE, 140121026330624, 140121093439487, +STORE, 140120959221760, 140121026330623, +ERASE, 140120959221760, 140121026330623, +STORE, 140120757895168, 140120959221759, +SNULL, 140121361874943, 140121412198399, +STORE, 140121294766080, 140121361874943, +STORE, 140121361874944, 140121412198399, +ERASE, 140121361874944, 140121412198399, +SNULL, 140121294901247, 140121361874943, +STORE, 140121294766080, 140121294901247, +STORE, 140121294901248, 140121361874943, +STORE, 140120623677440, 140120959221759, +SNULL, 140120690786303, 140120959221759, +STORE, 
140120623677440, 140120690786303, +STORE, 140120690786304, 140120959221759, +SNULL, 140120690786304, 140120757895167, +STORE, 140120757895168, 140120959221759, +STORE, 140120690786304, 140120757895167, +ERASE, 140120690786304, 140120757895167, +SNULL, 140121160683519, 140121227657215, +STORE, 140121160548352, 140121160683519, +STORE, 140121160683520, 140121227657215, +SNULL, 140121974214656, 140121982607359, +STORE, 140121982607360, 140121991000063, +STORE, 140121974214656, 140121982607359, +SNULL, 140121982611455, 140121991000063, +STORE, 140121982607360, 140121982611455, +STORE, 140121982611456, 140121991000063, +SNULL, 140121839996928, 140121873567743, +STORE, 140121873567744, 140121890353151, +STORE, 140121839996928, 140121873567743, +SNULL, 140121873571839, 140121890353151, +STORE, 140121873567744, 140121873571839, +STORE, 140121873571840, 140121890353151, +SNULL, 140121873571840, 140121881960447, +STORE, 140121881960448, 140121890353151, +STORE, 140121873571840, 140121881960447, +SNULL, 140121881964543, 140121890353151, +STORE, 140121881960448, 140121881964543, +STORE, 140121881964544, 140121890353151, +SNULL, 140121840001023, 140121873567743, +STORE, 140121839996928, 140121840001023, +STORE, 140121840001024, 140121873567743, +SNULL, 140121840001024, 140121865175039, +STORE, 140121865175040, 140121873567743, +STORE, 140121840001024, 140121865175039, +SNULL, 140121865179135, 140121873567743, +STORE, 140121865175040, 140121865179135, +STORE, 140121865179136, 140121873567743, +SNULL, 140121437347839, 140121445736447, +STORE, 140121437343744, 140121437347839, +STORE, 140121437347840, 140121445736447, +STORE, 140121621917696, 140121630310399, +STORE, 140121613524992, 140121630310399, +SNULL, 140121026465791, 140121093439487, +STORE, 140121026330624, 140121026465791, +STORE, 140121026465792, 140121093439487, +SNULL, 140121496227839, 140121563201535, +STORE, 140121496092672, 140121496227839, +STORE, 140121496227840, 140121563201535, +SNULL, 140120757895168, 140120892112895, +STORE, 140120892112896, 140120959221759, +STORE, 140120757895168, 140120892112895, +SNULL, 140120892248063, 140120959221759, +STORE, 140120892112896, 140120892248063, +STORE, 140120892248064, 140120959221759, +SNULL, 140120825004031, 140120892112895, +STORE, 140120757895168, 140120825004031, +STORE, 140120825004032, 140120892112895, +ERASE, 140120825004032, 140120892112895, +SNULL, 140120623812607, 140120690786303, +STORE, 140120623677440, 140120623812607, +STORE, 140120623812608, 140120690786303, +SNULL, 140120758030335, 140120825004031, +STORE, 140120757895168, 140120758030335, +STORE, 140120758030336, 140120825004031, +SNULL, 140121454133248, 140121462521855, +STORE, 140121462521856, 140121496092671, +STORE, 140121454133248, 140121462521855, +SNULL, 140121462525951, 140121496092671, +STORE, 140121462521856, 140121462525951, +STORE, 140121462525952, 140121496092671, +STORE, 140121605132288, 140121630310399, +SNULL, 140121605136383, 140121630310399, +STORE, 140121605132288, 140121605136383, +STORE, 140121605136384, 140121630310399, +STORE, 140121596739584, 140121605132287, +SNULL, 140121605136384, 140121621917695, +STORE, 140121621917696, 140121630310399, +STORE, 140121605136384, 140121621917695, +SNULL, 140121621921791, 140121630310399, +STORE, 140121621917696, 140121621921791, +STORE, 140121621921792, 140121630310399, +STORE, 140121588346880, 140121605132287, +STORE, 140121579954176, 140121605132287, +SNULL, 140121412202495, 140121420591103, +STORE, 140121412198400, 140121412202495, +STORE, 140121412202496, 
140121420591103, +SNULL, 140121974218751, 140121982607359, +STORE, 140121974214656, 140121974218751, +STORE, 140121974218752, 140121982607359, +SNULL, 140121462525952, 140121479307263, +STORE, 140121479307264, 140121496092671, +STORE, 140121462525952, 140121479307263, +SNULL, 140121479311359, 140121496092671, +STORE, 140121479307264, 140121479311359, +STORE, 140121479311360, 140121496092671, +STORE, 140121571561472, 140121605132287, +SNULL, 140121571565567, 140121605132287, +STORE, 140121571561472, 140121571565567, +STORE, 140121571565568, 140121605132287, +STORE, 140121428951040, 140121437343743, +SNULL, 140121428955135, 140121437343743, +STORE, 140121428951040, 140121428955135, +STORE, 140121428955136, 140121437343743, +SNULL, 140121840001024, 140121856782335, +STORE, 140121856782336, 140121865175039, +STORE, 140121840001024, 140121856782335, +SNULL, 140121856786431, 140121865175039, +STORE, 140121856782336, 140121856786431, +STORE, 140121856786432, 140121865175039, +STORE, 140121403805696, 140121412198399, +SNULL, 140121840001024, 140121848389631, +STORE, 140121848389632, 140121856782335, +STORE, 140121840001024, 140121848389631, +SNULL, 140121848393727, 140121856782335, +STORE, 140121848389632, 140121848393727, +STORE, 140121848393728, 140121856782335, +SNULL, 140121479311360, 140121487699967, +STORE, 140121487699968, 140121496092671, +STORE, 140121479311360, 140121487699967, +SNULL, 140121487704063, 140121496092671, +STORE, 140121487699968, 140121487704063, +STORE, 140121487704064, 140121496092671, +STORE, 140121395412992, 140121412198399, +STORE, 140121387020288, 140121412198399, +SNULL, 140121387024383, 140121412198399, +STORE, 140121387020288, 140121387024383, +STORE, 140121387024384, 140121412198399, +SNULL, 140121605136384, 140121613524991, +STORE, 140121613524992, 140121621917695, +STORE, 140121605136384, 140121613524991, +SNULL, 140121613529087, 140121621917695, +STORE, 140121613524992, 140121613529087, +STORE, 140121613529088, 140121621917695, +SNULL, 140121462525952, 140121470914559, +STORE, 140121470914560, 140121479307263, +STORE, 140121462525952, 140121470914559, +SNULL, 140121470918655, 140121479307263, +STORE, 140121470914560, 140121470918655, +STORE, 140121470918656, 140121479307263, +STORE, 140121378627584, 140121387020287, +SNULL, 140121378631679, 140121387020287, +STORE, 140121378627584, 140121378631679, +STORE, 140121378631680, 140121387020287, +SNULL, 140121571565568, 140121596739583, +STORE, 140121596739584, 140121605132287, +STORE, 140121571565568, 140121596739583, +SNULL, 140121596743679, 140121605132287, +STORE, 140121596739584, 140121596743679, +STORE, 140121596743680, 140121605132287, +SNULL, 140121387024384, 140121403805695, +STORE, 140121403805696, 140121412198399, +STORE, 140121387024384, 140121403805695, +SNULL, 140121403809791, 140121412198399, +STORE, 140121403805696, 140121403809791, +STORE, 140121403809792, 140121412198399, +STORE, 140121370234880, 140121378627583, +SNULL, 140121387024384, 140121395412991, +STORE, 140121395412992, 140121403805695, +STORE, 140121387024384, 140121395412991, +SNULL, 140121395417087, 140121403805695, +STORE, 140121395412992, 140121395417087, +STORE, 140121395417088, 140121403805695, +SNULL, 140121571565568, 140121588346879, +STORE, 140121588346880, 140121596739583, +STORE, 140121571565568, 140121588346879, +SNULL, 140121588350975, 140121596739583, +STORE, 140121588346880, 140121588350975, +STORE, 140121588350976, 140121596739583, +SNULL, 140121571565568, 140121579954175, +STORE, 140121579954176, 140121588346879, +STORE, 
140121571565568, 140121579954175, +SNULL, 140121579958271, 140121588346879, +STORE, 140121579954176, 140121579958271, +STORE, 140121579958272, 140121588346879, +STORE, 140121286373376, 140121294766079, +STORE, 140121277980672, 140121294766079, +SNULL, 140121277980672, 140121286373375, +STORE, 140121286373376, 140121294766079, +STORE, 140121277980672, 140121286373375, +SNULL, 140121286377471, 140121294766079, +STORE, 140121286373376, 140121286377471, +STORE, 140121286377472, 140121294766079, +STORE, 140121269587968, 140121286373375, +STORE, 140121261195264, 140121286373375, +SNULL, 140121261195264, 140121269587967, +STORE, 140121269587968, 140121286373375, +STORE, 140121261195264, 140121269587967, +SNULL, 140121269592063, 140121286373375, +STORE, 140121269587968, 140121269592063, +STORE, 140121269592064, 140121286373375, +STORE, 140121252802560, 140121269587967, +SNULL, 140121252806655, 140121269587967, +STORE, 140121252802560, 140121252806655, +STORE, 140121252806656, 140121269587967, +STORE, 140121244409856, 140121252802559, +STORE, 140121236017152, 140121252802559, +SNULL, 140121236017152, 140121244409855, +STORE, 140121244409856, 140121252802559, +STORE, 140121236017152, 140121244409855, +SNULL, 140121244413951, 140121252802559, +STORE, 140121244409856, 140121244413951, +STORE, 140121244413952, 140121252802559, +SNULL, 140121370238975, 140121378627583, +STORE, 140121370234880, 140121370238975, +STORE, 140121370238976, 140121378627583, +STORE, 140121152155648, 140121160548351, +STORE, 140121143762944, 140121160548351, +STORE, 140121135370240, 140121160548351, +SNULL, 140121135374335, 140121160548351, +STORE, 140121135370240, 140121135374335, +STORE, 140121135374336, 140121160548351, +STORE, 140121126977536, 140121135370239, +STORE, 140121118584832, 140121135370239, +STORE, 140121110192128, 140121135370239, +SNULL, 140121110192128, 140121118584831, +STORE, 140121118584832, 140121135370239, +STORE, 140121110192128, 140121118584831, +SNULL, 140121118588927, 140121135370239, +STORE, 140121118584832, 140121118588927, +STORE, 140121118588928, 140121135370239, +STORE, 140121101799424, 140121118584831, +STORE, 140121017937920, 140121026330623, +STORE, 140121009545216, 140121026330623, +SNULL, 140121009545216, 140121017937919, +STORE, 140121017937920, 140121026330623, +STORE, 140121009545216, 140121017937919, +SNULL, 140121017942015, 140121026330623, +STORE, 140121017937920, 140121017942015, +STORE, 140121017942016, 140121026330623, +SNULL, 140121269592064, 140121277980671, +STORE, 140121277980672, 140121286373375, +STORE, 140121269592064, 140121277980671, +SNULL, 140121277984767, 140121286373375, +STORE, 140121277980672, 140121277984767, +STORE, 140121277984768, 140121286373375, +STORE, 140121001152512, 140121017937919, +SNULL, 140121252806656, 140121261195263, +STORE, 140121261195264, 140121269587967, +STORE, 140121252806656, 140121261195263, +SNULL, 140121261199359, 140121269587967, +STORE, 140121261195264, 140121261199359, +STORE, 140121261199360, 140121269587967, +SNULL, 140121135374336, 140121152155647, +STORE, 140121152155648, 140121160548351, +STORE, 140121135374336, 140121152155647, +SNULL, 140121152159743, 140121160548351, +STORE, 140121152155648, 140121152159743, +STORE, 140121152159744, 140121160548351, +STORE, 140120992759808, 140121017937919, +STORE, 140120984367104, 140121017937919, +STORE, 140120975974400, 140121017937919, +SNULL, 140121101799424, 140121110192127, +STORE, 140121110192128, 140121118584831, +STORE, 140121101799424, 140121110192127, +SNULL, 140121110196223, 
140121118584831, +STORE, 140121110192128, 140121110196223, +STORE, 140121110196224, 140121118584831, +SNULL, 140121118588928, 140121126977535, +STORE, 140121126977536, 140121135370239, +STORE, 140121118588928, 140121126977535, +SNULL, 140121126981631, 140121135370239, +STORE, 140121126977536, 140121126981631, +STORE, 140121126981632, 140121135370239, +STORE, 140120967581696, 140121017937919, +STORE, 140120883720192, 140120892112895, +SNULL, 140120883724287, 140120892112895, +STORE, 140120883720192, 140120883724287, +STORE, 140120883724288, 140120892112895, +STORE, 140120875327488, 140120883720191, +SNULL, 140121101803519, 140121110192127, +STORE, 140121101799424, 140121101803519, +STORE, 140121101803520, 140121110192127, +SNULL, 140121135374336, 140121143762943, +STORE, 140121143762944, 140121152155647, +STORE, 140121135374336, 140121143762943, +SNULL, 140121143767039, 140121152155647, +STORE, 140121143762944, 140121143767039, +STORE, 140121143767040, 140121152155647, +STORE, 140120866934784, 140120883720191, +SNULL, 140120967581696, 140120984367103, +STORE, 140120984367104, 140121017937919, +STORE, 140120967581696, 140120984367103, +SNULL, 140120984371199, 140121017937919, +STORE, 140120984367104, 140120984371199, +STORE, 140120984371200, 140121017937919, +STORE, 140120858542080, 140120883720191, +SNULL, 140121236021247, 140121244409855, +STORE, 140121236017152, 140121236021247, +STORE, 140121236021248, 140121244409855, +SNULL, 140120984371200, 140121009545215, +STORE, 140121009545216, 140121017937919, +STORE, 140120984371200, 140121009545215, +SNULL, 140121009549311, 140121017937919, +STORE, 140121009545216, 140121009549311, +STORE, 140121009549312, 140121017937919, +SNULL, 140120984371200, 140120992759807, +STORE, 140120992759808, 140121009545215, +STORE, 140120984371200, 140120992759807, +SNULL, 140120992763903, 140121009545215, +STORE, 140120992759808, 140120992763903, +STORE, 140120992763904, 140121009545215, +SNULL, 140120992763904, 140121001152511, +STORE, 140121001152512, 140121009545215, +STORE, 140120992763904, 140121001152511, +SNULL, 140121001156607, 140121009545215, +STORE, 140121001152512, 140121001156607, +STORE, 140121001156608, 140121009545215, +STORE, 140120850149376, 140120883720191, +SNULL, 140120850153471, 140120883720191, +STORE, 140120850149376, 140120850153471, +STORE, 140120850153472, 140120883720191, +SNULL, 140120967585791, 140120984367103, +STORE, 140120967581696, 140120967585791, +STORE, 140120967585792, 140120984367103, +SNULL, 140120850153472, 140120866934783, +STORE, 140120866934784, 140120883720191, +STORE, 140120850153472, 140120866934783, +SNULL, 140120866938879, 140120883720191, +STORE, 140120866934784, 140120866938879, +STORE, 140120866938880, 140120883720191, +STORE, 140120841756672, 140120850149375, +SNULL, 140120967585792, 140120975974399, +STORE, 140120975974400, 140120984367103, +STORE, 140120967585792, 140120975974399, +SNULL, 140120975978495, 140120984367103, +STORE, 140120975974400, 140120975978495, +STORE, 140120975978496, 140120984367103, +SNULL, 140120866938880, 140120875327487, +STORE, 140120875327488, 140120883720191, +STORE, 140120866938880, 140120875327487, +SNULL, 140120875331583, 140120883720191, +STORE, 140120875327488, 140120875331583, +STORE, 140120875331584, 140120883720191, +STORE, 140120833363968, 140120850149375, +STORE, 140120749502464, 140120757895167, +STORE, 140120741109760, 140120757895167, +STORE, 140120732717056, 140120757895167, +STORE, 140120724324352, 140120757895167, +SNULL, 140120724324352, 140120732717055, +STORE, 
140120732717056, 140120757895167, +STORE, 140120724324352, 140120732717055, +SNULL, 140120732721151, 140120757895167, +STORE, 140120732717056, 140120732721151, +STORE, 140120732721152, 140120757895167, +STORE, 140120715931648, 140120732717055, +SNULL, 140120715935743, 140120732717055, +STORE, 140120715931648, 140120715935743, +STORE, 140120715935744, 140120732717055, +SNULL, 140120850153472, 140120858542079, +STORE, 140120858542080, 140120866934783, +STORE, 140120850153472, 140120858542079, +SNULL, 140120858546175, 140120866934783, +STORE, 140120858542080, 140120858546175, +STORE, 140120858546176, 140120866934783, +STORE, 140120707538944, 140120715931647, +SNULL, 140120707543039, 140120715931647, +STORE, 140120707538944, 140120707543039, +STORE, 140120707543040, 140120715931647, +SNULL, 140120833368063, 140120850149375, +STORE, 140120833363968, 140120833368063, +STORE, 140120833368064, 140120850149375, +SNULL, 140120833368064, 140120841756671, +STORE, 140120841756672, 140120850149375, +STORE, 140120833368064, 140120841756671, +SNULL, 140120841760767, 140120850149375, +STORE, 140120841756672, 140120841760767, +STORE, 140120841760768, 140120850149375, +STORE, 140120699146240, 140120707538943, +SNULL, 140120715935744, 140120724324351, +STORE, 140120724324352, 140120732717055, +STORE, 140120715935744, 140120724324351, +SNULL, 140120724328447, 140120732717055, +STORE, 140120724324352, 140120724328447, +STORE, 140120724328448, 140120732717055, +SNULL, 140120732721152, 140120741109759, +STORE, 140120741109760, 140120757895167, +STORE, 140120732721152, 140120741109759, +SNULL, 140120741113855, 140120757895167, +STORE, 140120741109760, 140120741113855, +STORE, 140120741113856, 140120757895167, +SNULL, 140120741113856, 140120749502463, +STORE, 140120749502464, 140120757895167, +STORE, 140120741113856, 140120749502463, +SNULL, 140120749506559, 140120757895167, +STORE, 140120749502464, 140120749506559, +STORE, 140120749506560, 140120757895167, +SNULL, 140120699150335, 140120707538943, +STORE, 140120699146240, 140120699150335, +STORE, 140120699150336, 140120707538943, +STORE, 140122446557184, 140122446585855, +STORE, 140122368999424, 140122371190783, +SNULL, 140122368999424, 140122369089535, +STORE, 140122369089536, 140122371190783, +STORE, 140122368999424, 140122369089535, +SNULL, 140122371182591, 140122371190783, +STORE, 140122369089536, 140122371182591, +STORE, 140122371182592, 140122371190783, +ERASE, 140122371182592, 140122371190783, +STORE, 140122371182592, 140122371190783, +SNULL, 140122371186687, 140122371190783, +STORE, 140122371182592, 140122371186687, +STORE, 140122371186688, 140122371190783, +ERASE, 140122446557184, 140122446585855, +ERASE, 140121445736448, 140121445740543, +ERASE, 140121445740544, 140121454129151, +ERASE, 140121621917696, 140121621921791, +ERASE, 140121621921792, 140121630310399, +ERASE, 140121579954176, 140121579958271, +ERASE, 140121579958272, 140121588346879, +ERASE, 140121261195264, 140121261199359, +ERASE, 140121261199360, 140121269587967, +ERASE, 140121454129152, 140121454133247, +ERASE, 140121454133248, 140121462521855, +ERASE, 140121588346880, 140121588350975, +ERASE, 140121588350976, 140121596739583, +ERASE, 140121135370240, 140121135374335, +ERASE, 140121135374336, 140121143762943, +ERASE, 140121881960448, 140121881964543, +ERASE, 140121881964544, 140121890353151, +ERASE, 140121428951040, 140121428955135, +ERASE, 140121428955136, 140121437343743, +ERASE, 140121387020288, 140121387024383, +ERASE, 140121387024384, 140121395412991, +ERASE, 140121487699968, 
140121487704063, +ERASE, 140121487704064, 140121496092671, +ERASE, 140121437343744, 140121437347839, +ERASE, 140121437347840, 140121445736447, +ERASE, 140121613524992, 140121613529087, +ERASE, 140121613529088, 140121621917695, +ERASE, 140121856782336, 140121856786431, +ERASE, 140121856786432, 140121865175039, +ERASE, 140121252802560, 140121252806655, +ERASE, 140121252806656, 140121261195263, +ERASE, 140121839996928, 140121840001023, +ERASE, 140121840001024, 140121848389631, +ERASE, 140121596739584, 140121596743679, +ERASE, 140121596743680, 140121605132287, +ERASE, 140121009545216, 140121009549311, +ERASE, 140121009549312, 140121017937919, +ERASE, 140120724324352, 140120724328447, +ERASE, 140120724328448, 140120732717055, +ERASE, 140120883720192, 140120883724287, +ERASE, 140120883724288, 140120892112895, +ERASE, 140121982607360, 140121982611455, +ERASE, 140121982611456, 140121991000063, +ERASE, 140121571561472, 140121571565567, +ERASE, 140121571565568, 140121579954175, +ERASE, 140121286373376, 140121286377471, +ERASE, 140121286377472, 140121294766079, +ERASE, 140120875327488, 140120875331583, +ERASE, 140120875331584, 140120883720191, +ERASE, 140121848389632, 140121848393727, +ERASE, 140121848393728, 140121856782335, +ERASE, 140121370234880, 140121370238975, +ERASE, 140121370238976, 140121378627583, +ERASE, 140121143762944, 140121143767039, +ERASE, 140121143767040, 140121152155647, +ERASE, 140121118584832, 140121118588927, +ERASE, 140121118588928, 140121126977535, +ERASE, 140120866934784, 140120866938879, +ERASE, 140120866938880, 140120875327487, +ERASE, 140120741109760, 140120741113855, +ERASE, 140120741113856, 140120749502463, +ERASE, 140121865175040, 140121865179135, +ERASE, 140121865179136, 140121873567743, +ERASE, 140121403805696, 140121403809791, +ERASE, 140121403809792, 140121412198399, +ERASE, 140121236017152, 140121236021247, +ERASE, 140121236021248, 140121244409855, +ERASE, 140120732717056, 140120732721151, +ERASE, 140120732721152, 140120741109759, +ERASE, 140121017937920, 140121017942015, +ERASE, 140121017942016, 140121026330623, +ERASE, 140121873567744, 140121873571839, +ERASE, 140121873571840, 140121881960447, +ERASE, 140121470914560, 140121470918655, +ERASE, 140121470918656, 140121479307263, +ERASE, 140121126977536, 140121126981631, +ERASE, 140121126981632, 140121135370239, +ERASE, 140120850149376, 140120850153471, +ERASE, 140120850153472, 140120858542079, +ERASE, 140120707538944, 140120707543039, +ERASE, 140120707543040, 140120715931647, +ERASE, 140121479307264, 140121479311359, +ERASE, 140121479311360, 140121487699967, +ERASE, 140120967581696, 140120967585791, +ERASE, 140120967585792, 140120975974399, +ERASE, 140120841756672, 140120841760767, +ERASE, 140120841760768, 140120850149375, +ERASE, 140121412198400, 140121412202495, +ERASE, 140121412202496, 140121420591103, +ERASE, 140122158788608, 140122158792703, +ERASE, 140122158792704, 140122167181311, +ERASE, 140122142003200, 140122142007295, +ERASE, 140122142007296, 140122150395903, +ERASE, 140121101799424, 140121101803519, +ERASE, 140121101803520, 140121110192127, +ERASE, 140120858542080, 140120858546175, +ERASE, 140120858546176, 140120866934783, +ERASE, 140120833363968, 140120833368063, +ERASE, 140120833368064, 140120841756671, +ERASE, 140121277980672, 140121277984767, +ERASE, 140121277984768, 140121286373375, +ERASE, 140121001152512, 140121001156607, +ERASE, 140121001156608, 140121009545215, +ERASE, 140120749502464, 140120749506559, +ERASE, 140120749506560, 140120757895167, +ERASE, 140121605132288, 140121605136383, +ERASE, 
140121605136384, 140121613524991, +ERASE, 140121378627584, 140121378631679, +ERASE, 140121378631680, 140121387020287, +ERASE, 140121110192128, 140121110196223, +ERASE, 140121110196224, 140121118584831, +ERASE, 140121462521856, 140121462525951, +ERASE, 140121462525952, 140121470914559, +ERASE, 140121395412992, 140121395417087, +ERASE, 140121395417088, 140121403805695, +ERASE, 140121152155648, 140121152159743, +ERASE, 140121152159744, 140121160548351, +ERASE, 140120992759808, 140120992763903, +ERASE, 140120992763904, 140121001152511, +ERASE, 140122387976192, 140122387980287, +ERASE, 140122387980288, 140122396368895, +ERASE, 140121890353152, 140121890357247, +ERASE, 140121890357248, 140121898745855, +ERASE, 140121269587968, 140121269592063, +ERASE, 140121269592064, 140121277980671, + }; + unsigned long set37[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140722404016128, 140737488351231, +SNULL, 140722404020223, 140737488351231, +STORE, 140722404016128, 140722404020223, +STORE, 140722403885056, 140722404020223, +STORE, 94637010001920, 94637012254719, +SNULL, 94637010132991, 94637012254719, +STORE, 94637010001920, 94637010132991, +STORE, 94637010132992, 94637012254719, +ERASE, 94637010132992, 94637012254719, +STORE, 94637012226048, 94637012234239, +STORE, 94637012234240, 94637012254719, +STORE, 139760240594944, 139760242847743, +SNULL, 139760240738303, 139760242847743, +STORE, 139760240594944, 139760240738303, +STORE, 139760240738304, 139760242847743, +ERASE, 139760240738304, 139760242847743, +STORE, 139760242835456, 139760242843647, +STORE, 139760242843648, 139760242847743, +STORE, 140722405232640, 140722405236735, +STORE, 140722405220352, 140722405232639, +STORE, 139760242806784, 139760242835455, +STORE, 139760242798592, 139760242806783, +STORE, 139760238379008, 139760240594943, +SNULL, 139760238379008, 139760238477311, +STORE, 139760238477312, 139760240594943, +STORE, 139760238379008, 139760238477311, +SNULL, 139760240570367, 139760240594943, +STORE, 139760238477312, 139760240570367, +STORE, 139760240570368, 139760240594943, +SNULL, 139760240570368, 139760240578559, +STORE, 139760240578560, 139760240594943, +STORE, 139760240570368, 139760240578559, +ERASE, 139760240570368, 139760240578559, +STORE, 139760240570368, 139760240578559, +ERASE, 139760240578560, 139760240594943, +STORE, 139760240578560, 139760240594943, +STORE, 139760234582016, 139760238379007, +SNULL, 139760234582016, 139760236240895, +STORE, 139760236240896, 139760238379007, +STORE, 139760234582016, 139760236240895, +SNULL, 139760238338047, 139760238379007, +STORE, 139760236240896, 139760238338047, +STORE, 139760238338048, 139760238379007, +SNULL, 139760238338048, 139760238362623, +STORE, 139760238362624, 139760238379007, +STORE, 139760238338048, 139760238362623, +ERASE, 139760238338048, 139760238362623, +STORE, 139760238338048, 139760238362623, +ERASE, 139760238362624, 139760238379007, +STORE, 139760238362624, 139760238379007, +STORE, 139760242790400, 139760242806783, +SNULL, 139760238354431, 139760238362623, +STORE, 139760238338048, 139760238354431, +STORE, 139760238354432, 139760238362623, +SNULL, 139760240574463, 139760240578559, +STORE, 139760240570368, 139760240574463, +STORE, 139760240574464, 139760240578559, +SNULL, 94637012230143, 94637012234239, +STORE, 94637012226048, 94637012230143, +STORE, 94637012230144, 94637012234239, +SNULL, 139760242839551, 139760242843647, +STORE, 139760242835456, 139760242839551, +STORE, 139760242839552, 139760242843647, +ERASE, 139760242806784, 139760242835455, +STORE, 
94637033324544, 94637033459711, +STORE, 139760226189312, 139760234582015, +SNULL, 139760226193407, 139760234582015, +STORE, 139760226189312, 139760226193407, +STORE, 139760226193408, 139760234582015, +STORE, 139760217796608, 139760226189311, +STORE, 139760083578880, 139760217796607, +SNULL, 139760083578880, 139760114860031, +STORE, 139760114860032, 139760217796607, +STORE, 139760083578880, 139760114860031, +ERASE, 139760083578880, 139760114860031, +SNULL, 139760181968895, 139760217796607, +STORE, 139760114860032, 139760181968895, +STORE, 139760181968896, 139760217796607, +ERASE, 139760181968896, 139760217796607, +SNULL, 139760114995199, 139760181968895, +STORE, 139760114860032, 139760114995199, +STORE, 139760114995200, 139760181968895, +SNULL, 139760217800703, 139760226189311, +STORE, 139760217796608, 139760217800703, +STORE, 139760217800704, 139760226189311, +STORE, 139760209403904, 139760217796607, +SNULL, 139760209407999, 139760217796607, +STORE, 139760209403904, 139760209407999, +STORE, 139760209408000, 139760217796607, +STORE, 139760201011200, 139760209403903, +SNULL, 139760201015295, 139760209403903, +STORE, 139760201011200, 139760201015295, +STORE, 139760201015296, 139760209403903, +STORE, 139760192618496, 139760201011199, +SNULL, 139760192622591, 139760201011199, +STORE, 139760192618496, 139760192622591, +STORE, 139760192622592, 139760201011199, +STORE, 139760184225792, 139760192618495, +STORE, 139759980642304, 139760114860031, +STORE, 139759972249600, 139759980642303, +STORE, 139759963856896, 139759980642303, +STORE, 139759955464192, 139759980642303, +STORE, 139759888355328, 139759955464191, +SNULL, 139760047751167, 139760114860031, +STORE, 139759980642304, 139760047751167, +STORE, 139760047751168, 139760114860031, +ERASE, 139760047751168, 139760114860031, +SNULL, 139759980777471, 139760047751167, +STORE, 139759980642304, 139759980777471, +STORE, 139759980777472, 139760047751167, +STORE, 139759980777472, 139760114860031, +SNULL, 139759980777472, 139760047751167, +STORE, 139760047751168, 139760114860031, +STORE, 139759980777472, 139760047751167, +SNULL, 139760047886335, 139760114860031, +STORE, 139760047751168, 139760047886335, +STORE, 139760047886336, 139760114860031, +STORE, 139759821246464, 139759955464191, +SNULL, 139759821246464, 139759888355327, +STORE, 139759888355328, 139759955464191, +STORE, 139759821246464, 139759888355327, +ERASE, 139759821246464, 139759888355327, +ERASE, 139759888355328, 139759955464191, + }; + unsigned long set38[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140730666221568, 140737488351231, +SNULL, 140730666225663, 140737488351231, +STORE, 140730666221568, 140730666225663, +STORE, 140730666090496, 140730666225663, +STORE, 94177584803840, 94177587056639, +SNULL, 94177584934911, 94177587056639, +STORE, 94177584803840, 94177584934911, +STORE, 94177584934912, 94177587056639, +ERASE, 94177584934912, 94177587056639, +STORE, 94177587027968, 94177587036159, +STORE, 94177587036160, 94177587056639, +STORE, 140614382714880, 140614384967679, +SNULL, 140614382858239, 140614384967679, +STORE, 140614382714880, 140614382858239, +STORE, 140614382858240, 140614384967679, +ERASE, 140614382858240, 140614384967679, +STORE, 140614384955392, 140614384963583, +STORE, 140614384963584, 140614384967679, +STORE, 140730666315776, 140730666319871, +STORE, 140730666303488, 140730666315775, +STORE, 140614384926720, 140614384955391, +STORE, 140614384918528, 140614384926719, +STORE, 140614380498944, 140614382714879, +SNULL, 140614380498944, 140614380597247, +STORE, 
140614380597248, 140614382714879, +STORE, 140614380498944, 140614380597247, +SNULL, 140614382690303, 140614382714879, +STORE, 140614380597248, 140614382690303, +STORE, 140614382690304, 140614382714879, +SNULL, 140614382690304, 140614382698495, +STORE, 140614382698496, 140614382714879, +STORE, 140614382690304, 140614382698495, +ERASE, 140614382690304, 140614382698495, +STORE, 140614382690304, 140614382698495, +ERASE, 140614382698496, 140614382714879, +STORE, 140614382698496, 140614382714879, +STORE, 140614376701952, 140614380498943, +SNULL, 140614376701952, 140614378360831, +STORE, 140614378360832, 140614380498943, +STORE, 140614376701952, 140614378360831, +SNULL, 140614380457983, 140614380498943, +STORE, 140614378360832, 140614380457983, +STORE, 140614380457984, 140614380498943, +SNULL, 140614380457984, 140614380482559, +STORE, 140614380482560, 140614380498943, +STORE, 140614380457984, 140614380482559, +ERASE, 140614380457984, 140614380482559, +STORE, 140614380457984, 140614380482559, +ERASE, 140614380482560, 140614380498943, +STORE, 140614380482560, 140614380498943, +STORE, 140614384910336, 140614384926719, +SNULL, 140614380474367, 140614380482559, +STORE, 140614380457984, 140614380474367, +STORE, 140614380474368, 140614380482559, +SNULL, 140614382694399, 140614382698495, +STORE, 140614382690304, 140614382694399, +STORE, 140614382694400, 140614382698495, +SNULL, 94177587032063, 94177587036159, +STORE, 94177587027968, 94177587032063, +STORE, 94177587032064, 94177587036159, +SNULL, 140614384959487, 140614384963583, +STORE, 140614384955392, 140614384959487, +STORE, 140614384959488, 140614384963583, +ERASE, 140614384926720, 140614384955391, +STORE, 94177619791872, 94177619927039, +STORE, 140614368309248, 140614376701951, +SNULL, 140614368313343, 140614376701951, +STORE, 140614368309248, 140614368313343, +STORE, 140614368313344, 140614376701951, +STORE, 140614359916544, 140614368309247, +STORE, 140614225698816, 140614359916543, +SNULL, 140614225698816, 140614276481023, +STORE, 140614276481024, 140614359916543, +STORE, 140614225698816, 140614276481023, +ERASE, 140614225698816, 140614276481023, +SNULL, 140614343589887, 140614359916543, +STORE, 140614276481024, 140614343589887, +STORE, 140614343589888, 140614359916543, +ERASE, 140614343589888, 140614359916543, +SNULL, 140614276616191, 140614343589887, +STORE, 140614276481024, 140614276616191, +STORE, 140614276616192, 140614343589887, +SNULL, 140614359920639, 140614368309247, +STORE, 140614359916544, 140614359920639, +STORE, 140614359920640, 140614368309247, +STORE, 140614351523840, 140614359916543, +SNULL, 140614351527935, 140614359916543, +STORE, 140614351523840, 140614351527935, +STORE, 140614351527936, 140614359916543, +STORE, 140614268088320, 140614276481023, +SNULL, 140614268092415, 140614276481023, +STORE, 140614268088320, 140614268092415, +STORE, 140614268092416, 140614276481023, +STORE, 140614259695616, 140614268088319, +SNULL, 140614259699711, 140614268088319, +STORE, 140614259695616, 140614259699711, +STORE, 140614259699712, 140614268088319, +STORE, 140614251302912, 140614259695615, +STORE, 140614242910208, 140614259695615, +STORE, 140614108692480, 140614242910207, +SNULL, 140614108692480, 140614142263295, +STORE, 140614142263296, 140614242910207, +STORE, 140614108692480, 140614142263295, +ERASE, 140614108692480, 140614142263295, +STORE, 140614133870592, 140614142263295, +STORE, 140613999652864, 140614133870591, +SNULL, 140613999652864, 140614008045567, +STORE, 140614008045568, 140614133870591, +STORE, 140613999652864, 140614008045567, 
+ERASE, 140613999652864, 140614008045567, +STORE, 140613999652864, 140614008045567, +STORE, 140613865435136, 140613999652863, +SNULL, 140613865435136, 140613873827839, +STORE, 140613873827840, 140613999652863, +STORE, 140613865435136, 140613873827839, +ERASE, 140613865435136, 140613873827839, +SNULL, 140614209372159, 140614242910207, +STORE, 140614142263296, 140614209372159, +STORE, 140614209372160, 140614242910207, +ERASE, 140614209372160, 140614242910207, +SNULL, 140614142398463, 140614209372159, +STORE, 140614142263296, 140614142398463, +STORE, 140614142398464, 140614209372159, +SNULL, 140614075154431, 140614133870591, +STORE, 140614008045568, 140614075154431, +STORE, 140614075154432, 140614133870591, +ERASE, 140614075154432, 140614133870591, +SNULL, 140614008180735, 140614075154431, +STORE, 140614008045568, 140614008180735, +STORE, 140614008180736, 140614075154431, +SNULL, 140613940936703, 140613999652863, +STORE, 140613873827840, 140613940936703, +STORE, 140613940936704, 140613999652863, +ERASE, 140613940936704, 140613999652863, +SNULL, 140614242914303, 140614259695615, +STORE, 140614242910208, 140614242914303, +STORE, 140614242914304, 140614259695615, +STORE, 140613739610112, 140613940936703, +STORE, 140614234517504, 140614242910207, +SNULL, 140614242914304, 140614251302911, +STORE, 140614251302912, 140614259695615, +STORE, 140614242914304, 140614251302911, +SNULL, 140614251307007, 140614259695615, +STORE, 140614251302912, 140614251307007, +STORE, 140614251307008, 140614259695615, +SNULL, 140613739610112, 140613873827839, +STORE, 140613873827840, 140613940936703, +STORE, 140613739610112, 140613873827839, +SNULL, 140613873963007, 140613940936703, +STORE, 140613873827840, 140613873963007, +STORE, 140613873963008, 140613940936703, +SNULL, 140614133874687, 140614142263295, +STORE, 140614133870592, 140614133874687, +STORE, 140614133874688, 140614142263295, +SNULL, 140613806718975, 140613873827839, +STORE, 140613739610112, 140613806718975, +STORE, 140613806718976, 140613873827839, +ERASE, 140613806718976, 140613873827839, +STORE, 140614226124800, 140614242910207, +SNULL, 140613739745279, 140613806718975, +STORE, 140613739610112, 140613739745279, +STORE, 140613739745280, 140613806718975, +SNULL, 140613999656959, 140614008045567, +STORE, 140613999652864, 140613999656959, +STORE, 140613999656960, 140614008045567, +SNULL, 140614226124800, 140614234517503, +STORE, 140614234517504, 140614242910207, +STORE, 140614226124800, 140614234517503, +SNULL, 140614234521599, 140614242910207, +STORE, 140614234517504, 140614234521599, +STORE, 140614234521600, 140614242910207, +STORE, 140614217732096, 140614234517503, +STORE, 140614125477888, 140614133870591, +SNULL, 140614125481983, 140614133870591, +STORE, 140614125477888, 140614125481983, +STORE, 140614125481984, 140614133870591, +STORE, 140614117085184, 140614125477887, +SNULL, 140614217736191, 140614234517503, +STORE, 140614217732096, 140614217736191, +STORE, 140614217736192, 140614234517503, +SNULL, 140614117089279, 140614125477887, +STORE, 140614117085184, 140614117089279, +STORE, 140614117089280, 140614125477887, +SNULL, 140614217736192, 140614226124799, +STORE, 140614226124800, 140614234517503, +STORE, 140614217736192, 140614226124799, +SNULL, 140614226128895, 140614234517503, +STORE, 140614226124800, 140614226128895, +STORE, 140614226128896, 140614234517503, +STORE, 140614108692480, 140614117085183, +STORE, 140614100299776, 140614117085183, +STORE, 140614091907072, 140614117085183, +SNULL, 140614091907072, 140614108692479, +STORE, 140614108692480, 
140614117085183, +STORE, 140614091907072, 140614108692479, +SNULL, 140614108696575, 140614117085183, +STORE, 140614108692480, 140614108696575, +STORE, 140614108696576, 140614117085183, +SNULL, 140614091907072, 140614100299775, +STORE, 140614100299776, 140614108692479, +STORE, 140614091907072, 140614100299775, +SNULL, 140614100303871, 140614108692479, +STORE, 140614100299776, 140614100303871, +STORE, 140614100303872, 140614108692479, +STORE, 140614083514368, 140614100299775, +SNULL, 140614083518463, 140614100299775, +STORE, 140614083514368, 140614083518463, +STORE, 140614083518464, 140614100299775, +STORE, 140613991260160, 140613999652863, +SNULL, 140614083518464, 140614091907071, +STORE, 140614091907072, 140614100299775, +STORE, 140614083518464, 140614091907071, +SNULL, 140614091911167, 140614100299775, +STORE, 140614091907072, 140614091911167, +STORE, 140614091911168, 140614100299775, +SNULL, 140613991264255, 140613999652863, +STORE, 140613991260160, 140613991264255, +STORE, 140613991264256, 140613999652863, +STORE, 140613982867456, 140613991260159, +SNULL, 140613982871551, 140613991260159, +STORE, 140613982867456, 140613982871551, +STORE, 140613982871552, 140613991260159, +STORE, 140613974474752, 140613982867455, +SNULL, 140613974478847, 140613982867455, +STORE, 140613974474752, 140613974478847, +STORE, 140613974478848, 140613982867455, +STORE, 140613966082048, 140613974474751, +STORE, 140613739745280, 140613873827839, +SNULL, 140613739745280, 140613806718975, +STORE, 140613806718976, 140613873827839, +STORE, 140613739745280, 140613806718975, +SNULL, 140613806854143, 140613873827839, +STORE, 140613806718976, 140613806854143, +STORE, 140613806854144, 140613873827839, +SNULL, 140613966086143, 140613974474751, +STORE, 140613966082048, 140613966086143, +STORE, 140613966086144, 140613974474751, +STORE, 140613957689344, 140613966082047, +STORE, 140613605392384, 140613739610111, +STORE, 140613949296640, 140613966082047, +STORE, 140613596999680, 140613605392383, +STORE, 140613529890816, 140613596999679, +STORE, 140613521498112, 140613529890815, +STORE, 140613513105408, 140613529890815, +STORE, 140613378887680, 140613513105407, +SNULL, 140613378887680, 140613404065791, +STORE, 140613404065792, 140613513105407, +STORE, 140613378887680, 140613404065791, +ERASE, 140613378887680, 140613404065791, +STORE, 140613395673088, 140613404065791, +STORE, 140613261455360, 140613395673087, +SNULL, 140613261455360, 140613269848063, +STORE, 140613269848064, 140613395673087, +STORE, 140613261455360, 140613269848063, +ERASE, 140613261455360, 140613269848063, +STORE, 140613261455360, 140613269848063, +STORE, 140613253062656, 140613269848063, +STORE, 140613118844928, 140613253062655, +STORE, 140613110452224, 140613118844927, +SNULL, 140613118844928, 140613135630335, +STORE, 140613135630336, 140613253062655, +STORE, 140613118844928, 140613135630335, +ERASE, 140613118844928, 140613135630335, +STORE, 140613127237632, 140613135630335, +STORE, 140613110452224, 140613135630335, +STORE, 140612976234496, 140613110452223, +STORE, 140612967841792, 140612976234495, +STORE, 140612833624064, 140612967841791, +STORE, 140612825231360, 140612833624063, +STORE, 140612816838656, 140612833624063, +STORE, 140612682620928, 140612816838655, +STORE, 140612674228224, 140612682620927, +SNULL, 140612682620928, 140612732977151, +STORE, 140612732977152, 140612816838655, +STORE, 140612682620928, 140612732977151, +ERASE, 140612682620928, 140612732977151, +SNULL, 140613672501247, 140613739610111, +STORE, 140613605392384, 140613672501247, +STORE, 
140613672501248, 140613739610111, +ERASE, 140613672501248, 140613739610111, +SNULL, 140613605527551, 140613672501247, +STORE, 140613605392384, 140613605527551, +STORE, 140613605527552, 140613672501247, +ERASE, 140613529890816, 140613596999679, +STORE, 140612540010496, 140612674228223, +SNULL, 140612540010496, 140612598759423, +STORE, 140612598759424, 140612674228223, +STORE, 140612540010496, 140612598759423, +ERASE, 140612540010496, 140612598759423, +SNULL, 140613471174655, 140613513105407, +STORE, 140613404065792, 140613471174655, +STORE, 140613471174656, 140613513105407, +ERASE, 140613471174656, 140613513105407, +SNULL, 140613404200959, 140613471174655, +STORE, 140613404065792, 140613404200959, +STORE, 140613404200960, 140613471174655, +SNULL, 140613336956927, 140613395673087, +STORE, 140613269848064, 140613336956927, +STORE, 140613336956928, 140613395673087, +ERASE, 140613336956928, 140613395673087, +SNULL, 140612833624064, 140612867194879, +STORE, 140612867194880, 140612967841791, +STORE, 140612833624064, 140612867194879, +ERASE, 140612833624064, 140612867194879, +SNULL, 140612976234496, 140613001412607, +STORE, 140613001412608, 140613110452223, +STORE, 140612976234496, 140613001412607, +ERASE, 140612976234496, 140613001412607, +SNULL, 140613202739199, 140613253062655, +STORE, 140613135630336, 140613202739199, +STORE, 140613202739200, 140613253062655, +ERASE, 140613202739200, 140613253062655, +SNULL, 140613135765503, 140613202739199, +STORE, 140613135630336, 140613135765503, +STORE, 140613135765504, 140613202739199, +SNULL, 140612816842751, 140612833624063, +STORE, 140612816838656, 140612816842751, +STORE, 140612816842752, 140612833624063, +SNULL, 140613110456319, 140613135630335, +STORE, 140613110452224, 140613110456319, +STORE, 140613110456320, 140613135630335, +SNULL, 140613949300735, 140613966082047, +STORE, 140613949296640, 140613949300735, +STORE, 140613949300736, 140613966082047, +SNULL, 140613110456320, 140613118844927, +STORE, 140613118844928, 140613135630335, +STORE, 140613110456320, 140613118844927, +SNULL, 140613118849023, 140613135630335, +STORE, 140613118844928, 140613118849023, +STORE, 140613118849024, 140613135630335, +SNULL, 140612800086015, 140612816838655, +STORE, 140612732977152, 140612800086015, +STORE, 140612800086016, 140612816838655, +ERASE, 140612800086016, 140612816838655, +SNULL, 140613253062656, 140613261455359, +STORE, 140613261455360, 140613269848063, +STORE, 140613253062656, 140613261455359, +SNULL, 140613261459455, 140613269848063, +STORE, 140613261455360, 140613261459455, +STORE, 140613261459456, 140613269848063, +SNULL, 140612674232319, 140612682620927, +STORE, 140612674228224, 140612674232319, +STORE, 140612674232320, 140612682620927, +STORE, 140613731217408, 140613739610111, +STORE, 140613722824704, 140613739610111, +SNULL, 140613949300736, 140613957689343, +STORE, 140613957689344, 140613966082047, +STORE, 140613949300736, 140613957689343, +SNULL, 140613957693439, 140613966082047, +STORE, 140613957689344, 140613957693439, +STORE, 140613957693440, 140613966082047, +STORE, 140612464541696, 140612674228223, +SNULL, 140612531650559, 140612674228223, +STORE, 140612464541696, 140612531650559, +STORE, 140612531650560, 140612674228223, +SNULL, 140612531650560, 140612598759423, +STORE, 140612598759424, 140612674228223, +STORE, 140612531650560, 140612598759423, +ERASE, 140612531650560, 140612598759423, +SNULL, 140612665868287, 140612674228223, +STORE, 140612598759424, 140612665868287, +STORE, 140612665868288, 140612674228223, +ERASE, 140612665868288, 
140612674228223, +SNULL, 140613269983231, 140613336956927, +STORE, 140613269848064, 140613269983231, +STORE, 140613269983232, 140613336956927, +SNULL, 140612934303743, 140612967841791, +STORE, 140612867194880, 140612934303743, +STORE, 140612934303744, 140612967841791, +ERASE, 140612934303744, 140612967841791, +SNULL, 140613068521471, 140613110452223, +STORE, 140613001412608, 140613068521471, +STORE, 140613068521472, 140613110452223, +ERASE, 140613068521472, 140613110452223, +STORE, 140613714432000, 140613739610111, +SNULL, 140613001547775, 140613068521471, +STORE, 140613001412608, 140613001547775, +STORE, 140613001547776, 140613068521471, +SNULL, 140612733112319, 140612800086015, +STORE, 140612732977152, 140612733112319, +STORE, 140612733112320, 140612800086015, +SNULL, 140613513109503, 140613529890815, +STORE, 140613513105408, 140613513109503, +STORE, 140613513109504, 140613529890815, +STORE, 140613706039296, 140613739610111, +STORE, 140613697646592, 140613739610111, +STORE, 140613689253888, 140613739610111, +SNULL, 140613689257983, 140613739610111, +STORE, 140613689253888, 140613689257983, +STORE, 140613689257984, 140613739610111, +SNULL, 140613253066751, 140613261455359, +STORE, 140613253062656, 140613253066751, +STORE, 140613253066752, 140613261455359, +STORE, 140613680861184, 140613689253887, +STORE, 140613588606976, 140613605392383, +SNULL, 140613689257984, 140613731217407, +STORE, 140613731217408, 140613739610111, +STORE, 140613689257984, 140613731217407, +SNULL, 140613731221503, 140613739610111, +STORE, 140613731217408, 140613731221503, +STORE, 140613731221504, 140613739610111, +STORE, 140613580214272, 140613605392383, +SNULL, 140612464676863, 140612531650559, +STORE, 140612464541696, 140612464676863, +STORE, 140612464676864, 140612531650559, +SNULL, 140612598894591, 140612665868287, +STORE, 140612598759424, 140612598894591, +STORE, 140612598894592, 140612665868287, +SNULL, 140612867330047, 140612934303743, +STORE, 140612867194880, 140612867330047, +STORE, 140612867330048, 140612934303743, +STORE, 140613571821568, 140613605392383, +SNULL, 140613571825663, 140613605392383, +STORE, 140613571821568, 140613571825663, +STORE, 140613571825664, 140613605392383, +SNULL, 140613689257984, 140613722824703, +STORE, 140613722824704, 140613731217407, +STORE, 140613689257984, 140613722824703, +SNULL, 140613722828799, 140613731217407, +STORE, 140613722824704, 140613722828799, +STORE, 140613722828800, 140613731217407, +SNULL, 140613689257984, 140613714431999, +STORE, 140613714432000, 140613722824703, +STORE, 140613689257984, 140613714431999, +SNULL, 140613714436095, 140613722824703, +STORE, 140613714432000, 140613714436095, +STORE, 140613714436096, 140613722824703, +SNULL, 140612816842752, 140612825231359, +STORE, 140612825231360, 140612833624063, +STORE, 140612816842752, 140612825231359, +SNULL, 140612825235455, 140612833624063, +STORE, 140612825231360, 140612825235455, +STORE, 140612825235456, 140612833624063, +SNULL, 140613395677183, 140613404065791, +STORE, 140613395673088, 140613395677183, +STORE, 140613395677184, 140613404065791, +SNULL, 140613689257984, 140613706039295, +STORE, 140613706039296, 140613714431999, +STORE, 140613689257984, 140613706039295, +SNULL, 140613706043391, 140613714431999, +STORE, 140613706039296, 140613706043391, +STORE, 140613706043392, 140613714431999, +SNULL, 140613118849024, 140613127237631, +STORE, 140613127237632, 140613135630335, +STORE, 140613118849024, 140613127237631, +SNULL, 140613127241727, 140613135630335, +STORE, 140613127237632, 140613127241727, +STORE, 
140613127241728, 140613135630335, +SNULL, 140613571825664, 140613580214271, +STORE, 140613580214272, 140613605392383, +STORE, 140613571825664, 140613580214271, +SNULL, 140613580218367, 140613605392383, +STORE, 140613580214272, 140613580218367, +STORE, 140613580218368, 140613605392383, +SNULL, 140613689257984, 140613697646591, +STORE, 140613697646592, 140613706039295, +STORE, 140613689257984, 140613697646591, +SNULL, 140613697650687, 140613706039295, +STORE, 140613697646592, 140613697650687, +STORE, 140613697650688, 140613706039295, +SNULL, 140613680865279, 140613689253887, +STORE, 140613680861184, 140613680865279, +STORE, 140613680865280, 140613689253887, +STORE, 140613563428864, 140613571821567, +SNULL, 140613563432959, 140613571821567, +STORE, 140613563428864, 140613563432959, +STORE, 140613563432960, 140613571821567, +SNULL, 140613580218368, 140613588606975, +STORE, 140613588606976, 140613605392383, +STORE, 140613580218368, 140613588606975, +SNULL, 140613588611071, 140613605392383, +STORE, 140613588606976, 140613588611071, +STORE, 140613588611072, 140613605392383, +SNULL, 140613513109504, 140613521498111, +STORE, 140613521498112, 140613529890815, +STORE, 140613513109504, 140613521498111, +SNULL, 140613521502207, 140613529890815, +STORE, 140613521498112, 140613521502207, +STORE, 140613521502208, 140613529890815, +SNULL, 140613588611072, 140613596999679, +STORE, 140613596999680, 140613605392383, +STORE, 140613588611072, 140613596999679, +SNULL, 140613597003775, 140613605392383, +STORE, 140613596999680, 140613597003775, +STORE, 140613597003776, 140613605392383, +STORE, 140613555036160, 140613563428863, +SNULL, 140613555040255, 140613563428863, +STORE, 140613555036160, 140613555040255, +STORE, 140613555040256, 140613563428863, +STORE, 140613546643456, 140613555036159, +STORE, 140613538250752, 140613555036159, +SNULL, 140613538250752, 140613546643455, +STORE, 140613546643456, 140613555036159, +STORE, 140613538250752, 140613546643455, +SNULL, 140613546647551, 140613555036159, +STORE, 140613546643456, 140613546647551, +STORE, 140613546647552, 140613555036159, +STORE, 140613504712704, 140613513105407, +STORE, 140613496320000, 140613513105407, +SNULL, 140613496324095, 140613513105407, +STORE, 140613496320000, 140613496324095, +STORE, 140613496324096, 140613513105407, +STORE, 140613487927296, 140613496319999, +SNULL, 140613487931391, 140613496319999, +STORE, 140613487927296, 140613487931391, +STORE, 140613487931392, 140613496319999, +STORE, 140613479534592, 140613487927295, +SNULL, 140612967845887, 140612976234495, +STORE, 140612967841792, 140612967845887, +STORE, 140612967845888, 140612976234495, +STORE, 140613387280384, 140613395673087, +STORE, 140613378887680, 140613395673087, +SNULL, 140613378887680, 140613387280383, +STORE, 140613387280384, 140613395673087, +STORE, 140613378887680, 140613387280383, +SNULL, 140613387284479, 140613395673087, +STORE, 140613387280384, 140613387284479, +STORE, 140613387284480, 140613395673087, +STORE, 140613370494976, 140613387280383, +STORE, 140613362102272, 140613387280383, +SNULL, 140613479538687, 140613487927295, +STORE, 140613479534592, 140613479538687, +STORE, 140613479538688, 140613487927295, +STORE, 140613353709568, 140613387280383, +STORE, 140613345316864, 140613387280383, +STORE, 140613244669952, 140613253062655, +SNULL, 140613345320959, 140613387280383, +STORE, 140613345316864, 140613345320959, +STORE, 140613345320960, 140613387280383, +SNULL, 140613538254847, 140613546643455, +STORE, 140613538250752, 140613538254847, +STORE, 140613538254848, 
140613546643455, +STORE, 140613236277248, 140613253062655, +STORE, 140613227884544, 140613253062655, +STORE, 140613219491840, 140613253062655, +STORE, 140613211099136, 140613253062655, +SNULL, 140613211103231, 140613253062655, +STORE, 140613211099136, 140613211103231, +STORE, 140613211103232, 140613253062655, +STORE, 140613102059520, 140613110452223, +STORE, 140613093666816, 140613110452223, +SNULL, 140613093670911, 140613110452223, +STORE, 140613093666816, 140613093670911, +STORE, 140613093670912, 140613110452223, +STORE, 140613085274112, 140613093666815, +SNULL, 140613496324096, 140613504712703, +STORE, 140613504712704, 140613513105407, +STORE, 140613496324096, 140613504712703, +SNULL, 140613504716799, 140613513105407, +STORE, 140613504712704, 140613504716799, +STORE, 140613504716800, 140613513105407, +SNULL, 140613345320960, 140613378887679, +STORE, 140613378887680, 140613387280383, +STORE, 140613345320960, 140613378887679, +SNULL, 140613378891775, 140613387280383, +STORE, 140613378887680, 140613378891775, +STORE, 140613378891776, 140613387280383, +SNULL, 140613345320960, 140613362102271, +STORE, 140613362102272, 140613378887679, +STORE, 140613345320960, 140613362102271, +SNULL, 140613362106367, 140613378887679, +STORE, 140613362102272, 140613362106367, +STORE, 140613362106368, 140613378887679, +SNULL, 140613362106368, 140613370494975, +STORE, 140613370494976, 140613378887679, +STORE, 140613362106368, 140613370494975, +SNULL, 140613370499071, 140613378887679, +STORE, 140613370494976, 140613370499071, +STORE, 140613370499072, 140613378887679, +STORE, 140613076881408, 140613093666815, +STORE, 140612993019904, 140613001412607, +SNULL, 140613076885503, 140613093666815, +STORE, 140613076881408, 140613076885503, +STORE, 140613076885504, 140613093666815, +SNULL, 140613093670912, 140613102059519, +STORE, 140613102059520, 140613110452223, +STORE, 140613093670912, 140613102059519, +SNULL, 140613102063615, 140613110452223, +STORE, 140613102059520, 140613102063615, +STORE, 140613102063616, 140613110452223, +SNULL, 140613076885504, 140613085274111, +STORE, 140613085274112, 140613093666815, +STORE, 140613076885504, 140613085274111, +SNULL, 140613085278207, 140613093666815, +STORE, 140613085274112, 140613085278207, +STORE, 140613085278208, 140613093666815, +STORE, 140612984627200, 140613001412607, +STORE, 140612967845888, 140612984627199, +SNULL, 140613211103232, 140613219491839, +STORE, 140613219491840, 140613253062655, +STORE, 140613211103232, 140613219491839, +SNULL, 140613219495935, 140613253062655, +STORE, 140613219491840, 140613219495935, +STORE, 140613219495936, 140613253062655, +STORE, 140612959449088, 140612967841791, +STORE, 140612951056384, 140612967841791, +SNULL, 140612951060479, 140612967841791, +STORE, 140612951056384, 140612951060479, +STORE, 140612951060480, 140612967841791, +SNULL, 140613345320960, 140613353709567, +STORE, 140613353709568, 140613362102271, +STORE, 140613345320960, 140613353709567, +SNULL, 140613353713663, 140613362102271, +STORE, 140613353709568, 140613353713663, +STORE, 140613353713664, 140613362102271, +SNULL, 140613219495936, 140613244669951, +STORE, 140613244669952, 140613253062655, +STORE, 140613219495936, 140613244669951, +SNULL, 140613244674047, 140613253062655, +STORE, 140613244669952, 140613244674047, +STORE, 140613244674048, 140613253062655, +STORE, 140612942663680, 140612951056383, +SNULL, 140613219495936, 140613236277247, +STORE, 140613236277248, 140613244669951, +STORE, 140613219495936, 140613236277247, +SNULL, 140613236281343, 140613244669951, +STORE, 
140613236277248, 140613236281343, +STORE, 140613236281344, 140613244669951, +SNULL, 140613219495936, 140613227884543, +STORE, 140613227884544, 140613236277247, +STORE, 140613219495936, 140613227884543, +SNULL, 140613227888639, 140613236277247, +STORE, 140613227884544, 140613227888639, +STORE, 140613227888640, 140613236277247, +SNULL, 140612984627200, 140612993019903, +STORE, 140612993019904, 140613001412607, +STORE, 140612984627200, 140612993019903, +SNULL, 140612993023999, 140613001412607, +STORE, 140612993019904, 140612993023999, +STORE, 140612993024000, 140613001412607, +STORE, 140612858802176, 140612867194879, +STORE, 140612850409472, 140612867194879, +SNULL, 140612951060480, 140612959449087, +STORE, 140612959449088, 140612967841791, +STORE, 140612951060480, 140612959449087, +SNULL, 140612959453183, 140612967841791, +STORE, 140612959449088, 140612959453183, +STORE, 140612959453184, 140612967841791, +SNULL, 140612967845888, 140612976234495, +STORE, 140612976234496, 140612984627199, +STORE, 140612967845888, 140612976234495, +SNULL, 140612976238591, 140612984627199, +STORE, 140612976234496, 140612976238591, +STORE, 140612976238592, 140612984627199, +STORE, 140612842016768, 140612867194879, +SNULL, 140612842020863, 140612867194879, +STORE, 140612842016768, 140612842020863, +STORE, 140612842020864, 140612867194879, +SNULL, 140612984631295, 140612993019903, +STORE, 140612984627200, 140612984631295, +STORE, 140612984631296, 140612993019903, +STORE, 140612825235456, 140612842016767, +STORE, 140612808445952, 140612816838655, +SNULL, 140612942667775, 140612951056383, +STORE, 140612942663680, 140612942667775, +STORE, 140612942667776, 140612951056383, +STORE, 140612724584448, 140612732977151, +SNULL, 140612724588543, 140612732977151, +STORE, 140612724584448, 140612724588543, +STORE, 140612724588544, 140612732977151, +STORE, 140612716191744, 140612724584447, +SNULL, 140612842020864, 140612850409471, +STORE, 140612850409472, 140612867194879, +STORE, 140612842020864, 140612850409471, +SNULL, 140612850413567, 140612867194879, +STORE, 140612850409472, 140612850413567, +STORE, 140612850413568, 140612867194879, +SNULL, 140612850413568, 140612858802175, +STORE, 140612858802176, 140612867194879, +STORE, 140612850413568, 140612858802175, +SNULL, 140612858806271, 140612867194879, +STORE, 140612858802176, 140612858806271, +STORE, 140612858806272, 140612867194879, +STORE, 140612707799040, 140612724584447, +SNULL, 140612707803135, 140612724584447, +STORE, 140612707799040, 140612707803135, +STORE, 140612707803136, 140612724584447, +SNULL, 140612707803136, 140612716191743, +STORE, 140612716191744, 140612724584447, +STORE, 140612707803136, 140612716191743, +SNULL, 140612716195839, 140612724584447, +STORE, 140612716191744, 140612716195839, +STORE, 140612716195840, 140612724584447, +SNULL, 140612808450047, 140612816838655, +STORE, 140612808445952, 140612808450047, +STORE, 140612808450048, 140612816838655, +SNULL, 140612825235456, 140612833624063, +STORE, 140612833624064, 140612842016767, +STORE, 140612825235456, 140612833624063, +SNULL, 140612833628159, 140612842016767, +STORE, 140612833624064, 140612833628159, +STORE, 140612833628160, 140612842016767, +STORE, 140612699406336, 140612707799039, +SNULL, 140612699410431, 140612707799039, +STORE, 140612699406336, 140612699410431, +STORE, 140612699410432, 140612707799039, +STORE, 140614384926720, 140614384955391, +STORE, 140614349332480, 140614351523839, +SNULL, 140614349332480, 140614349422591, +STORE, 140614349422592, 140614351523839, +STORE, 140614349332480, 
140614349422591, +SNULL, 140614351515647, 140614351523839, +STORE, 140614349422592, 140614351515647, +STORE, 140614351515648, 140614351523839, +ERASE, 140614351515648, 140614351523839, +STORE, 140614351515648, 140614351523839, +SNULL, 140614351519743, 140614351523839, +STORE, 140614351515648, 140614351519743, +STORE, 140614351519744, 140614351523839, +ERASE, 140614384926720, 140614384955391, +ERASE, 140613949296640, 140613949300735, +ERASE, 140613949300736, 140613957689343, +ERASE, 140613689253888, 140613689257983, +ERASE, 140613689257984, 140613697646591, +ERASE, 140613563428864, 140613563432959, +ERASE, 140613563432960, 140613571821567, +ERASE, 140613211099136, 140613211103231, +ERASE, 140613211103232, 140613219491839, +ERASE, 140614133870592, 140614133874687, +ERASE, 140614133874688, 140614142263295, +ERASE, 140612967841792, 140612967845887, +ERASE, 140612967845888, 140612976234495, +ERASE, 140613076881408, 140613076885503, +ERASE, 140613076885504, 140613085274111, +ERASE, 140612850409472, 140612850413567, +ERASE, 140612850413568, 140612858802175, +ERASE, 140613110452224, 140613110456319, +ERASE, 140613110456320, 140613118844927, +ERASE, 140613706039296, 140613706043391, +ERASE, 140613706043392, 140613714431999, +ERASE, 140613521498112, 140613521502207, +ERASE, 140613521502208, 140613529890815, +ERASE, 140613362102272, 140613362106367, +ERASE, 140613362106368, 140613370494975, +ERASE, 140613253062656, 140613253066751, +ERASE, 140613253066752, 140613261455359, +ERASE, 140612816838656, 140612816842751, +ERASE, 140612816842752, 140612825231359, +ERASE, 140613261455360, 140613261459455, +ERASE, 140613261459456, 140613269848063, +ERASE, 140613118844928, 140613118849023, +ERASE, 140613118849024, 140613127237631, +ERASE, 140613714432000, 140613714436095, +ERASE, 140613714436096, 140613722824703, +ERASE, 140613496320000, 140613496324095, +ERASE, 140613496324096, 140613504712703, +ERASE, 140613513105408, 140613513109503, +ERASE, 140613513109504, 140613521498111, +ERASE, 140613697646592, 140613697650687, +ERASE, 140613697650688, 140613706039295, +ERASE, 140613093666816, 140613093670911, +ERASE, 140613093670912, 140613102059519, +ERASE, 140612993019904, 140612993023999, +ERASE, 140612993024000, 140613001412607, +ERASE, 140613127237632, 140613127241727, +ERASE, 140613127241728, 140613135630335, +ERASE, 140613957689344, 140613957693439, +ERASE, 140613957693440, 140613966082047, +ERASE, 140613571821568, 140613571825663, +ERASE, 140613571825664, 140613580214271, +ERASE, 140613479534592, 140613479538687, +ERASE, 140613479538688, 140613487927295, +ERASE, 140612984627200, 140612984631295, +ERASE, 140612984631296, 140612993019903, +ERASE, 140613588606976, 140613588611071, +ERASE, 140613588611072, 140613596999679, +ERASE, 140613680861184, 140613680865279, +ERASE, 140613680865280, 140613689253887, +ERASE, 140613345316864, 140613345320959, +ERASE, 140613345320960, 140613353709567, +ERASE, 140613596999680, 140613597003775, +ERASE, 140613597003776, 140613605392383, +ERASE, 140613966082048, 140613966086143, +ERASE, 140613966086144, 140613974474751, +ERASE, 140613731217408, 140613731221503, +ERASE, 140613731221504, 140613739610111, +ERASE, 140613395673088, 140613395677183, +ERASE, 140613395677184, 140613404065791, +ERASE, 140612825231360, 140612825235455, +ERASE, 140612825235456, 140612833624063, +ERASE, 140612674228224, 140612674232319, +ERASE, 140612674232320, 140612682620927, +ERASE, 140613722824704, 140613722828799, +ERASE, 140613722828800, 140613731217407, +ERASE, 140613487927296, 140613487931391, +ERASE, 
140613487931392, 140613496319999, +ERASE, 140613102059520, 140613102063615, +ERASE, 140613102063616, 140613110452223, +ERASE, 140614242910208, 140614242914303, +ERASE, 140614242914304, 140614251302911, +ERASE, 140612808445952, 140612808450047, +ERASE, 140612808450048, 140612816838655, +ERASE, 140613236277248, 140613236281343, +ERASE, 140613236281344, 140613244669951, +ERASE, 140613580214272, 140613580218367, +ERASE, 140613580218368, 140613588606975, +ERASE, 140613370494976, 140613370499071, +ERASE, 140613370499072, 140613378887679, +ERASE, 140613244669952, 140613244674047, +ERASE, 140613244674048, 140613253062655, +ERASE, 140612724584448, 140612724588543, +ERASE, 140612724588544, 140612732977151, +ERASE, 140612707799040, 140612707803135, +ERASE, 140612707803136, 140612716191743, +ERASE, 140613504712704, 140613504716799, +ERASE, 140613504716800, 140613513105407, + }; + + unsigned long set39[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140736271417344, 140737488351231, +SNULL, 140736271421439, 140737488351231, +STORE, 140736271417344, 140736271421439, +STORE, 140736271286272, 140736271421439, +STORE, 94412930822144, 94412933074943, +SNULL, 94412930953215, 94412933074943, +STORE, 94412930822144, 94412930953215, +STORE, 94412930953216, 94412933074943, +ERASE, 94412930953216, 94412933074943, +STORE, 94412933046272, 94412933054463, +STORE, 94412933054464, 94412933074943, +STORE, 140326136901632, 140326139154431, +SNULL, 140326137044991, 140326139154431, +STORE, 140326136901632, 140326137044991, +STORE, 140326137044992, 140326139154431, +ERASE, 140326137044992, 140326139154431, +STORE, 140326139142144, 140326139150335, +STORE, 140326139150336, 140326139154431, +STORE, 140736271585280, 140736271589375, +STORE, 140736271572992, 140736271585279, +STORE, 140326139113472, 140326139142143, +STORE, 140326139105280, 140326139113471, +STORE, 140326134685696, 140326136901631, +SNULL, 140326134685696, 140326134783999, +STORE, 140326134784000, 140326136901631, +STORE, 140326134685696, 140326134783999, +SNULL, 140326136877055, 140326136901631, +STORE, 140326134784000, 140326136877055, +STORE, 140326136877056, 140326136901631, +SNULL, 140326136877056, 140326136885247, +STORE, 140326136885248, 140326136901631, +STORE, 140326136877056, 140326136885247, +ERASE, 140326136877056, 140326136885247, +STORE, 140326136877056, 140326136885247, +ERASE, 140326136885248, 140326136901631, +STORE, 140326136885248, 140326136901631, +STORE, 140326130888704, 140326134685695, +SNULL, 140326130888704, 140326132547583, +STORE, 140326132547584, 140326134685695, +STORE, 140326130888704, 140326132547583, +SNULL, 140326134644735, 140326134685695, +STORE, 140326132547584, 140326134644735, +STORE, 140326134644736, 140326134685695, +SNULL, 140326134644736, 140326134669311, +STORE, 140326134669312, 140326134685695, +STORE, 140326134644736, 140326134669311, +ERASE, 140326134644736, 140326134669311, +STORE, 140326134644736, 140326134669311, +ERASE, 140326134669312, 140326134685695, +STORE, 140326134669312, 140326134685695, +STORE, 140326139097088, 140326139113471, +SNULL, 140326134661119, 140326134669311, +STORE, 140326134644736, 140326134661119, +STORE, 140326134661120, 140326134669311, +SNULL, 140326136881151, 140326136885247, +STORE, 140326136877056, 140326136881151, +STORE, 140326136881152, 140326136885247, +SNULL, 94412933050367, 94412933054463, +STORE, 94412933046272, 94412933050367, +STORE, 94412933050368, 94412933054463, +SNULL, 140326139146239, 140326139150335, +STORE, 140326139142144, 140326139146239, +STORE, 
140326139146240, 140326139150335, +ERASE, 140326139113472, 140326139142143, +STORE, 94412939493376, 94412939628543, +STORE, 140326122496000, 140326130888703, +SNULL, 140326122500095, 140326130888703, +STORE, 140326122496000, 140326122500095, +STORE, 140326122500096, 140326130888703, +STORE, 140326114103296, 140326122495999, +STORE, 140325979885568, 140326114103295, +SNULL, 140325979885568, 140326043910143, +STORE, 140326043910144, 140326114103295, +STORE, 140325979885568, 140326043910143, +ERASE, 140325979885568, 140326043910143, +SNULL, 140326111019007, 140326114103295, +STORE, 140326043910144, 140326111019007, +STORE, 140326111019008, 140326114103295, +ERASE, 140326111019008, 140326114103295, +SNULL, 140326044045311, 140326111019007, +STORE, 140326043910144, 140326044045311, +STORE, 140326044045312, 140326111019007, +SNULL, 140326114107391, 140326122495999, +STORE, 140326114103296, 140326114107391, +STORE, 140326114107392, 140326122495999, +STORE, 140326035517440, 140326043910143, +SNULL, 140326035521535, 140326043910143, +STORE, 140326035517440, 140326035521535, +STORE, 140326035521536, 140326043910143, +STORE, 140326027124736, 140326035517439, +SNULL, 140326027128831, 140326035517439, +STORE, 140326027124736, 140326027128831, +STORE, 140326027128832, 140326035517439, +STORE, 140326018732032, 140326027124735, +SNULL, 140326018736127, 140326027124735, +STORE, 140326018732032, 140326018736127, +STORE, 140326018736128, 140326027124735, +STORE, 140326010339328, 140326018732031, +STORE, 140326001946624, 140326018732031, +STORE, 140325993553920, 140326018732031, +STORE, 140325859336192, 140325993553919, +SNULL, 140325859336192, 140325909692415, +STORE, 140325909692416, 140325993553919, +STORE, 140325859336192, 140325909692415, +ERASE, 140325859336192, 140325909692415, +SNULL, 140325976801279, 140325993553919, +STORE, 140325909692416, 140325976801279, +STORE, 140325976801280, 140325993553919, +ERASE, 140325976801280, 140325993553919, +STORE, 140325985161216, 140326018732031, +STORE, 140325775474688, 140325976801279, +STORE, 140325708365824, 140325976801279, +SNULL, 140325708500991, 140325976801279, +STORE, 140325708365824, 140325708500991, +STORE, 140325708500992, 140325976801279, +SNULL, 140325708500992, 140325909692415, +STORE, 140325909692416, 140325976801279, +STORE, 140325708500992, 140325909692415, +SNULL, 140325909827583, 140325976801279, +STORE, 140325909692416, 140325909827583, +STORE, 140325909827584, 140325976801279, +SNULL, 140325842583551, 140325909692415, +STORE, 140325708500992, 140325842583551, +STORE, 140325842583552, 140325909692415, +ERASE, 140325842583552, 140325909692415, +SNULL, 140325708500992, 140325775474687, +STORE, 140325775474688, 140325842583551, +STORE, 140325708500992, 140325775474687, +SNULL, 140325775609855, 140325842583551, +STORE, 140325775474688, 140325775609855, +STORE, 140325775609856, 140325842583551, +STORE, 140325775609856, 140325909692415, +SNULL, 140325775609856, 140325842583551, +STORE, 140325842583552, 140325909692415, +STORE, 140325775609856, 140325842583551, +SNULL, 140325842718719, 140325909692415, +STORE, 140325842583552, 140325842718719, +STORE, 140325842718720, 140325909692415, +SNULL, 140325985161216, 140325993553919, +STORE, 140325993553920, 140326018732031, +STORE, 140325985161216, 140325993553919, +SNULL, 140325993558015, 140326018732031, +STORE, 140325993553920, 140325993558015, +STORE, 140325993558016, 140326018732031, +SNULL, 140325985165311, 140325993553919, +STORE, 140325985161216, 140325985165311, +STORE, 140325985165312, 
140325993553919, +SNULL, 140325993558016, 140326001946623, +STORE, 140326001946624, 140326018732031, +STORE, 140325993558016, 140326001946623, +SNULL, 140326001950719, 140326018732031, +STORE, 140326001946624, 140326001950719, +STORE, 140326001950720, 140326018732031, +SNULL, 140326001950720, 140326010339327, +STORE, 140326010339328, 140326018732031, +STORE, 140326001950720, 140326010339327, +SNULL, 140326010343423, 140326018732031, +STORE, 140326010339328, 140326010343423, +STORE, 140326010343424, 140326018732031, +STORE, 140325699973120, 140325708365823, +STORE, 140325691580416, 140325708365823, +STORE, 140325683187712, 140325708365823, +SNULL, 140325683191807, 140325708365823, +STORE, 140325683187712, 140325683191807, +STORE, 140325683191808, 140325708365823, +SNULL, 140325683191808, 140325699973119, +STORE, 140325699973120, 140325708365823, +STORE, 140325683191808, 140325699973119, +SNULL, 140325699977215, 140325708365823, +STORE, 140325699973120, 140325699977215, +STORE, 140325699977216, 140325708365823, +STORE, 140325674795008, 140325683187711, +STORE, 140325666402304, 140325683187711, +STORE, 140325658009600, 140325683187711, +SNULL, 140325658009600, 140325666402303, +STORE, 140325666402304, 140325683187711, +STORE, 140325658009600, 140325666402303, +SNULL, 140325666406399, 140325683187711, +STORE, 140325666402304, 140325666406399, +STORE, 140325666406400, 140325683187711, +SNULL, 140325683191808, 140325691580415, +STORE, 140325691580416, 140325699973119, +STORE, 140325683191808, 140325691580415, +SNULL, 140325691584511, 140325699973119, +STORE, 140325691580416, 140325691584511, +STORE, 140325691584512, 140325699973119, +SNULL, 140325666406400, 140325674795007, +STORE, 140325674795008, 140325683187711, +STORE, 140325666406400, 140325674795007, +SNULL, 140325674799103, 140325683187711, +STORE, 140325674795008, 140325674799103, +STORE, 140325674799104, 140325683187711, +STORE, 140325649616896, 140325666402303, +SNULL, 140325649616896, 140325658009599, +STORE, 140325658009600, 140325666402303, +STORE, 140325649616896, 140325658009599, +SNULL, 140325658013695, 140325666402303, +STORE, 140325658009600, 140325658013695, +STORE, 140325658013696, 140325666402303, +SNULL, 140325649620991, 140325658009599, +STORE, 140325649616896, 140325649620991, +STORE, 140325649620992, 140325658009599, +STORE, 140325641224192, 140325649616895, +STORE, 140325632831488, 140325649616895, +SNULL, 140325632835583, 140325649616895, +STORE, 140325632831488, 140325632835583, +STORE, 140325632835584, 140325649616895, +STORE, 140325624438784, 140325632831487, +SNULL, 140325624442879, 140325632831487, +STORE, 140325624438784, 140325624442879, +STORE, 140325624442880, 140325632831487, +SNULL, 140325632835584, 140325641224191, +STORE, 140325641224192, 140325649616895, +STORE, 140325632835584, 140325641224191, +SNULL, 140325641228287, 140325649616895, +STORE, 140325641224192, 140325641228287, +STORE, 140325641228288, 140325649616895, +STORE, 140325616046080, 140325624438783, +SNULL, 140325616050175, 140325624438783, +STORE, 140325616046080, 140325616050175, +STORE, 140325616050176, 140325624438783, +STORE, 140325607653376, 140325616046079, +SNULL, 140325607657471, 140325616046079, +STORE, 140325607653376, 140325607657471, +STORE, 140325607657472, 140325616046079, +STORE, 140325599260672, 140325607653375, +STORE, 140325590867968, 140325607653375, +STORE, 140325456650240, 140325590867967, +SNULL, 140325456650240, 140325507039231, +STORE, 140325507039232, 140325590867967, +STORE, 140325456650240, 140325507039231, +ERASE, 
140325456650240, 140325507039231, +STORE, 140325498646528, 140325507039231, +STORE, 140325364428800, 140325498646527, +SNULL, 140325364428800, 140325372821503, +STORE, 140325372821504, 140325498646527, +STORE, 140325364428800, 140325372821503, +ERASE, 140325364428800, 140325372821503, +STORE, 140325364428800, 140325372821503, +STORE, 140325356036096, 140325372821503, +STORE, 140325221818368, 140325356036095, +SNULL, 140325221818368, 140325238603775, +STORE, 140325238603776, 140325356036095, +STORE, 140325221818368, 140325238603775, +ERASE, 140325221818368, 140325238603775, +STORE, 140325230211072, 140325238603775, +STORE, 140325221818368, 140325238603775, +STORE, 140325087600640, 140325221818367, +STORE, 140325079207936, 140325087600639, +SNULL, 140325087600640, 140325104386047, +STORE, 140325104386048, 140325221818367, +STORE, 140325087600640, 140325104386047, +ERASE, 140325087600640, 140325104386047, +STORE, 140325095993344, 140325104386047, +STORE, 140325079207936, 140325104386047, +STORE, 140324944990208, 140325079207935, +SNULL, 140324944990208, 140324970168319, +STORE, 140324970168320, 140325079207935, +STORE, 140324944990208, 140324970168319, +ERASE, 140324944990208, 140324970168319, +STORE, 140324961775616, 140324970168319, +STORE, 140324953382912, 140324970168319, +STORE, 140324819165184, 140324953382911, +STORE, 140324684947456, 140324953382911, +STORE, 140324676554752, 140324684947455, +STORE, 140324668162048, 140324684947455, +STORE, 140324533944320, 140324668162047, +STORE, 140324525551616, 140324533944319, +SNULL, 140324533944320, 140324567515135, +STORE, 140324567515136, 140324668162047, +STORE, 140324533944320, 140324567515135, +ERASE, 140324533944320, 140324567515135, +STORE, 140324559122432, 140324567515135, +STORE, 140324391333888, 140324525551615, +SNULL, 140325574148095, 140325590867967, +STORE, 140325507039232, 140325574148095, +STORE, 140325574148096, 140325590867967, +ERASE, 140325574148096, 140325590867967, +SNULL, 140325439930367, 140325498646527, +STORE, 140325372821504, 140325439930367, +STORE, 140325439930368, 140325498646527, +ERASE, 140325439930368, 140325498646527, +SNULL, 140325305712639, 140325356036095, +STORE, 140325238603776, 140325305712639, +STORE, 140325305712640, 140325356036095, +ERASE, 140325305712640, 140325356036095, +SNULL, 140325171494911, 140325221818367, +STORE, 140325104386048, 140325171494911, +STORE, 140325171494912, 140325221818367, +ERASE, 140325171494912, 140325221818367, +SNULL, 140325104521215, 140325171494911, +STORE, 140325104386048, 140325104521215, +STORE, 140325104521216, 140325171494911, +STORE, 140324257116160, 140324525551615, +SNULL, 140324257116160, 140324299079679, +STORE, 140324299079680, 140324525551615, +STORE, 140324257116160, 140324299079679, +ERASE, 140324257116160, 140324299079679, +SNULL, 140325037277183, 140325079207935, +STORE, 140324970168320, 140325037277183, +STORE, 140325037277184, 140325079207935, +ERASE, 140325037277184, 140325079207935, +SNULL, 140324819165183, 140324953382911, +STORE, 140324684947456, 140324819165183, +STORE, 140324819165184, 140324953382911, +SNULL, 140324819165184, 140324835950591, +STORE, 140324835950592, 140324953382911, +STORE, 140324819165184, 140324835950591, +ERASE, 140324819165184, 140324835950591, +SNULL, 140324903059455, 140324953382911, +STORE, 140324835950592, 140324903059455, +STORE, 140324903059456, 140324953382911, +ERASE, 140324903059456, 140324953382911, +SNULL, 140324684947456, 140324701732863, +STORE, 140324701732864, 140324819165183, +STORE, 140324684947456, 
140324701732863, +ERASE, 140324684947456, 140324701732863, +SNULL, 140324768841727, 140324819165183, +STORE, 140324701732864, 140324768841727, +STORE, 140324768841728, 140324819165183, +ERASE, 140324768841728, 140324819165183, +SNULL, 140324634623999, 140324668162047, +STORE, 140324567515136, 140324634623999, +STORE, 140324634624000, 140324668162047, +ERASE, 140324634624000, 140324668162047, +SNULL, 140324391333887, 140324525551615, +STORE, 140324299079680, 140324391333887, +STORE, 140324391333888, 140324525551615, +SNULL, 140324391333888, 140324433297407, +STORE, 140324433297408, 140324525551615, +STORE, 140324391333888, 140324433297407, +ERASE, 140324391333888, 140324433297407, +SNULL, 140325507174399, 140325574148095, +STORE, 140325507039232, 140325507174399, +STORE, 140325507174400, 140325574148095, +SNULL, 140325590867968, 140325599260671, +STORE, 140325599260672, 140325607653375, +STORE, 140325590867968, 140325599260671, +SNULL, 140325599264767, 140325607653375, +STORE, 140325599260672, 140325599264767, +STORE, 140325599264768, 140325607653375, +SNULL, 140325372956671, 140325439930367, +STORE, 140325372821504, 140325372956671, +STORE, 140325372956672, 140325439930367, +SNULL, 140324668166143, 140324684947455, +STORE, 140324668162048, 140324668166143, +STORE, 140324668166144, 140324684947455, +SNULL, 140324525555711, 140324533944319, +STORE, 140324525551616, 140324525555711, +STORE, 140324525555712, 140324533944319, +SNULL, 140324953382912, 140324961775615, +STORE, 140324961775616, 140324970168319, +STORE, 140324953382912, 140324961775615, +SNULL, 140324961779711, 140324970168319, +STORE, 140324961775616, 140324961779711, +STORE, 140324961779712, 140324970168319, +SNULL, 140325079212031, 140325104386047, +STORE, 140325079207936, 140325079212031, +STORE, 140325079212032, 140325104386047, +SNULL, 140325221818368, 140325230211071, +STORE, 140325230211072, 140325238603775, +STORE, 140325221818368, 140325230211071, +SNULL, 140325230215167, 140325238603775, +STORE, 140325230211072, 140325230215167, +STORE, 140325230215168, 140325238603775, +SNULL, 140325356036096, 140325364428799, +STORE, 140325364428800, 140325372821503, +STORE, 140325356036096, 140325364428799, +SNULL, 140325364432895, 140325372821503, + }; + unsigned long set40[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140734309167104, 140737488351231, +SNULL, 140734309171199, 140737488351231, +STORE, 140734309167104, 140734309171199, +STORE, 140734309036032, 140734309171199, +STORE, 94270500081664, 94270502334463, +SNULL, 94270500212735, 94270502334463, +STORE, 94270500081664, 94270500212735, +STORE, 94270500212736, 94270502334463, +ERASE, 94270500212736, 94270502334463, +STORE, 94270502305792, 94270502313983, +STORE, 94270502313984, 94270502334463, +STORE, 140321935110144, 140321937362943, +SNULL, 140321935253503, 140321937362943, +STORE, 140321935110144, 140321935253503, +STORE, 140321935253504, 140321937362943, +ERASE, 140321935253504, 140321937362943, +STORE, 140321937350656, 140321937358847, +STORE, 140321937358848, 140321937362943, +STORE, 140734309625856, 140734309629951, +STORE, 140734309613568, 140734309625855, +STORE, 140321937321984, 140321937350655, +STORE, 140321937313792, 140321937321983, +STORE, 140321932894208, 140321935110143, +SNULL, 140321932894208, 140321932992511, +STORE, 140321932992512, 140321935110143, +STORE, 140321932894208, 140321932992511, +SNULL, 140321935085567, 140321935110143, +STORE, 140321932992512, 140321935085567, +STORE, 140321935085568, 140321935110143, +SNULL, 140321935085568, 
140321935093759, +STORE, 140321935093760, 140321935110143, +STORE, 140321935085568, 140321935093759, +ERASE, 140321935085568, 140321935093759, +STORE, 140321935085568, 140321935093759, +ERASE, 140321935093760, 140321935110143, +STORE, 140321935093760, 140321935110143, +STORE, 140321929097216, 140321932894207, +SNULL, 140321929097216, 140321930756095, +STORE, 140321930756096, 140321932894207, +STORE, 140321929097216, 140321930756095, +SNULL, 140321932853247, 140321932894207, +STORE, 140321930756096, 140321932853247, +STORE, 140321932853248, 140321932894207, +SNULL, 140321932853248, 140321932877823, +STORE, 140321932877824, 140321932894207, +STORE, 140321932853248, 140321932877823, +ERASE, 140321932853248, 140321932877823, +STORE, 140321932853248, 140321932877823, +ERASE, 140321932877824, 140321932894207, +STORE, 140321932877824, 140321932894207, +STORE, 140321937305600, 140321937321983, +SNULL, 140321932869631, 140321932877823, +STORE, 140321932853248, 140321932869631, +STORE, 140321932869632, 140321932877823, +SNULL, 140321935089663, 140321935093759, +STORE, 140321935085568, 140321935089663, +STORE, 140321935089664, 140321935093759, +SNULL, 94270502309887, 94270502313983, +STORE, 94270502305792, 94270502309887, +STORE, 94270502309888, 94270502313983, +SNULL, 140321937354751, 140321937358847, +STORE, 140321937350656, 140321937354751, +STORE, 140321937354752, 140321937358847, +ERASE, 140321937321984, 140321937350655, +STORE, 94270507364352, 94270507499519, +STORE, 140321920704512, 140321929097215, +SNULL, 140321920708607, 140321929097215, +STORE, 140321920704512, 140321920708607, +STORE, 140321920708608, 140321929097215, +STORE, 140321912311808, 140321920704511, +STORE, 140321778094080, 140321912311807, +SNULL, 140321778094080, 140321816051711, +STORE, 140321816051712, 140321912311807, +STORE, 140321778094080, 140321816051711, +ERASE, 140321778094080, 140321816051711, +SNULL, 140321883160575, 140321912311807, +STORE, 140321816051712, 140321883160575, +STORE, 140321883160576, 140321912311807, +ERASE, 140321883160576, 140321912311807, +SNULL, 140321816186879, 140321883160575, +STORE, 140321816051712, 140321816186879, +STORE, 140321816186880, 140321883160575, +SNULL, 140321912315903, 140321920704511, +STORE, 140321912311808, 140321912315903, +STORE, 140321912315904, 140321920704511, +STORE, 140321903919104, 140321912311807, +SNULL, 140321903923199, 140321912311807, +STORE, 140321903919104, 140321903923199, +STORE, 140321903923200, 140321912311807, +STORE, 140321895526400, 140321903919103, +SNULL, 140321895530495, 140321903919103, +STORE, 140321895526400, 140321895530495, +STORE, 140321895530496, 140321903919103, +STORE, 140321887133696, 140321895526399, +SNULL, 140321887137791, 140321895526399, +STORE, 140321887133696, 140321887137791, +STORE, 140321887137792, 140321895526399, +STORE, 140321807659008, 140321816051711, +STORE, 140321673441280, 140321807659007, +SNULL, 140321673441280, 140321681833983, +STORE, 140321681833984, 140321807659007, +STORE, 140321673441280, 140321681833983, +ERASE, 140321673441280, 140321681833983, +SNULL, 140321748942847, 140321807659007, +STORE, 140321681833984, 140321748942847, +STORE, 140321748942848, 140321807659007, +ERASE, 140321748942848, 140321807659007, +STORE, 140321799266304, 140321816051711, +STORE, 140321790873600, 140321816051711, +STORE, 140321782480896, 140321816051711, +STORE, 140321547616256, 140321748942847, +SNULL, 140321614725119, 140321748942847, +STORE, 140321547616256, 140321614725119, +STORE, 140321614725120, 140321748942847, +SNULL, 
140321614725120, 140321681833983, +STORE, 140321681833984, 140321748942847, +STORE, 140321614725120, 140321681833983, +ERASE, 140321614725120, 140321681833983, +SNULL, 140321681969151, 140321748942847, +STORE, 140321681833984, 140321681969151, +STORE, 140321681969152, 140321748942847, +STORE, 140321547616256, 140321681833983, +SNULL, 140321547616256, 140321614725119, +STORE, 140321614725120, 140321681833983, +STORE, 140321547616256, 140321614725119, +SNULL, 140321614860287, 140321681833983, +STORE, 140321614725120, 140321614860287, +STORE, 140321614860288, 140321681833983, +SNULL, 140321547751423, 140321614725119, +STORE, 140321547616256, 140321547751423, +STORE, 140321547751424, 140321614725119, +STORE, 140321480507392, 140321547616255, +SNULL, 140321782480896, 140321799266303, +STORE, 140321799266304, 140321816051711, +STORE, 140321782480896, 140321799266303, +SNULL, 140321799270399, 140321816051711, +STORE, 140321799266304, 140321799270399, +STORE, 140321799270400, 140321816051711, +STORE, 140321774088192, 140321799266303, +SNULL, 140321774088192, 140321790873599, +STORE, 140321790873600, 140321799266303, +STORE, 140321774088192, 140321790873599, +SNULL, 140321790877695, 140321799266303, +STORE, 140321790873600, 140321790877695, +STORE, 140321790877696, 140321799266303, +SNULL, 140321480642559, 140321547616255, +STORE, 140321480507392, 140321480642559, +STORE, 140321480642560, 140321547616255, +SNULL, 140321774088192, 140321782480895, +STORE, 140321782480896, 140321790873599, +STORE, 140321774088192, 140321782480895, +SNULL, 140321782484991, 140321790873599, +STORE, 140321782480896, 140321782484991, +STORE, 140321782484992, 140321790873599, +SNULL, 140321799270400, 140321807659007, +STORE, 140321807659008, 140321816051711, +STORE, 140321799270400, 140321807659007, +SNULL, 140321807663103, 140321816051711, +STORE, 140321807659008, 140321807663103, +STORE, 140321807663104, 140321816051711, +STORE, 140321765695488, 140321782480895, +STORE, 140321757302784, 140321782480895, +SNULL, 140321757306879, 140321782480895, +STORE, 140321757302784, 140321757306879, +STORE, 140321757306880, 140321782480895, +STORE, 140321472114688, 140321480507391, +STORE, 140321463721984, 140321480507391, +SNULL, 140321463726079, 140321480507391, +STORE, 140321463721984, 140321463726079, +STORE, 140321463726080, 140321480507391, +SNULL, 140321757306880, 140321774088191, +STORE, 140321774088192, 140321782480895, +STORE, 140321757306880, 140321774088191, +SNULL, 140321774092287, 140321782480895, +STORE, 140321774088192, 140321774092287, +STORE, 140321774092288, 140321782480895, +SNULL, 140321463726080, 140321472114687, +STORE, 140321472114688, 140321480507391, +STORE, 140321463726080, 140321472114687, +SNULL, 140321472118783, 140321480507391, +STORE, 140321472114688, 140321472118783, +STORE, 140321472118784, 140321480507391, +SNULL, 140321757306880, 140321765695487, +STORE, 140321765695488, 140321774088191, +STORE, 140321757306880, 140321765695487, +SNULL, 140321765699583, 140321774088191, +STORE, 140321765695488, 140321765699583, +STORE, 140321765699584, 140321774088191, +STORE, 140321455329280, 140321463721983, +SNULL, 140321455333375, 140321463721983, +STORE, 140321455329280, 140321455333375, +STORE, 140321455333376, 140321463721983, +STORE, 140321446936576, 140321455329279, +STORE, 140321438543872, 140321455329279, +STORE, 140321430151168, 140321455329279, +SNULL, 140321430155263, 140321455329279, +STORE, 140321430151168, 140321430155263, +STORE, 140321430155264, 140321455329279, +SNULL, 140321430155264, 
140321446936575, +STORE, 140321446936576, 140321455329279, +STORE, 140321430155264, 140321446936575, +SNULL, 140321446940671, 140321455329279, +STORE, 140321446936576, 140321446940671, +STORE, 140321446940672, 140321455329279, +SNULL, 140321430155264, 140321438543871, +STORE, 140321438543872, 140321446936575, +STORE, 140321430155264, 140321438543871, +SNULL, 140321438547967, 140321446936575, +STORE, 140321438543872, 140321438547967, +STORE, 140321438547968, 140321446936575, +STORE, 140321421758464, 140321430151167, +SNULL, 140321421762559, 140321430151167, +STORE, 140321421758464, 140321421762559, +STORE, 140321421762560, 140321430151167, +STORE, 140321413365760, 140321421758463, +SNULL, 140321413369855, 140321421758463, +STORE, 140321413365760, 140321413369855, +STORE, 140321413369856, 140321421758463, +STORE, 140321404973056, 140321413365759, +SNULL, 140321404977151, 140321413365759, +STORE, 140321404973056, 140321404977151, +STORE, 140321404977152, 140321413365759, +STORE, 140321396580352, 140321404973055, +STORE, 140321388187648, 140321404973055, +STORE, 140321253969920, 140321388187647, +SNULL, 140321253969920, 140321279180799, +STORE, 140321279180800, 140321388187647, +STORE, 140321253969920, 140321279180799, +ERASE, 140321253969920, 140321279180799, +SNULL, 140321346289663, 140321388187647, +STORE, 140321279180800, 140321346289663, +STORE, 140321346289664, 140321388187647, +ERASE, 140321346289664, 140321388187647, +STORE, 140321144963072, 140321346289663, +STORE, 140321379794944, 140321404973055, +STORE, 140321371402240, 140321404973055, +STORE, 140321010745344, 140321346289663, +STORE, 140321363009536, 140321404973055, +SNULL, 140321077854207, 140321346289663, +STORE, 140321010745344, 140321077854207, +STORE, 140321077854208, 140321346289663, +SNULL, 140321077854208, 140321144963071, +STORE, 140321144963072, 140321346289663, +STORE, 140321077854208, 140321144963071, +ERASE, 140321077854208, 140321144963071, +STORE, 140321354616832, 140321404973055, +STORE, 140321136570368, 140321144963071, +STORE, 140320943636480, 140321077854207, +STORE, 140320876527616, 140321077854207, +STORE, 140321128177664, 140321144963071, +SNULL, 140320876662783, 140321077854207, +STORE, 140320876527616, 140320876662783, +STORE, 140320876662784, 140321077854207, +STORE, 140321119784960, 140321144963071, +STORE, 140321111392256, 140321144963071, +STORE, 140320742309888, 140320876527615, +STORE, 140321102999552, 140321144963071, +STORE, 140320608092160, 140320876527615, +SNULL, 140320675201023, 140320876527615, +STORE, 140320608092160, 140320675201023, +STORE, 140320675201024, 140320876527615, +SNULL, 140320675201024, 140320742309887, +STORE, 140320742309888, 140320876527615, +STORE, 140320675201024, 140320742309887, +ERASE, 140320675201024, 140320742309887, +STORE, 140321094606848, 140321144963071, +STORE, 140321086214144, 140321144963071, +STORE, 140320608092160, 140320876527615, +SNULL, 140320608092160, 140320675201023, +STORE, 140320675201024, 140320876527615, +STORE, 140320608092160, 140320675201023, +SNULL, 140320675336191, 140320876527615, +STORE, 140320675201024, 140320675336191, +STORE, 140320675336192, 140320876527615, +STORE, 140320599699456, 140320608092159, +STORE, 140320591306752, 140320608092159, +STORE, 140320457089024, 140320591306751, +STORE, 140320448696320, 140320457089023, +STORE, 140320314478592, 140320448696319, +SNULL, 140321144963072, 140321279180799, +STORE, 140321279180800, 140321346289663, +STORE, 140321144963072, 140321279180799, +SNULL, 140321279315967, 140321346289663, +STORE, 
140321279180800, 140321279315967, +STORE, 140321279315968, 140321346289663, +SNULL, 140321086214144, 140321136570367, +STORE, 140321136570368, 140321144963071, +STORE, 140321086214144, 140321136570367, +SNULL, 140321136574463, 140321144963071, +STORE, 140321136570368, 140321136574463, +STORE, 140321136574464, 140321144963071, +SNULL, 140321212071935, 140321279180799, +STORE, 140321144963072, 140321212071935, +STORE, 140321212071936, 140321279180799, +ERASE, 140321212071936, 140321279180799, +SNULL, 140321145098239, 140321212071935, +STORE, 140321144963072, 140321145098239, +STORE, 140321145098240, 140321212071935, +SNULL, 140320876662784, 140321010745343, +STORE, 140321010745344, 140321077854207, +STORE, 140320876662784, 140321010745343, +SNULL, 140321010880511, 140321077854207, +STORE, 140321010745344, 140321010880511, +STORE, 140321010880512, 140321077854207, +SNULL, 140321354616832, 140321379794943, +STORE, 140321379794944, 140321404973055, +STORE, 140321354616832, 140321379794943, +SNULL, 140321379799039, 140321404973055, +STORE, 140321379794944, 140321379799039, +STORE, 140321379799040, 140321404973055, +SNULL, 140320876662784, 140320943636479, +STORE, 140320943636480, 140321010745343, +STORE, 140320876662784, 140320943636479, +SNULL, 140320943771647, 140321010745343, +STORE, 140320943636480, 140320943771647, +STORE, 140320943771648, 140321010745343, +SNULL, 140320809418751, 140320876527615, +STORE, 140320675336192, 140320809418751, +STORE, 140320809418752, 140320876527615, +ERASE, 140320809418752, 140320876527615, +SNULL, 140320675336192, 140320742309887, +STORE, 140320742309888, 140320809418751, +STORE, 140320675336192, 140320742309887, +SNULL, 140320742445055, 140320809418751, +STORE, 140320742309888, 140320742445055, +STORE, 140320742445056, 140320809418751, +SNULL, 140320608227327, 140320675201023, +STORE, 140320608092160, 140320608227327, +STORE, 140320608227328, 140320675201023, +SNULL, 140320457089024, 140320473874431, +STORE, 140320473874432, 140320591306751, +STORE, 140320457089024, 140320473874431, +ERASE, 140320457089024, 140320473874431, +SNULL, 140320540983295, 140320591306751, +STORE, 140320473874432, 140320540983295, +STORE, 140320540983296, 140320591306751, +ERASE, 140320540983296, 140320591306751, +SNULL, 140320314478592, 140320339656703, +STORE, 140320339656704, 140320448696319, +STORE, 140320314478592, 140320339656703, +ERASE, 140320314478592, 140320339656703, +SNULL, 140321086214144, 140321128177663, +STORE, 140321128177664, 140321136570367, +STORE, 140321086214144, 140321128177663, +SNULL, 140321128181759, 140321136570367, +STORE, 140321128177664, 140321128181759, +STORE, 140321128181760, 140321136570367, +SNULL, 140321354616832, 140321371402239, +STORE, 140321371402240, 140321379794943, +STORE, 140321354616832, 140321371402239, +SNULL, 140321371406335, 140321379794943, +STORE, 140321371402240, 140321371406335, +STORE, 140321371406336, 140321379794943, +SNULL, 140320591310847, 140320608092159, +STORE, 140320591306752, 140320591310847, +STORE, 140320591310848, 140320608092159, +SNULL, 140321354616832, 140321363009535, +STORE, 140321363009536, 140321371402239, +STORE, 140321354616832, 140321363009535, +SNULL, 140321363013631, 140321371402239, +STORE, 140321363009536, 140321363013631, +STORE, 140321363013632, 140321371402239, +SNULL, 140321086214144, 140321119784959, +STORE, 140321119784960, 140321128177663, +STORE, 140321086214144, 140321119784959, +SNULL, 140321119789055, 140321128177663, +STORE, 140321119784960, 140321119789055, +STORE, 140321119789056, 
140321128177663, +SNULL, 140321086218239, 140321119784959, +STORE, 140321086214144, 140321086218239, +STORE, 140321086218240, 140321119784959, +SNULL, 140321086218240, 140321094606847, +STORE, 140321094606848, 140321119784959, +STORE, 140321086218240, 140321094606847, +SNULL, 140321094610943, 140321119784959, +STORE, 140321094606848, 140321094610943, +STORE, 140321094610944, 140321119784959, +SNULL, 140320474009599, 140320540983295, +STORE, 140320473874432, 140320474009599, +STORE, 140320474009600, 140320540983295, +SNULL, 140320406765567, 140320448696319, +STORE, 140320339656704, 140320406765567, +STORE, 140320406765568, 140320448696319, +ERASE, 140320406765568, 140320448696319, +SNULL, 140320339791871, 140320406765567, +STORE, 140320339656704, 140320339791871, +STORE, 140320339791872, 140320406765567, +STORE, 140321270788096, 140321279180799, +STORE, 140321262395392, 140321279180799, +STORE, 140321254002688, 140321279180799, +SNULL, 140321254002688, 140321262395391, +STORE, 140321262395392, 140321279180799, +STORE, 140321254002688, 140321262395391, +SNULL, 140321262399487, 140321279180799, +STORE, 140321262395392, 140321262399487, +STORE, 140321262399488, 140321279180799, +STORE, 140321245609984, 140321262395391, +STORE, 140321237217280, 140321262395391, +SNULL, 140321237217280, 140321245609983, +STORE, 140321245609984, 140321262395391, +STORE, 140321237217280, 140321245609983, +SNULL, 140321245614079, 140321262395391, +STORE, 140321245609984, 140321245614079, +STORE, 140321245614080, 140321262395391, +SNULL, 140321379799040, 140321388187647, +STORE, 140321388187648, 140321404973055, +STORE, 140321379799040, 140321388187647, +SNULL, 140321388191743, 140321404973055, +STORE, 140321388187648, 140321388191743, +STORE, 140321388191744, 140321404973055, +SNULL, 140321354620927, 140321363009535, +STORE, 140321354616832, 140321354620927, +STORE, 140321354620928, 140321363009535, +SNULL, 140321388191744, 140321396580351, +STORE, 140321396580352, 140321404973055, +STORE, 140321388191744, 140321396580351, +SNULL, 140321396584447, 140321404973055, +STORE, 140321396580352, 140321396584447, +STORE, 140321396584448, 140321404973055, +SNULL, 140321094610944, 140321111392255, +STORE, 140321111392256, 140321119784959, +STORE, 140321094610944, 140321111392255, +SNULL, 140321111396351, 140321119784959, +STORE, 140321111392256, 140321111396351, +STORE, 140321111396352, 140321119784959, +STORE, 140321228824576, 140321245609983, +SNULL, 140321094610944, 140321102999551, +STORE, 140321102999552, 140321111392255, +STORE, 140321094610944, 140321102999551, +SNULL, 140321103003647, 140321111392255, +STORE, 140321102999552, 140321103003647, +STORE, 140321103003648, 140321111392255, +STORE, 140321220431872, 140321245609983, +SNULL, 140321220435967, 140321245609983, +STORE, 140321220431872, 140321220435967, +STORE, 140321220435968, 140321245609983, +STORE, 140320868134912, 140320876527615, +SNULL, 140320868139007, 140320876527615, +STORE, 140320868134912, 140320868139007, +STORE, 140320868139008, 140320876527615, +SNULL, 140320591310848, 140320599699455, +STORE, 140320599699456, 140320608092159, +STORE, 140320591310848, 140320599699455, +SNULL, 140320599703551, 140320608092159, +STORE, 140320599699456, 140320599703551, +STORE, 140320599703552, 140320608092159, +STORE, 140320859742208, 140320868134911, +SNULL, 140321262399488, 140321270788095, +STORE, 140321270788096, 140321279180799, +STORE, 140321262399488, 140321270788095, +SNULL, 140321270792191, 140321279180799, +STORE, 140321270788096, 140321270792191, +STORE, 
140321270792192, 140321279180799, +STORE, 140320851349504, 140320868134911, +STORE, 140320842956800, 140320868134911, +STORE, 140320834564096, 140320868134911, +STORE, 140320826171392, 140320868134911, +SNULL, 140320826171392, 140320834564095, +STORE, 140320834564096, 140320868134911, +STORE, 140320826171392, 140320834564095, +SNULL, 140320834568191, 140320868134911, +STORE, 140320834564096, 140320834568191, +STORE, 140320834568192, 140320868134911, +SNULL, 140321220435968, 140321228824575, +STORE, 140321228824576, 140321245609983, +STORE, 140321220435968, 140321228824575, +SNULL, 140321228828671, 140321245609983, +STORE, 140321228824576, 140321228828671, +STORE, 140321228828672, 140321245609983, +STORE, 140320817778688, 140320834564095, +SNULL, 140320817782783, 140320834564095, +STORE, 140320817778688, 140320817782783, +STORE, 140320817782784, 140320834564095, +STORE, 140320582914048, 140320591306751, +SNULL, 140321228828672, 140321237217279, +STORE, 140321237217280, 140321245609983, +STORE, 140321228828672, 140321237217279, +SNULL, 140321237221375, 140321245609983, +STORE, 140321237217280, 140321237221375, +STORE, 140321237221376, 140321245609983, +SNULL, 140320448700415, 140320457089023, +STORE, 140320448696320, 140320448700415, +STORE, 140320448700416, 140320457089023, +SNULL, 140321245614080, 140321254002687, +STORE, 140321254002688, 140321262395391, +STORE, 140321245614080, 140321254002687, +SNULL, 140321254006783, 140321262395391, +STORE, 140321254002688, 140321254006783, +STORE, 140321254006784, 140321262395391, +STORE, 140320574521344, 140320591306751, +SNULL, 140320574525439, 140320591306751, +STORE, 140320574521344, 140320574525439, +STORE, 140320574525440, 140320591306751, +STORE, 140320566128640, 140320574521343, +SNULL, 140320566132735, 140320574521343, +STORE, 140320566128640, 140320566132735, +STORE, 140320566132736, 140320574521343, +SNULL, 140320574525440, 140320582914047, +STORE, 140320582914048, 140320591306751, +STORE, 140320574525440, 140320582914047, +SNULL, 140320582918143, 140320591306751, +STORE, 140320582914048, 140320582918143, +STORE, 140320582918144, 140320591306751, +STORE, 140320557735936, 140320566128639, +SNULL, 140320557740031, 140320566128639, +STORE, 140320557735936, 140320557740031, +STORE, 140320557740032, 140320566128639, +STORE, 140320549343232, 140320557735935, +STORE, 140320465481728, 140320473874431, +STORE, 140320448700416, 140320473874431, +SNULL, 140320834568192, 140320859742207, +STORE, 140320859742208, 140320868134911, +STORE, 140320834568192, 140320859742207, +SNULL, 140320859746303, 140320868134911, +STORE, 140320859742208, 140320859746303, +STORE, 140320859746304, 140320868134911, +STORE, 140320440303616, 140320448696319, +STORE, 140320431910912, 140320448696319, +SNULL, 140320834568192, 140320851349503, +STORE, 140320851349504, 140320859742207, +STORE, 140320834568192, 140320851349503, +SNULL, 140320851353599, 140320859742207, +STORE, 140320851349504, 140320851353599, +STORE, 140320851353600, 140320859742207, +SNULL, 140320817782784, 140320826171391, +STORE, 140320826171392, 140320834564095, +STORE, 140320817782784, 140320826171391, +SNULL, 140320826175487, 140320834564095, +STORE, 140320826171392, 140320826175487, +STORE, 140320826175488, 140320834564095, +SNULL, 140320834568192, 140320842956799, +STORE, 140320842956800, 140320851349503, +STORE, 140320834568192, 140320842956799, +SNULL, 140320842960895, 140320851349503, +STORE, 140320842956800, 140320842960895, +STORE, 140320842960896, 140320851349503, +STORE, 140320423518208, 
140320448696319, +SNULL, 140320423522303, 140320448696319, +STORE, 140320423518208, 140320423522303, +STORE, 140320423522304, 140320448696319, +STORE, 140320415125504, 140320423518207, +STORE, 140320331264000, 140320339656703, +STORE, 140320322871296, 140320339656703, +STORE, 140320314478592, 140320339656703, +SNULL, 140320314482687, 140320339656703, +STORE, 140320314478592, 140320314482687, +STORE, 140320314482688, 140320339656703, +STORE, 140320306085888, 140320314478591, +SNULL, 140320306089983, 140320314478591, +STORE, 140320306085888, 140320306089983, +STORE, 140320306089984, 140320314478591, +STORE, 140320297693184, 140320306085887, +SNULL, 140320297697279, 140320306085887, +STORE, 140320297693184, 140320297697279, +STORE, 140320297697280, 140320306085887, +STORE, 140320289300480, 140320297693183, +STORE, 140320280907776, 140320297693183, +SNULL, 140320280911871, 140320297693183, +STORE, 140320280907776, 140320280911871, +STORE, 140320280911872, 140320297693183, +SNULL, 140320423522304, 140320431910911, +STORE, 140320431910912, 140320448696319, +STORE, 140320423522304, 140320431910911, +SNULL, 140320431915007, 140320448696319, +STORE, 140320431910912, 140320431915007, +STORE, 140320431915008, 140320448696319, +SNULL, 140320549347327, 140320557735935, +STORE, 140320549343232, 140320549347327, +STORE, 140320549347328, 140320557735935, +STORE, 140320272515072, 140320280907775, +SNULL, 140320448700416, 140320457089023, +STORE, 140320457089024, 140320473874431, +STORE, 140320448700416, 140320457089023, +SNULL, 140320457093119, 140320473874431, +STORE, 140320457089024, 140320457093119, +STORE, 140320457093120, 140320473874431, +STORE, 140320264122368, 140320280907775, +SNULL, 140320457093120, 140320465481727, +STORE, 140320465481728, 140320473874431, +STORE, 140320457093120, 140320465481727, +SNULL, 140320465485823, 140320473874431, +STORE, 140320465481728, 140320465485823, +STORE, 140320465485824, 140320473874431, +SNULL, 140320431915008, 140320440303615, +STORE, 140320440303616, 140320448696319, +STORE, 140320431915008, 140320440303615, +SNULL, 140320440307711, 140320448696319, +STORE, 140320440303616, 140320440307711, +STORE, 140320440307712, 140320448696319, +STORE, 140320255729664, 140320280907775, +STORE, 140320247336960, 140320280907775, +SNULL, 140320247341055, 140320280907775, +STORE, 140320247336960, 140320247341055, +STORE, 140320247341056, 140320280907775, +STORE, 140320238944256, 140320247336959, +STORE, 140320230551552, 140320247336959, +SNULL, 140320230551552, 140320238944255, +STORE, 140320238944256, 140320247336959, +STORE, 140320230551552, 140320238944255, +SNULL, 140320238948351, 140320247336959, +STORE, 140320238944256, 140320238948351, +STORE, 140320238948352, 140320247336959, +SNULL, 140320314482688, 140320331263999, +STORE, 140320331264000, 140320339656703, +STORE, 140320314482688, 140320331263999, +SNULL, 140320331268095, 140320339656703, +STORE, 140320331264000, 140320331268095, +STORE, 140320331268096, 140320339656703, +SNULL, 140320280911872, 140320289300479, +STORE, 140320289300480, 140320297693183, +STORE, 140320280911872, 140320289300479, +SNULL, 140320289304575, 140320297693183, +STORE, 140320289300480, 140320289304575, +STORE, 140320289304576, 140320297693183, +SNULL, 140320415129599, 140320423518207, +STORE, 140320415125504, 140320415129599, +STORE, 140320415129600, 140320423518207, +STORE, 140320222158848, 140320238944255, +STORE, 140320213766144, 140320238944255, +STORE, 140320205373440, 140320238944255, +SNULL, 140320205377535, 140320238944255, +STORE, 
140320205373440, 140320205377535, +STORE, 140320205377536, 140320238944255, +SNULL, 140320314482688, 140320322871295, +STORE, 140320322871296, 140320331263999, +STORE, 140320314482688, 140320322871295, +SNULL, 140320322875391, 140320331263999, +STORE, 140320322871296, 140320322875391, +STORE, 140320322875392, 140320331263999, +SNULL, 140320247341056, 140320272515071, +STORE, 140320272515072, 140320280907775, +STORE, 140320247341056, 140320272515071, +SNULL, 140320272519167, 140320280907775, +STORE, 140320272515072, 140320272519167, +STORE, 140320272519168, 140320280907775, +SNULL, 140320247341056, 140320264122367, +STORE, 140320264122368, 140320272515071, +STORE, 140320247341056, 140320264122367, +SNULL, 140320264126463, 140320272515071, +STORE, 140320264122368, 140320264126463, +STORE, 140320264126464, 140320272515071, +SNULL, 140320205377536, 140320230551551, +STORE, 140320230551552, 140320238944255, +STORE, 140320205377536, 140320230551551, +SNULL, 140320230555647, 140320238944255, +STORE, 140320230551552, 140320230555647, +STORE, 140320230555648, 140320238944255, +STORE, 140320196980736, 140320205373439, +SNULL, 140320196984831, 140320205373439, +STORE, 140320196980736, 140320196984831, +STORE, 140320196984832, 140320205373439, +STORE, 140320188588032, 140320196980735, +SNULL, 140320247341056, 140320255729663, +STORE, 140320255729664, 140320264122367, +STORE, 140320247341056, 140320255729663, +SNULL, 140320255733759, 140320264122367, +STORE, 140320255729664, 140320255733759, +STORE, 140320255733760, 140320264122367, +STORE, 140320180195328, 140320196980735, +SNULL, 140320180199423, 140320196980735, +STORE, 140320180195328, 140320180199423, +STORE, 140320180199424, 140320196980735, +STORE, 140320171802624, 140320180195327, +STORE, 140320163409920, 140320180195327, +SNULL, 140320163414015, 140320180195327, +STORE, 140320163409920, 140320163414015, +STORE, 140320163414016, 140320180195327, +SNULL, 140320205377536, 140320222158847, +STORE, 140320222158848, 140320230551551, +STORE, 140320205377536, 140320222158847, +SNULL, 140320222162943, 140320230551551, +STORE, 140320222158848, 140320222162943, +STORE, 140320222162944, 140320230551551, +SNULL, 140320205377536, 140320213766143, +STORE, 140320213766144, 140320222158847, +STORE, 140320205377536, 140320213766143, +SNULL, 140320213770239, 140320222158847, +STORE, 140320213766144, 140320213770239, +STORE, 140320213770240, 140320222158847, +STORE, 140320155017216, 140320163409919, +SNULL, 140320180199424, 140320188588031, +STORE, 140320188588032, 140320196980735, +STORE, 140320180199424, 140320188588031, +SNULL, 140320188592127, 140320196980735, +STORE, 140320188588032, 140320188592127, +STORE, 140320188592128, 140320196980735, +SNULL, 140320155021311, 140320163409919, +STORE, 140320155017216, 140320155021311, +STORE, 140320155021312, 140320163409919, +SNULL, 140320163414016, 140320171802623, +STORE, 140320171802624, 140320180195327, +STORE, 140320163414016, 140320171802623, +SNULL, 140320171806719, 140320180195327, +STORE, 140320171802624, 140320171806719, +STORE, 140320171806720, 140320180195327, +STORE, 140320146624512, 140320155017215, +SNULL, 140320146628607, 140320155017215, +STORE, 140320146624512, 140320146628607, +STORE, 140320146628608, 140320155017215, +STORE, 140321937321984, 140321937350655, +STORE, 140321884942336, 140321887133695, +SNULL, 140321884942336, 140321885032447, +STORE, 140321885032448, 140321887133695, +STORE, 140321884942336, 140321885032447, +SNULL, 140321887125503, 140321887133695, +STORE, 140321885032448, 
140321887125503, +STORE, 140321887125504, 140321887133695, +ERASE, 140321887125504, 140321887133695, +STORE, 140321887125504, 140321887133695, +SNULL, 140321887129599, 140321887133695, +STORE, 140321887125504, 140321887129599, +STORE, 140321887129600, 140321887133695, +ERASE, 140321937321984, 140321937350655, +ERASE, 140321086214144, 140321086218239, +ERASE, 140321086218240, 140321094606847, +ERASE, 140321119784960, 140321119789055, +ERASE, 140321119789056, 140321128177663, +ERASE, 140321245609984, 140321245614079, +ERASE, 140321245614080, 140321254002687, +ERASE, 140320574521344, 140320574525439, +ERASE, 140320574525440, 140320582914047, +ERASE, 140320297693184, 140320297697279, +ERASE, 140320297697280, 140320306085887, +ERASE, 140321354616832, 140321354620927, +ERASE, 140321354620928, 140321363009535, +ERASE, 140320834564096, 140320834568191, +ERASE, 140320834568192, 140320842956799, +ERASE, 140320591306752, 140320591310847, +ERASE, 140320591310848, 140320599699455, +ERASE, 140321136570368, 140321136574463, +ERASE, 140321136574464, 140321144963071, +ERASE, 140321237217280, 140321237221375, +ERASE, 140321237221376, 140321245609983, +ERASE, 140321363009536, 140321363013631, +ERASE, 140321363013632, 140321371402239, +ERASE, 140320599699456, 140320599703551, +ERASE, 140320599703552, 140320608092159, +ERASE, 140321396580352, 140321396584447, +ERASE, 140321396584448, 140321404973055, +ERASE, 140320566128640, 140320566132735, +ERASE, 140320566132736, 140320574521343, +ERASE, 140321094606848, 140321094610943, +ERASE, 140321094610944, 140321102999551, +ERASE, 140320582914048, 140320582918143, +ERASE, 140320582918144, 140320591306751, +ERASE, 140320289300480, 140320289304575, +ERASE, 140320289304576, 140320297693183, +ERASE, 140320163409920, 140320163414015, + }; + unsigned long set41[] = { +STORE, 140737488347136, 140737488351231, +STORE, 140728157171712, 140737488351231, +SNULL, 140728157175807, 140737488351231, +STORE, 140728157171712, 140728157175807, +STORE, 140728157040640, 140728157175807, +STORE, 94376106364928, 94376108613631, +SNULL, 94376106487807, 94376108613631, +STORE, 94376106364928, 94376106487807, +STORE, 94376106487808, 94376108613631, +SNULL, 94376106487808, 94376108613631, +STORE, 94376108584960, 94376108593151, +STORE, 94376108593152, 94376108613631, +STORE, 140113496432640, 140113498685439, +SNULL, 140113496575999, 140113498685439, +STORE, 140113496432640, 140113496575999, +STORE, 140113496576000, 140113498685439, +SNULL, 140113496576000, 140113498685439, +STORE, 140113498673152, 140113498681343, +STORE, 140113498681344, 140113498685439, +STORE, 140728157609984, 140728157618175, +STORE, 140728157593600, 140728157609983, +STORE, 140113498636288, 140113498673151, +STORE, 140113498628096, 140113498636287, +STORE, 140113492635648, 140113496432639, +SNULL, 140113492635648, 140113494294527, +STORE, 140113494294528, 140113496432639, +STORE, 140113492635648, 140113494294527, +SNULL, 140113496391679, 140113496432639, +STORE, 140113494294528, 140113496391679, +STORE, 140113496391680, 140113496432639, +SNULL, 140113496391680, 140113496416255, +STORE, 140113496416256, 140113496432639, +STORE, 140113496391680, 140113496416255, +SNULL, 140113496391680, 140113496416255, +STORE, 140113496391680, 140113496416255, +SNULL, 140113496416256, 140113496432639, +STORE, 140113496416256, 140113496432639, +SNULL, 140113496408063, 140113496416255, +STORE, 140113496391680, 140113496408063, +STORE, 140113496408064, 140113496416255, +SNULL, 94376108589055, 94376108593151, +STORE, 94376108584960, 
94376108589055, +STORE, 94376108589056, 94376108593151, +SNULL, 140113498677247, 140113498681343, +STORE, 140113498673152, 140113498677247, +STORE, 140113498677248, 140113498681343, +SNULL, 140113498636288, 140113498673151, +STORE, 94376135090176, 94376135094271, +STORE, 94376135090176, 94376135098367, +STORE, 94376139288576, 94376139292671, +STORE, 94376143482880, 94376143486975, +STORE, 94376147677184, 94376147681279, +STORE, 94376151871488, 94376151875583, +STORE, 94376156065792, 94376156069887, +STORE, 94376160260096, 94376160264191, +STORE, 94376164454400, 94376164458495, +STORE, 94376168648704, 94376168652799, +STORE, 94376172843008, 94376172847103, +STORE, 94376177037312, 94376177041407, +STORE, 94376181231616, 94376181235711, +STORE, 94376185425920, 94376185430015, +STORE, 94376189620224, 94376189624319, +STORE, 94376193814528, 94376193818623, +STORE, 94376198008832, 94376198012927, +STORE, 94376202203136, 94376202207231, +STORE, 94376206397440, 94376206401535, +STORE, 94376210591744, 94376210595839, +STORE, 94376214786048, 94376214790143, +STORE, 94376218980352, 94376218984447, +STORE, 94376223174656, 94376223178751, +STORE, 94376227368960, 94376227373055, +STORE, 94376231563264, 94376231567359, +STORE, 94376235757568, 94376235761663, +STORE, 94376239951872, 94376239955967, +STORE, 94376244146176, 94376244150271, +STORE, 94376248340480, 94376248344575, +STORE, 94376252534784, 94376252538879, +STORE, 94376256729088, 94376256733183, +STORE, 94376260923392, 94376260927487, +STORE, 94376265117696, 94376265121791, +STORE, 94376269312000, 94376269316095, +STORE, 94376273506304, 94376273510399, +STORE, 94376277700608, 94376277704703, +STORE, 94376281894912, 94376281899007, +STORE, 94376286089216, 94376286093311, +STORE, 94376290283520, 94376290287615, +STORE, 94376294477824, 94376294481919, +STORE, 94376298672128, 94376298676223, +STORE, 94376302866432, 94376302870527, +STORE, 94376307060736, 94376307064831, +STORE, 94376311255040, 94376311259135, +STORE, 94376315449344, 94376315453439, +STORE, 94376319643648, 94376319647743, +STORE, 94376323837952, 94376323842047, +STORE, 94376328032256, 94376328036351, +STORE, 94376332226560, 94376332230655, +STORE, 94376336420864, 94376336424959, +STORE, 94376340615168, 94376340619263, +STORE, 94376344809472, 94376344813567, +STORE, 94376349003776, 94376349007871, +STORE, 94376353198080, 94376353202175, +STORE, 94376357392384, 94376357396479, +STORE, 94376361586688, 94376361590783, +STORE, 94376365780992, 94376365785087, +STORE, 94376369975296, 94376369979391, +STORE, 94376374169600, 94376374173695, +STORE, 94376378363904, 94376378367999, +STORE, 94376382558208, 94376382562303, +STORE, 94376386752512, 94376386756607, +STORE, 94376390946816, 94376390950911, +STORE, 94376395141120, 94376395145215, +STORE, 94376399335424, 94376399339519, +STORE, 94376403529728, 94376403533823, +STORE, 94376407724032, 94376407728127, +STORE, 94376411918336, 94376411922431, +STORE, 94376416112640, 94376416116735, +STORE, 94376420306944, 94376420311039, +STORE, 94376424501248, 94376424505343, +STORE, 94376428695552, 94376428699647, +STORE, 94376432889856, 94376432893951, +STORE, 94376437084160, 94376437088255, +STORE, 94376441278464, 94376441282559, +STORE, 94376445472768, 94376445476863, +STORE, 94376449667072, 94376449671167, +STORE, 94376453861376, 94376453865471, +STORE, 94376458055680, 94376458059775, +STORE, 94376462249984, 94376462254079, +STORE, 94376466444288, 94376466448383, +STORE, 94376470638592, 94376470642687, +STORE, 94376474832896, 94376474836991, +STORE, 
94376479027200, 94376479031295, +STORE, 94376483221504, 94376483225599, +STORE, 94376487415808, 94376487419903, +STORE, 94376491610112, 94376491614207, +STORE, 94376495804416, 94376495808511, +STORE, 94376499998720, 94376500002815, +STORE, 94376504193024, 94376504197119, +STORE, 94376508387328, 94376508391423, +STORE, 94376512581632, 94376512585727, +STORE, 94376516775936, 94376516780031, +STORE, 94376520970240, 94376520974335, +STORE, 94376525164544, 94376525168639, +STORE, 94376529358848, 94376529362943, +STORE, 94376533553152, 94376533557247, +STORE, 94376537747456, 94376537751551, +STORE, 94376541941760, 94376541945855, +STORE, 94376546136064, 94376546140159, +STORE, 94376550330368, 94376550334463, +STORE, 94376554524672, 94376554528767, +STORE, 94376558718976, 94376558723071, +STORE, 94376562913280, 94376562917375, +STORE, 94376567107584, 94376567111679, +STORE, 94376571301888, 94376571305983, +STORE, 94376575496192, 94376575500287, +STORE, 94376579690496, 94376579694591, +STORE, 94376583884800, 94376583888895, +STORE, 94376588079104, 94376588083199, +STORE, 94376592273408, 94376592277503, +STORE, 94376596467712, 94376596471807, +STORE, 94376600662016, 94376600666111, +STORE, 94376604856320, 94376604860415, +STORE, 94376609050624, 94376609054719, +STORE, 94376613244928, 94376613249023, +STORE, 94376617439232, 94376617443327, +STORE, 94376621633536, 94376621637631, +STORE, 94376625827840, 94376625831935, +STORE, 94376630022144, 94376630026239, +STORE, 94376634216448, 94376634220543, +STORE, 94376638410752, 94376638414847, +STORE, 94376642605056, 94376642609151, +STORE, 94376646799360, 94376646803455, +STORE, 94376650993664, 94376650997759, +STORE, 94376655187968, 94376655192063, +STORE, 94376659382272, 94376659386367, +STORE, 94376663576576, 94376663580671, +STORE, 94376667770880, 94376667774975, +STORE, 94376671965184, 94376671969279, +STORE, 94376676159488, 94376676163583, +STORE, 94376680353792, 94376680357887, +STORE, 94376684548096, 94376684552191, +STORE, 94376688742400, 94376688746495, +STORE, 94376692936704, 94376692940799, +STORE, 94376697131008, 94376697135103, +STORE, 94376701325312, 94376701329407, +STORE, 94376705519616, 94376705523711, +STORE, 94376709713920, 94376709718015, +STORE, 94376713908224, 94376713912319, +STORE, 94376718102528, 94376718106623, +STORE, 94376722296832, 94376722300927, +STORE, 94376726491136, 94376726495231, +STORE, 94376730685440, 94376730689535, +STORE, 94376734879744, 94376734883839, +STORE, 94376739074048, 94376739078143, +STORE, 94376743268352, 94376743272447, +STORE, 94376747462656, 94376747466751, +STORE, 94376751656960, 94376751661055, +STORE, 94376755851264, 94376755855359, +STORE, 94376760045568, 94376760049663, +STORE, 94376764239872, 94376764243967, +STORE, 94376768434176, 94376768438271, +STORE, 94376772628480, 94376772632575, +STORE, 94376776822784, 94376776826879, +STORE, 94376781017088, 94376781021183, +STORE, 94376785211392, 94376785215487, +STORE, 94376789405696, 94376789409791, +STORE, 94376793600000, 94376793604095, +STORE, 94376797794304, 94376797798399, +STORE, 94376801988608, 94376801992703, +STORE, 94376806182912, 94376806187007, +STORE, 94376810377216, 94376810381311, +STORE, 94376814571520, 94376814575615, +STORE, 94376818765824, 94376818769919, +STORE, 94376822960128, 94376822964223, +STORE, 94376827154432, 94376827158527, +STORE, 94376831348736, 94376831352831, +STORE, 94376835543040, 94376835547135, +STORE, 94376839737344, 94376839741439, +STORE, 94376843931648, 94376843935743, +STORE, 94376848125952, 94376848130047, 
+STORE, 94376852320256, 94376852324351, +STORE, 94376856514560, 94376856518655, +STORE, 94376860708864, 94376860712959, +STORE, 94376864903168, 94376864907263, +STORE, 94376869097472, 94376869101567, +STORE, 94376873291776, 94376873295871, +STORE, 94376877486080, 94376877490175, +STORE, 94376881680384, 94376881684479, +STORE, 94376885874688, 94376885878783, +STORE, 94376890068992, 94376890073087, +STORE, 94376894263296, 94376894267391, +STORE, 94376898457600, 94376898461695, +STORE, 94376902651904, 94376902655999, +STORE, 94376906846208, 94376906850303, +STORE, 94376911040512, 94376911044607, +STORE, 94376915234816, 94376915238911, +STORE, 94376919429120, 94376919433215, +STORE, 94376923623424, 94376923627519, +STORE, 94376927817728, 94376927821823, +STORE, 94376932012032, 94376932016127, +STORE, 94376936206336, 94376936210431, +STORE, 94376940400640, 94376940404735, +STORE, 94376944594944, 94376944599039, +STORE, 94376948789248, 94376948793343, +STORE, 94376952983552, 94376952987647, +STORE, 94376957177856, 94376957181951, +STORE, 94376961372160, 94376961376255, +STORE, 94376965566464, 94376965570559, +STORE, 94376969760768, 94376969764863, +STORE, 94376973955072, 94376973959167, +STORE, 94376978149376, 94376978153471, +STORE, 94376982343680, 94376982347775, +STORE, 94376986537984, 94376986542079, +STORE, 94376990732288, 94376990736383, +STORE, 94376994926592, 94376994930687, +STORE, 94376999120896, 94376999124991, +STORE, 94377003315200, 94377003319295, +STORE, 94377007509504, 94377007513599, +STORE, 94377011703808, 94377011707903, +STORE, 94377015898112, 94377015902207, +STORE, 94377020092416, 94377020096511, +STORE, 94377024286720, 94377024290815, +STORE, 94377028481024, 94377028485119, +STORE, 94377032675328, 94377032679423, +STORE, 94377036869632, 94377036873727, +STORE, 94377041063936, 94377041068031, +STORE, 94377045258240, 94377045262335, +STORE, 94377049452544, 94377049456639, +STORE, 94377053646848, 94377053650943, +STORE, 94377057841152, 94377057845247, +STORE, 94377062035456, 94377062039551, +STORE, 94377066229760, 94377066233855, +STORE, 94377070424064, 94377070428159, +STORE, 94377074618368, 94377074622463, +STORE, 94377078812672, 94377078816767, +STORE, 94377083006976, 94377083011071, +STORE, 94377087201280, 94377087205375, +STORE, 94377091395584, 94377091399679, +STORE, 94377095589888, 94377095593983, +STORE, 94377099784192, 94377099788287, +STORE, 94377103978496, 94377103982591, +STORE, 94377108172800, 94377108176895, +STORE, 94377112367104, 94377112371199, +STORE, 94377116561408, 94377116565503, +STORE, 94377120755712, 94377120759807, +STORE, 94377124950016, 94377124954111, +STORE, 94377129144320, 94377129148415, +STORE, 94377133338624, 94377133342719, +STORE, 94377137532928, 94377137537023, +STORE, 94377141727232, 94377141731327, +STORE, 94377145921536, 94377145925631, +STORE, 94377150115840, 94377150119935, +STORE, 94377154310144, 94377154314239, +STORE, 94377158504448, 94377158508543, +STORE, 94377162698752, 94377162702847, +STORE, 94377166893056, 94377166897151, +STORE, 94377171087360, 94377171091455, +STORE, 94377175281664, 94377175285759, +STORE, 94377179475968, 94377179480063, +STORE, 94377183670272, 94377183674367, +STORE, 94377187864576, 94377187868671, +STORE, 94377192058880, 94377192062975, +STORE, 94377196253184, 94377196257279, +STORE, 94377200447488, 94377200451583, +STORE, 94377204641792, 94377204645887, +SNULL, 94376135094271, 94376135098367, +STORE, 94376135090176, 94376135094271, +STORE, 94376135094272, 94376135098367, +SNULL, 94376135094272, 
94377208836095, + }; + unsigned long set42[] = { +STORE, 314572800, 1388314623, +STORE, 1462157312, 1462169599, +STORE, 1462169600, 1462185983, +STORE, 1462185984, 1462190079, +STORE, 1462190080, 1462194175, +STORE, 1462194176, 1462198271, +STORE, 1879986176, 1881800703, +STORE, 1881800704, 1882034175, +STORE, 1882034176, 1882193919, +STORE, 1882193920, 1882406911, +STORE, 1882406912, 1882451967, +STORE, 1882451968, 1882996735, +STORE, 1882996736, 1885892607, +STORE, 1885892608, 1885896703, +STORE, 1885896704, 1885904895, +STORE, 1885904896, 1885908991, +STORE, 1885908992, 1885913087, +STORE, 1885913088, 1885966335, +STORE, 1885966336, 1886232575, +STORE, 1886232576, 1886236671, +STORE, 1886236672, 1886240767, +STORE, 1886240768, 1886244863, +STORE, 1886244864, 1886248959, +STORE, 1886248960, 1886294015, +STORE, 1886294016, 1886494719, +STORE, 1886494720, 1886498815, +STORE, 1886498816, 1886502911, +STORE, 1886502912, 1886507007, +STORE, 1886507008, 1886511103, +STORE, 1886511104, 1886556159, +STORE, 1886556160, 1886629887, +STORE, 1886629888, 1886633983, +STORE, 1886633984, 1886638079, +STORE, 1886638080, 1886642175, +STORE, 1886642176, 1886646271, +STORE, 1886646272, 1886666751, +STORE, 1886666752, 1886670847, +STORE, 1886670848, 1886674943, +STORE, 1886674944, 1886679039, +STORE, 1886679040, 1895419903, +STORE, 1895419904, 1895550975, +STORE, 1895550976, 1896148991, +STORE, 1896148992, 1897189375, +STORE, 1897189376, 1897701375, +STORE, 1897701376, 1897803775, +STORE, 1897803776, 1897816063, +STORE, 1897816064, 1899913215, +STORE, 1899913216, 1909379071, +STORE, 1909379072, 1909387263, +STORE, 1909387264, 1909391359, +STORE, 1909391360, 1909432319, +STORE, 1909432320, 1909436415, +STORE, 1909436416, 1909440511, +STORE, 1909440512, 1909460991, +STORE, 1909460992, 1909547007, +STORE, 1909547008, 1909551103, +STORE, 1909551104, 1909555199, +STORE, 1909555200, 1909559295, +STORE, 1909559296, 1909563391, +STORE, 1909563392, 1909739519, +STORE, 1909739520, 1910566911, +STORE, 1910566912, 1910571007, +STORE, 1910571008, 1910575103, +STORE, 1910575104, 1910579199, +STORE, 1910579200, 1910583295, +STORE, 1910583296, 1910587391, +STORE, 1910587392, 1910620159, +STORE, 1910620160, 1910624255, +STORE, 1910624256, 1910628351, +STORE, 1910628352, 1910632447, +STORE, 1910632448, 1910652927, +STORE, 1910652928, 1910657023, +STORE, 1910657024, 1910661119, +STORE, 1910661120, 1910665215, +STORE, 1910665216, 1910669311, +STORE, 1910669312, 1910677503, +STORE, 1910677504, 1910681599, +STORE, 1910681600, 1910685695, +STORE, 1910685696, 1910689791, +STORE, 1910689792, 1910697983, +STORE, 1910697984, 1910702079, +STORE, 1910702080, 1910706175, +STORE, 1910706176, 1910710271, +STORE, 1910710272, 1914093567, +STORE, 1914093568, 1914097663, +STORE, 1914097664, 1969434623, +STORE, 1969434624, 1977819135, +STORE, 3290435584, 3426750463, +STORE, 3426750464, 3426754559, +STORE, 3426754560, 3426762751, +STORE, 3426762752, 3426766847, +STORE, 3426766848, 3426770943, +STORE, 3427037184, 3427061759, +STORE, 3427061760, 3427135487, +STORE, 3427135488, 3427143679, +STORE, 3427143680, 3427147775, +STORE, 3427147776, 3427209215, +STORE, 3427319808, 3432116223, +STORE, 3432116224, 3450130431, +STORE, 3450130432, 3451027455, +STORE, 3451027456, 3451031551, +STORE, 3451031552, 3451461631, +STORE, 3451736064, 3456688127, +STORE, 3456688128, 3475222527, +STORE, 3475222528, 3476119551, +STORE, 3476119552, 3476127743, +STORE, 3476127744, 3476553727, +STORE, 3476631552, 3477315583, +STORE, 3477315584, 3479949311, +STORE, 
3479949312, 3480002559, +STORE, 3480002560, 3480006655, +STORE, 3480006656, 3480432639, +STORE, 3480539136, 3480543231, +STORE, 3480543232, 3480547327, +STORE, 3480547328, 3480555519, +STORE, 3480854528, 3480903679, +STORE, 3480903680, 3480969215, +STORE, 3480969216, 3480977407, +STORE, 3480977408, 3480981503, +STORE, 3481030656, 3481092095, +STORE, 3481092096, 3481235455, +STORE, 3481235456, 3481243647, +STORE, 3481243648, 3481247743, +STORE, 3481436160, 3481444351, +STORE, 3481444352, 3481456639, +STORE, 3481456640, 3481460735, +STORE, 3481460736, 3481464831, +STORE, 3481587712, 3481645055, +STORE, 3481645056, 3481772031, +STORE, 3481772032, 3481776127, +STORE, 3481776128, 3481780223, +STORE, 3481874432, 3481935871, +STORE, 3481935872, 3482030079, +STORE, 3482030080, 3482038271, +STORE, 3482038272, 3482042367, +STORE, 3482198016, 3482230783, +STORE, 3482230784, 3482271743, +STORE, 3482271744, 3482279935, +STORE, 3482279936, 3482284031, +STORE, 3482562560, 3482566655, +STORE, 3482566656, 3482570751, +STORE, 3482570752, 3482574847, +STORE, 3482636288, 3482689535, +STORE, 3482689536, 3482746879, +STORE, 3482746880, 3482755071, +STORE, 3482755072, 3482759167, +STORE, 3482972160, 3483062271, +STORE, 3483062272, 3483242495, +STORE, 3483242496, 3483246591, +STORE, 3483246592, 3483250687, +STORE, 3483398144, 3483688959, +STORE, 3483688960, 3484114943, +STORE, 3484114944, 3484131327, +STORE, 3484131328, 3484135423, +STORE, 3484135424, 3484143615, +STORE, 3484184576, 3484475391, +STORE, 3484475392, 3485028351, +STORE, 3485028352, 3485057023, +STORE, 3485057024, 3485061119, +STORE, 3485360128, 3485364223, +STORE, 3485364224, 3485368319, +STORE, 3485368320, 3485372415, +STORE, 3485589504, 3485593599, +STORE, 3485593600, 3485597695, +STORE, 3485597696, 3485601791, +STORE, 3485913088, 3485937663, +STORE, 3485937664, 3485974527, +STORE, 3485974528, 3485982719, +STORE, 3485982720, 3485986815, +STORE, 3486052352, 3486056447, +STORE, 3486056448, 3486064639, +STORE, 3486064640, 3486068735, +STORE, 3486068736, 3486072831, +STORE, 3486294016, 3486302207, +STORE, 3486302208, 3486306303, +STORE, 3486306304, 3486310399, +STORE, 3486310400, 3486314495, +STORE, 3486670848, 3486679039, +STORE, 3486679040, 3486683135, +STORE, 3486683136, 3486687231, +STORE, 3486687232, 3486691327, +STORE, 3486863360, 3486871551, +STORE, 3486871552, 3486875647, +STORE, 3486875648, 3486879743, +STORE, 3486879744, 3486883839, +STORE, 3487584256, 3522543615, +STORE, 3522543616, 3523321855, +STORE, 3523321856, 3523342335, +STORE, 3523342336, 3523387391, +STORE, 3523387392, 3523391487, +STORE, 3523391488, 3523395583, +STORE, 3523477504, 3523686399, +STORE, 3523686400, 3523981311, +STORE, 3523981312, 3523997695, +STORE, 3523997696, 3524001791, +STORE, 3524177920, 3525013503, +STORE, 3525013504, 3526582271, +STORE, 3526582272, 3526606847, +STORE, 3526606848, 3526610943, +STORE, 3526610944, 3526615039, +STORE, 3526672384, 3526746111, +STORE, 3526746112, 3526860799, +STORE, 3526860800, 3526868991, +STORE, 3526868992, 3526873087, +STORE, 3527000064, 3527475199, +STORE, 3527475200, 3527479295, +STORE, 3527479296, 3527573503, +STORE, 3527573504, 3527581695, +STORE, 3527581696, 3527585791, +STORE, 3527585792, 3527606271, +STORE, 3527909376, 3527913471, +STORE, 3527913472, 3527917567, +STORE, 3527917568, 3527921663, +STORE, 3527950336, 3528011775, +STORE, 3528011776, 3528093695, +STORE, 3528093696, 3528101887, +STORE, 3528101888, 3528105983, +STORE, 3528228864, 3528241151, +STORE, 3528241152, 3528261631, +STORE, 3528261632, 3528265727, +STORE, 
3528273920, 3528593407, +STORE, 3528593408, 3528609791, +STORE, 3528609792, 3528638463, +STORE, 3528638464, 3528642559, +STORE, 3528642560, 3528646655, +STORE, 3528880128, 3528912895, +STORE, 3528912896, 3528962047, +STORE, 3528962048, 3528966143, +STORE, 3528966144, 3528970239, +STORE, 3528982528, 3530293247, +STORE, 3530366976, 3530825727, +STORE, 3530825728, 3531317247, +STORE, 3531317248, 3541041151, +STORE, 3541041152, 3541303295, +STORE, 3541430272, 3566206975, +STORE, 3566206976, 3566993407, +STORE, 3567239168, 3587571711, +STORE, 3587571712, 3588284415, +STORE, 3588284416, 3588661247, +STORE, 3588661248, 3589066751, +STORE, 3589066752, 3589574655, +STORE, 3589574656, 3590078463, +STORE, 3590078464, 3590373375, +STORE, 3590373376, 3590668287, +STORE, 3590668288, 3590963199, +STORE, 3590963200, 3591294975, +STORE, 3591294976, 3591602175, +STORE, 3591602176, 3591933951, +STORE, 3591933952, 3592241151, +STORE, 3592241152, 3592572927, +STORE, 3592572928, 3592876031, +STORE, 3592876032, 3593211903, +STORE, 3593211904, 3593547775, +STORE, 3593547776, 3593650175, +STORE, 3593650176, 3593928703, +STORE, 3593928704, 3593936895, +STORE, 3593936896, 3593940991, +STORE, 3594006528, 3594301439, +STORE, 3594301440, 3594739711, +STORE, 3594739712, 3594756095, +STORE, 3594756096, 3594760191, +STORE, 3594760192, 3594768383, +STORE, 3594952704, 3595051007, +STORE, 3595051008, 3595223039, +STORE, 3595223040, 3595227135, +STORE, 3595227136, 3595235327, +STORE, 3595431936, 3595775999, +STORE, 3595776000, 3596701695, +STORE, 3596701696, 3596742655, +STORE, 3596742656, 3596746751, +STORE, 3596746752, 3596750847, +STORE, 3596767232, 3597070335, +STORE, 3597070336, 3597402111, +STORE, 3597402112, 3598188543, +STORE, 3598262272, 3623428095, +STORE, 3623428096, 3623432191, +STORE, 3623432192, 3623436287, +STORE, 3623436288, 3623440383, +STORE, 3623616512, 3623878655, +STORE, 3624169472, 3624300543, +STORE, 3627524096, 3628523519, +STORE, 3628523520, 3629522943, +STORE, 3696631808, 3730186239, +STORE, 3730186240, 3763740671, +STORE, 3763740672, 3764027391, +STORE, 3764027392, 3765133311, +STORE, 3765133312, 3765145599, +STORE, 3765145600, 3765149695, +STORE, 3765178368, 3766022143, +STORE, 3766022144, 3768791039, +STORE, 3768791040, 3768840191, +STORE, 3768840192, 3768844287, +STORE, 3768897536, 3768913919, +STORE, 3768913920, 3768934399, +STORE, 3768934400, 3768938495, +STORE, 3769016320, 3769147391, +STORE, 3769147392, 3769233407, +STORE, 3769233408, 3769356287, +STORE, 3769356288, 3769360383, +STORE, 3769360384, 3769368575, +STORE, 3769376768, 3794542591, +STORE, 3794542592, 3794599935, +STORE, 3794599936, 3794731007, +STORE, 3794731008, 3794735103, +STORE, 3794735104, 3794743295, +STORE, 3794849792, 3794980863, +STORE, 3794980864, 3794984959, +STORE, 3794984960, 3794989055, +STORE, 3794989056, 3794993151, +STORE, 3794993152, 3794997247, +STORE, 3795103744, 3795128319, +STORE, 3795128320, 3795165183, +STORE, 3795165184, 3795169279, +STORE, 3795169280, 3795173375, +STORE, 3795210240, 3795357695, +STORE, 3795357696, 3795365887, +STORE, 3795365888, 3795374079, +STORE, 3795374080, 3795378175, +STORE, 3795378176, 3795382271, +STORE, 3795406848, 3795738623, +STORE, 3795738624, 3795742719, +STORE, 3795742720, 3795755007, +STORE, 3795755008, 3795759103, +STORE, 3795763200, 3795894271, +STORE, 3795894272, 3796041727, +STORE, 3796041728, 3796054015, +STORE, 3796054016, 3796066303, +STORE, 3796066304, 3796070399, +STORE, 3796176896, 3796205567, +STORE, 3796205568, 3796250623, +STORE, 3796250624, 3796254719, +STORE, 
3796254720, 3796258815, +STORE, 3796262912, 3796393983, +STORE, 3796393984, 3796516863, +STORE, 3796516864, 3796873215, +STORE, 3796873216, 3796885503, +STORE, 3796885504, 3796889599, +STORE, 3796963328, 3796967423, +STORE, 3796967424, 3796975615, +STORE, 3796975616, 3796979711, +STORE, 3797000192, 3797307391, +STORE, 3797307392, 3797311487, +STORE, 3797311488, 3797315583, +STORE, 3797315584, 3797323775, +STORE, 3797327872, 3797450751, +STORE, 3797450752, 3797458943, +STORE, 3797458944, 3797471231, +STORE, 3797471232, 3797475327, +STORE, 3797577728, 3797700607, +STORE, 3797700608, 3797721087, +STORE, 3797721088, 3797733375, +STORE, 3797733376, 3797741567, +STORE, 3797741568, 3797864447, +STORE, 3797864448, 3797995519, +STORE, 3797995520, 3798048767, +STORE, 3798048768, 3798179839, +STORE, 3798179840, 3798188031, +STORE, 3798188032, 3798192127, +STORE, 3798290432, 3798302719, +STORE, 3798302720, 3798323199, +STORE, 3798323200, 3798327295, +STORE, 3798327296, 3798331391, +STORE, 3798429696, 3798433791, +STORE, 3798433792, 3798552575, +STORE, 3798552576, 3798556671, +STORE, 3798556672, 3798568959, +STORE, 3798568960, 3798573055, +STORE, 3798573056, 3798581247, +STORE, 3798618112, 3798749183, +STORE, 3798749184, 3798855679, +STORE, 3798855680, 3798966271, +STORE, 3798966272, 3798982655, +STORE, 3798982656, 3798986751, +STORE, 3799101440, 3799171071, +STORE, 3799171072, 3799240703, +STORE, 3799240704, 3799248895, +STORE, 3799248896, 3799252991, +STORE, 3799326720, 3799650303, +STORE, 3799650304, 3800629247, +STORE, 3800629248, 3800641535, +STORE, 3800641536, 3800645631, +STORE, 3800645632, 3800649727, +STORE, 3800649728, 3800903679, +STORE, 3800903680, 3800936447, +STORE, 3800936448, 3800969215, +STORE, 3800969216, 3800981503, +STORE, 3800981504, 3800985599, +STORE, 3801001984, 3801133055, +STORE, 3801133056, 3801202687, +STORE, 3801202688, 3801591807, +STORE, 3801591808, 3801599999, +STORE, 3801600000, 3801604095, +STORE, 3801604096, 3801608191, +STORE, 3801608192, 3801739263, +STORE, 3801739264, 3801755647, +STORE, 3801755648, 3801796607, +STORE, 3801796608, 3801804799, +STORE, 3801804800, 3801808895, +STORE, 3801878528, 3801944063, +STORE, 3801944064, 3802116095, +STORE, 3802116096, 3802124287, +STORE, 3802124288, 3802128383, +STORE, 3802136576, 3803447295, +STORE, 3803492352, 3803553791, +STORE, 3803553792, 3804233727, +STORE, 3804233728, 3806068735, +STORE, 3806121984, 3806253055, +STORE, 3806253056, 3806674943, +STORE, 3806674944, 3807117311, +STORE, 3807117312, 3807379455, +STORE, 3807379456, 3807432703, +STORE, 3807432704, 3807563775, +STORE, 3807563776, 3809202175, +STORE, 3809202176, 3810250751, +STORE, 3810250752, 3827027967, +STORE, 3827027968, 3829125119, +STORE, 3829125120, 3837513727, +STORE, 3837513728, 3839610879, +STORE, 3839610880, 3847999487, +STORE, 3847999488, 3856392191, +STORE, 3856392192, 3864784895, +STORE, 3864784896, 3868983295, +STORE, 3868983296, 3885760511, +STORE, 3885760512, 3886809087, +STORE, 3886809088, 3887857663, +STORE, 3887857664, 3888119807, +STORE, 3888144384, 3888148479, +STORE, 3888148480, 3888218111, +STORE, 3888218112, 3888222207, +STORE, 3888222208, 3888353279, +STORE, 3888353280, 3889172479, +STORE, 3889172480, 3892314111, +STORE, 3892314112, 3892576255, +STORE, 3892588544, 3892637695, +STORE, 3892637696, 3892686847, +STORE, 3892686848, 3892744191, +STORE, 3892748288, 3892785151, +STORE, 3892785152, 3895459839, +STORE, 3895459840, 3895721983, +STORE, 3895738368, 3895885823, +STORE, 3895885824, 3897081855, +STORE, 3897081856, 3906482175, +STORE, 
3906482176, 3916144639, +STORE, 3916144640, 3925766143, +STORE, 3925766144, 3926974463, +STORE, 3926974464, 3928367103, +STORE, 3928367104, 3928911871, +STORE, 3928911872, 3933995007, +STORE, 3933995008, 3935830015, +STORE, 3935830016, 3935846399, +STORE, 3935879168, 3936010239, +STORE, 3936010240, 3936026623, +STORE, 3936026624, 3936034815, +STORE, 3936034816, 3936051199, +STORE, 3936051200, 3936055295, +STORE, 3936071680, 3936137215, +STORE, 3936137216, 3936202751, +STORE, 3936202752, 3936219135, +STORE, 3936235520, 3936251903, +STORE, 3936268288, 3936276479, +STORE, 3936276480, 3936284671, +STORE, 3936284672, 3936288767, +STORE, 3936288768, 3936292863, +STORE, 3936296960, 3936354303, +STORE, 3936354304, 3936616447, +STORE, 3936628736, 3936669695, +STORE, 3936669696, 3936747519, +STORE, 3936747520, 3936870399, +STORE, 3936870400, 3936874495, +STORE, 3936874496, 3936878591, +STORE, 3936882688, 3936903167, +STORE, 3936911360, 3936948223, +STORE, 3936948224, 3936964607, +STORE, 3936964608, 3937103871, +STORE, 3937103872, 3937107967, +STORE, 3937132544, 3937161215, +STORE, 3937189888, 3937255423, +STORE, 3937255424, 3938512895, +STORE, 3938512896, 3945435135, +STORE, 3945435136, 3945476095, +STORE, 3945476096, 3945484287, +STORE, 3945484288, 3945496575, +STORE, 3945500672, 3945541631, +STORE, 3945558016, 3945566207, +STORE, 3945566208, 3945594879, +STORE, 3945594880, 3945598975, +STORE, 3945598976, 3945603071, +STORE, 3945611264, 3945742335, +STORE, 3945742336, 3945844735, +STORE, 3945844736, 3945848831, +STORE, 3945848832, 3945861119, +STORE, 3945861120, 3945865215, +STORE, 3945869312, 3945897983, +STORE, 3945897984, 3946303487, +STORE, 3946303488, 3946397695, +STORE, 3946397696, 3946569727, +STORE, 3946569728, 3946573823, +STORE, 3946573824, 3946594303, +STORE, 3946594304, 3946663935, +STORE, 3946663936, 3946708991, +STORE, 3946708992, 3946823679, +STORE, 3946823680, 3946827775, +STORE, 3946827776, 3946831871, +STORE, 3946831872, 3946860543, +STORE, 3946893312, 3946897407, +STORE, 3946897408, 3946905599, +STORE, 3946905600, 3946909695, +STORE, 3946909696, 3946913791, +STORE, 3946913792, 3946930175, +STORE, 3946930176, 3946967039, +STORE, 3946967040, 3947102207, +STORE, 3947102208, 3948412927, +STORE, 3948441600, 3948556287, +STORE, 3948556288, 3948576767, +STORE, 3948576768, 3948597247, +STORE, 3948597248, 3948605439, +STORE, 3948605440, 3948609535, +STORE, 3948609536, 3948654591, +STORE, 3948654592, 3948781567, +STORE, 3948781568, 3948822527, +STORE, 3948822528, 3948904447, +STORE, 3948904448, 3948908543, +STORE, 3948908544, 3948912639, +STORE, 3948945408, 3949043711, +STORE, 3949043712, 3949174783, +STORE, 3949174784, 3949191167, +STORE, 3949191168, 3949195263, +STORE, 3949207552, 3949252607, +STORE, 3949252608, 3949256703, +STORE, 3949256704, 3949363199, +STORE, 3949363200, 3949367295, +STORE, 3949367296, 3949379583, +STORE, 3949379584, 3949383679, +STORE, 3949383680, 3949400063, +STORE, 3949400064, 3949404159, +STORE, 3949416448, 3949481983, +STORE, 3949481984, 3949486079, +STORE, 3949486080, 3949592575, +STORE, 3949592576, 3949596671, +STORE, 3949596672, 3949621247, +STORE, 3949621248, 3949662207, +STORE, 3949662208, 3949666303, +STORE, 3949694976, 3949727743, +STORE, 3949727744, 3949731839, +STORE, 3949731840, 3949838335, +STORE, 3949838336, 3949842431, +STORE, 3949842432, 3949846527, +STORE, 3949846528, 3949854719, +STORE, 3949854720, 3949858815, +STORE, 3949858816, 3949862911, +STORE, 3949867008, 3949891583, +STORE, 3949891584, 3949928447, +STORE, 3949928448, 3949993983, +STORE, 
3949993984, 3950043135, +STORE, 3950043136, 3950059519, +STORE, 3950059520, 3950096383, +STORE, 3950096384, 3950100479, +STORE, 3950100480, 3950104575, +STORE, 3950104576, 3950157823, +STORE, 3950157824, 3950292991, +STORE, 3950292992, 3950346239, +STORE, 3950346240, 3950477311, +STORE, 3950477312, 3950485503, +STORE, 3950485504, 3950489599, +STORE, 3950493696, 3950510079, +STORE, 3950510080, 3950661631, +STORE, 3950661632, 3951005695, +STORE, 3951005696, 3951026175, +STORE, 3951026176, 3951030271, +STORE, 3951030272, 3951054847, +STORE, 3951054848, 3951116287, +STORE, 3951116288, 3951144959, +STORE, 3951144960, 3951149055, +STORE, 3951149056, 3951194111, +STORE, 3951194112, 3951202303, +STORE, 3951202304, 3951206399, +STORE, 3951210496, 3951226879, +STORE, 3951226880, 3951329279, +STORE, 3951329280, 3951366143, +STORE, 3951366144, 3951411199, +STORE, 3951411200, 3951415295, +STORE, 3951415296, 3951419391, +STORE, 3951419392, 3951452159, +STORE, 3951452160, 3951566847, +STORE, 3951566848, 3951812607, +STORE, 3951812608, 3952173055, +STORE, 3952173056, 3952214015, +STORE, 3952214016, 3952218111, +STORE, 3952222208, 3952250879, +STORE, 3952250880, 3952369663, +STORE, 3952369664, 3952488447, +STORE, 3952488448, 3952627711, +STORE, 3952627712, 3952635903, +STORE, 3952635904, 3952639999, +STORE, 3952652288, 3952668671, +STORE, 3952668672, 3953000447, +STORE, 3953000448, 3953004543, +STORE, 3953004544, 3953008639, +STORE, 3953008640, 3953012735, +STORE, 3953012736, 3953037311, +STORE, 3953037312, 3953151999, +STORE, 3953152000, 3953291263, +STORE, 3953291264, 3953324031, +STORE, 3953324032, 3953364991, +STORE, 3953364992, 3953373183, +STORE, 3953373184, 3953377279, +STORE, 3953381376, 3953410047, +STORE, 3953410048, 3953491967, +STORE, 3953491968, 3953643519, +STORE, 3953643520, 3953651711, +STORE, 3953651712, 3953655807, +STORE, 3953659904, 3953766399, +STORE, 3953766400, 3953774591, +STORE, 3953774592, 3953786879, +STORE, 3953786880, 3953790975, +STORE, 3953790976, 3953823743, +STORE, 3953823744, 3953963007, +STORE, 3953963008, 3954024447, +STORE, 3954024448, 3954118655, +STORE, 3954118656, 3954122751, +STORE, 3954122752, 3954126847, +STORE, 3954130944, 3954184191, +STORE, 3954184192, 3954294783, +STORE, 3954294784, 3954323455, +STORE, 3954323456, 3954393087, +STORE, 3954393088, 3954397183, +STORE, 3954397184, 3954401279, +STORE, 3954401280, 3954405375, +STORE, 3954409472, 3954528255, +STORE, 3954528256, 3954737151, +STORE, 3954737152, 3955052543, +STORE, 3955052544, 3955060735, +STORE, 3955060736, 3955064831, +STORE, 3955068928, 3955105791, +STORE, 3955105792, 3955167231, +STORE, 3955167232, 3955277823, +STORE, 3955277824, 3955310591, +STORE, 3955310592, 3955351551, +STORE, 3955351552, 3955359743, +STORE, 3955359744, 3955363839, +STORE, 3955363840, 3955392511, +STORE, 3955392512, 3955453951, +STORE, 3955453952, 3955601407, +STORE, 3955601408, 3955777535, +STORE, 3955777536, 3955982335, +STORE, 3955982336, 3956011007, +STORE, 3956011008, 3956015103, +STORE, 3956023296, 3956039679, +STORE, 3956039680, 3956125695, +STORE, 3956125696, 3956129791, +STORE, 3956129792, 3956133887, +STORE, 3956133888, 3956137983, +STORE, 3956142080, 3956449279, +STORE, 3956449280, 3956543487, +STORE, 3956543488, 3956719615, +STORE, 3956719616, 3956731903, +STORE, 3956731904, 3956735999, +STORE, 3956744192, 3956793343, +STORE, 3956793344, 3956887551, +STORE, 3956887552, 3956953087, +STORE, 3956953088, 3957035007, +STORE, 3957035008, 3957039103, +STORE, 3957039104, 3957047295, +STORE, 3957047296, 3957071871, +STORE, 
3957071872, 3957231615, +STORE, 3957231616, 3957563391, +STORE, 3957563392, 3957579775, +STORE, 3957579776, 3957583871, +STORE, 3957592064, 3957608447, +STORE, 3957608448, 3957878783, +STORE, 3957878784, 3958591487, +STORE, 3958591488, 3958599679, +STORE, 3958599680, 3958607871, +STORE, 3958607872, 3958620159, +STORE, 3958620160, 3958624255, +STORE, 3958624256, 3963199487, +STORE, 3963199488, 3963285503, +STORE, 3963285504, 3963371519, +STORE, 3963371520, 3963428863, +STORE, 3963428864, 3963555839, +STORE, 3963555840, 3963559935, +STORE, 3963559936, 3963564031, +STORE, 3963568128, 3963596799, +STORE, 3963596800, 3963682815, +STORE, 3963682816, 3963695103, +STORE, 3963695104, 3963711487, +STORE, 3963711488, 3963715583, +STORE, 3963719680, 3963752447, +STORE, 3963752448, 3963846655, +STORE, 3963846656, 3963932671, +STORE, 3963932672, 3964444671, +STORE, 3964444672, 3964448767, +STORE, 3964448768, 3965808639, +STORE, 3965808640, 3965845503, +STORE, 3965845504, 3965849599, +STORE, 3965853696, 3965935615, +STORE, 3965935616, 3966017535, +STORE, 3966017536, 3966103551, +STORE, 3966103552, 3966685183, +STORE, 3966685184, 3967705087, +STORE, 3967705088, 3967758335, +STORE, 3967758336, 3967762431, +STORE, 3967762432, 3967770623, +STORE, 3967770624, 3967799295, +STORE, 3967799296, 3967848447, +STORE, 3967848448, 3967868927, +STORE, 3967868928, 3967901695, +STORE, 3967901696, 3967905791, +STORE, 3967905792, 3967909887, +STORE, 3967909888, 3967995903, +STORE, 3967995904, 3968077823, +STORE, 3968077824, 3968159743, +STORE, 3968159744, 3968167935, +STORE, 3968167936, 3968172031, +STORE, 3968172032, 3968192511, +STORE, 3968192512, 3968196607, +STORE, 3968196608, 3968200703, +STORE, 3968208896, 3968516095, +STORE, 3968516096, 3968528383, +STORE, 3968528384, 3968552959, +STORE, 3968552960, 3968557055, +STORE, 3968561152, 3968593919, +STORE, 3968593920, 3968626687, +STORE, 3968626688, 3971153919, +STORE, 3971153920, 3973754879, +STORE, 3973754880, 3973804031, +STORE, 3973804032, 3973820415, +STORE, 3973820416, 3973832703, +STORE, 3973840896, 3973873663, +STORE, 3973873664, 3973967871, +STORE, 3973967872, 3973976063, +STORE, 3973976064, 3973984255, +STORE, 3973984256, 3973988351, +STORE, 3973988352, 3973992447, +STORE, 3973996544, 3974008831, +STORE, 3974008832, 3974045695, +STORE, 3974045696, 3974139903, +STORE, 3974139904, 3974254591, +STORE, 3974254592, 3974275071, +STORE, 3974275072, 3974291455, +STORE, 3974291456, 3974295551, +STORE, 3974295552, 3974373375, +STORE, 3974373376, 3974524927, +STORE, 3974524928, 3974529023, +STORE, 3974529024, 3974537215, +STORE, 3974537216, 3974541311, +STORE, 3974541312, 3974545407, +STORE, 3974545408, 3974627327, +STORE, 3974627328, 3974680575, +STORE, 3974680576, 3974811647, +STORE, 3974811648, 3974819839, +STORE, 3974819840, 3974823935, +STORE, 3974832128, 3974918143, +STORE, 3974918144, 3974963199, +STORE, 3974963200, 3975077887, +STORE, 3975077888, 3975090175, +STORE, 3975090176, 3975094271, +STORE, 3975094272, 3975102463, +STORE, 3975102464, 3975114751, +STORE, 3975114752, 3975266303, +STORE, 3975266304, 3975274495, +STORE, 3975274496, 3975286783, +STORE, 3975286784, 3975290879, +STORE, 3975290880, 3975299071, +STORE, 3975299072, 3975315455, +STORE, 3975315456, 3975430143, +STORE, 3975430144, 3975536639, +STORE, 3975536640, 3975651327, +STORE, 3975651328, 3975655423, +STORE, 3975655424, 3975659519, +STORE, 3975659520, 3975770111, +STORE, 3975770112, 3975778303, +STORE, 3975778304, 3975790591, +STORE, 3975790592, 3975794687, +STORE, 3975794688, 3975798783, +STORE, 
3975798784, 3975831551, +STORE, 3975831552, 3975872511, +STORE, 3975872512, 3975987199, +STORE, 3975987200, 3976134655, +STORE, 3976134656, 3977175039, +STORE, 3977175040, 3977183231, +STORE, 3977183232, 3977191423, +STORE, 3977191424, 3977195519, +STORE, 3977199616, 3977248767, +STORE, 3977248768, 3977539583, +STORE, 3977539584, 3977965567, +STORE, 3977965568, 3977981951, +STORE, 3977981952, 3977986047, +STORE, 3977986048, 3977994239, +STORE, 3977994240, 3978002431, +STORE, 3978002432, 3978084351, +STORE, 3978084352, 3978125311, +STORE, 3978125312, 3978174463, +STORE, 3978174464, 3978178559, +STORE, 3978178560, 3978182655, +STORE, 3978182656, 3978207231, +STORE, 3978207232, 3978297343, +STORE, 3978297344, 3978301439, +STORE, 3978301440, 3978305535, +STORE, 3978305536, 3978309631, +STORE, 3978309632, 3978317823, +STORE, 3978317824, 3978625023, +STORE, 3978625024, 3978657791, +STORE, 3978657792, 3978727423, +STORE, 3978727424, 3978735615, +STORE, 3978735616, 3978739711, +STORE, 3978739712, 3978760191, +STORE, 3978760192, 3978842111, +STORE, 3978842112, 3978850303, +STORE, 3978850304, 3978858495, +STORE, 3978858496, 3978862591, +STORE, 3978862592, 3978895359, +STORE, 3978895360, 3979014143, +STORE, 3979014144, 3979132927, +STORE, 3979132928, 3979288575, +STORE, 3979288576, 3979481087, +STORE, 3979481088, 3979489279, +STORE, 3979489280, 3979493375, +STORE, 3979497472, 3979583487, +STORE, 3979583488, 3979673599, +STORE, 3979673600, 3979718655, +STORE, 3979718656, 3979829247, +STORE, 3979829248, 3979841535, +STORE, 3979841536, 3979882495, +STORE, 3979882496, 3979964415, +STORE, 3979964416, 3980013567, +STORE, 3980013568, 3980148735, +STORE, 3980148736, 3980152831, +STORE, 3980152832, 3980320767, +STORE, 3980320768, 3980337151, +STORE, 3980337152, 3980341247, +STORE, 3980345344, 3980365823, +STORE, 3980365824, 3980423167, +STORE, 3980423168, 3980460031, +STORE, 3980460032, 3980500991, +STORE, 3980500992, 3980509183, +STORE, 3980509184, 3980513279, +STORE, 3980513280, 3980546047, +STORE, 3980546048, 3980660735, +STORE, 3980660736, 3980951551, +STORE, 3980951552, 3981500415, +STORE, 3981500416, 3981529087, +STORE, 3981529088, 3981533183, +STORE, 3981537280, 3981549567, +STORE, 3981549568, 3981598719, +STORE, 3981598720, 3981717503, +STORE, 3981717504, 3982127103, +STORE, 3982127104, 3982675967, +STORE, 3982675968, 3982733311, +STORE, 3982733312, 3982737407, +STORE, 3982741504, 3982860287, +STORE, 3982860288, 3982905343, +STORE, 3982905344, 3982966783, +STORE, 3982966784, 3982974975, +STORE, 3982974976, 3982979071, +STORE, 3982979072, 3983032319, +STORE, 3983032320, 3983085567, +STORE, 3983085568, 3983208447, +STORE, 3983208448, 3983212543, +STORE, 3983212544, 3983220735, +STORE, 3983220736, 3983224831, +STORE, 3983224832, 3983237119, +STORE, 3983237120, 3983351807, +STORE, 3983351808, 3983376383, +STORE, 3983376384, 3983392767, +STORE, 3983392768, 3983396863, +STORE, 3983396864, 3983400959, +STORE, 3983400960, 3983417343, +STORE, 3983417344, 3983753215, +STORE, 3983753216, 3983757311, +STORE, 3983757312, 3983761407, +STORE, 3983761408, 3983765503, +STORE, 3983765504, 3983769599, +STORE, 3983769600, 3983880191, +STORE, 3983880192, 3983892479, +STORE, 3983892480, 3983900671, +STORE, 3983900672, 3983904767, +STORE, 3983904768, 3983908863, +STORE, 3983908864, 3983941631, +STORE, 3983941632, 3983990783, +STORE, 3983990784, 3984097279, +STORE, 3984097280, 3984105471, +STORE, 3984105472, 3984117759, +STORE, 3984117760, 3984121855, +STORE, 3984121856, 3984125951, +STORE, 3984125952, 3984134143, +STORE, 
3984134144, 3984150527, +STORE, 3984150528, 3984416767, +STORE, 3984416768, 3984470015, +STORE, 3984470016, 3984564223, +STORE, 3984564224, 3984568319, +STORE, 3984572416, 3984629759, +STORE, 3984629760, 3984805887, +STORE, 3984805888, 3985096703, +STORE, 3985096704, 3985104895, +STORE, 3985104896, 3985108991, +STORE, 3985113088, 3986862079, +STORE, 3986862080, 3993640959, +STORE, 3993640960, 3993739263, +STORE, 3993739264, 3993743359, +STORE, 3993743360, 3993759743, +STORE, 3993759744, 3993780223, +STORE, 3993780224, 3993784319, +STORE, 3993784320, 3993792511, +STORE, 3993792512, 3993796607, +STORE, 3993796608, 3993800703, +STORE, 3993804800, 3994214399, +STORE, 3994214400, 3994218495, +STORE, 3994218496, 3994222591, +STORE, 3994222592, 3994226687, +STORE, 3994230784, 3994243071, +STORE, 3994243072, 3994255359, +STORE, 3994255360, 3994304511, +STORE, 3994304512, 3994386431, +STORE, 3994386432, 3994509311, +STORE, 3994509312, 3994521599, +STORE, 3994521600, 3994525695, +STORE, 3994529792, 3994542079, +STORE, 3994542080, 3994660863, +STORE, 3994660864, 3994705919, +STORE, 3994705920, 3994796031, +STORE, 3994796032, 3994800127, +STORE, 3994800128, 3994804223, +STORE, 3994804224, 3994812415, +STORE, 3994812416, 3994845183, +STORE, 3994845184, 3994898431, +STORE, 3994898432, 3994902527, +STORE, 3994902528, 3994906623, +STORE, 3994910720, 3994931199, +STORE, 3994931200, 3995181055, +STORE, 3995181056, 3995222015, +STORE, 3995222016, 3995275263, +STORE, 3995275264, 3995279359, +STORE, 3995279360, 3995283455, +STORE, 3995283456, 3995291647, +STORE, 3995291648, 3995324415, +STORE, 3995324416, 3995451391, +STORE, 3995451392, 3995697151, +STORE, 3995697152, 3996078079, +STORE, 3996078080, 3996086271, +STORE, 3996086272, 3996090367, +STORE, 3996094464, 3996119039, +STORE, 3996119040, 3996200959, +STORE, 3996200960, 3996229631, +STORE, 3996229632, 3996233727, +STORE, 3996233728, 3996282879, +STORE, 3996282880, 3996291071, +STORE, 3996291072, 3996295167, +STORE, 3996299264, 3996311551, +STORE, 3996311552, 3996430335, +STORE, 3996430336, 3996467199, +STORE, 3996467200, 3996504063, +STORE, 3996504064, 3996512255, +STORE, 3996512256, 3996516351, +STORE, 3996516352, 3996540927, +STORE, 3996540928, 3996671999, +STORE, 3996672000, 3996676095, +STORE, 3996676096, 3996684287, +STORE, 3996684288, 3996688383, +STORE, 3996688384, 3996692479, +STORE, 3996692480, 3996717055, +STORE, 3996717056, 3997048831, +STORE, 3997048832, 3997057023, +STORE, 3997057024, 3997073407, +STORE, 3997073408, 3997077503, +STORE, 3997077504, 3997081599, +STORE, 3997081600, 3997097983, +STORE, 3997097984, 3997179903, +STORE, 3997179904, 3997356031, +STORE, 3997356032, 3997650943, +STORE, 3997650944, 3997675519, +STORE, 3997675520, 3997679615, +STORE, 3997683712, 3997700095, +STORE, 3997700096, 3997745151, +STORE, 3997745152, 3997802495, +STORE, 3997802496, 3997810687, +STORE, 3997810688, 3997814783, +STORE, 3997814784, 3998064639, +STORE, 3998064640, 3998081023, +STORE, 3998081024, 3998085119, +STORE, 3998085120, 3998130175, +STORE, 3998130176, 3998134271, +STORE, 3998134272, 3998142463, +STORE, 3998142464, 3998179327, +STORE, 3998179328, 3998212095, +STORE, 3998212096, 3998326783, +STORE, 3998326784, 3998351359, +STORE, 3998351360, 3998392319, +STORE, 3998392320, 3998396415, +STORE, 3998396416, 3998400511, +STORE, 3998400512, 3998433279, +STORE, 3998433280, 3998466047, +STORE, 3998466048, 3998613503, +STORE, 3998613504, 3998666751, +STORE, 3998666752, 3998724095, +STORE, 3998724096, 3998732287, +STORE, 3998732288, 3998736383, +STORE, 
3998736384, 3998760959, +STORE, 3998760960, 3998777343, +STORE, 3998777344, 3998822399, +STORE, 3998822400, 3998826495, +STORE, 3998826496, 3998830591, +STORE, 3998830592, 3998863359, +STORE, 3998863360, 3998900223, +STORE, 3998900224, 3999043583, +STORE, 3999043584, 3999121407, +STORE, 3999121408, 3999215615, +STORE, 3999215616, 3999223807, +STORE, 3999223808, 3999227903, +STORE, 3999227904, 3999236095, +STORE, 3999236096, 3999268863, +STORE, 3999268864, 3999301631, +STORE, 3999301632, 3999354879, +STORE, 3999354880, 3999428607, +STORE, 3999428608, 3999436799, +STORE, 3999436800, 3999440895, +STORE, 3999444992, 3999461375, +STORE, 3999461376, 3999584255, +STORE, 3999584256, 3999760383, +STORE, 3999760384, 4000219135, +STORE, 4000219136, 4000235519, +STORE, 4000235520, 4000251903, +STORE, 4000251904, 4000501759, +STORE, 4000501760, 4000505855, +STORE, 4000505856, 4000509951, +STORE, 4000509952, 4000518143, +STORE, 4000518144, 4000522239, +STORE, 4000522240, 4000587775, +STORE, 4000587776, 4000645119, +STORE, 4000645120, 4000813055, +STORE, 4000813056, 4000817151, +STORE, 4000821248, 4000837631, +STORE, 4000837632, 4000870399, +STORE, 4000870400, 4000874495, +STORE, 4000874496, 4000878591, +STORE, 4000878592, 4000882687, +STORE, 4000882688, 4000886783, +STORE, 4000886784, 4000890879, +STORE, 4000890880, 4000907263, +STORE, 4000907264, 4001214463, +STORE, 4001214464, 4001558527, +STORE, 4001558528, 4002484223, +STORE, 4002484224, 4002525183, +STORE, 4002525184, 4002529279, +STORE, 4002529280, 4002533375, +STORE, 4002533376, 4002537471, +STORE, 4002537472, 4002660351, +STORE, 4002660352, 4002779135, +STORE, 4002779136, 4002791423, +STORE, 4002791424, 4002799615, +STORE, 4002799616, 4002807807, +STORE, 4002807808, 4002811903, +STORE, 4002811904, 4002828287, +STORE, 4002828288, 4002910207, +STORE, 4002910208, 4003028991, +STORE, 4003028992, 4003037183, +STORE, 4003037184, 4003045375, +STORE, 4003045376, 4003049471, +STORE, 4003049472, 4003053567, +STORE, 4003053568, 4003057663, +STORE, 4003057664, 4003065855, +STORE, 4003065856, 4003135487, +STORE, 4003135488, 4003446783, +STORE, 4003446784, 4003450879, +STORE, 4003450880, 4003454975, +STORE, 4003454976, 4003459071, +STORE, 4003459072, 4003463167, +STORE, 4003463168, 4003495935, +STORE, 4003495936, 4003569663, +STORE, 4003569664, 4003573759, +STORE, 4003573760, 4003704831, +STORE, 4003704832, 4003708927, +STORE, 4003708928, 4003713023, +STORE, 4003713024, 4003737599, +STORE, 4003737600, 4003770367, +STORE, 4003770368, 4003876863, +STORE, 4003876864, 4003880959, +STORE, 4003880960, 4003885055, +STORE, 4003885056, 4003889151, +STORE, 4003889152, 4003893247, +STORE, 4003893248, 4003897343, +STORE, 4003897344, 4003962879, +STORE, 4003962880, 4004069375, +STORE, 4004069376, 4004093951, +STORE, 4004093952, 4004118527, +STORE, 4004118528, 4004122623, +STORE, 4004122624, 4004126719, +STORE, 4004126720, 4004155391, +STORE, 4004155392, 4004286463, +STORE, 4004286464, 4004384767, +STORE, 4004384768, 4004388863, +STORE, 4004388864, 4004646911, +STORE, 4004646912, 4004655103, +STORE, 4004655104, 4004659199, +STORE, 4004659200, 4004667391, +STORE, 4004667392, 4004683775, +STORE, 4004683776, 4004814847, +STORE, 4004814848, 4004818943, +STORE, 4004818944, 4004823039, +STORE, 4004823040, 4004827135, +STORE, 4004827136, 4004835327, +STORE, 4004835328, 4004954111, +STORE, 4004954112, 4005085183, +STORE, 4005085184, 4005306367, +STORE, 4005306368, 4005765119, +STORE, 4005765120, 4005789695, +STORE, 4005789696, 4005793791, +STORE, 4005793792, 4005801983, +STORE, 
4005801984, 4005920767, +STORE, 4005920768, 4005945343, +STORE, 4005945344, 4005949439, +STORE, 4005949440, 4005986303, +STORE, 4005986304, 4005990399, +STORE, 4005990400, 4005994495, +STORE, 4005994496, 4006002687, +STORE, 4006002688, 4006109183, +STORE, 4006109184, 4006117375, +STORE, 4006117376, 4006121471, +STORE, 4006121472, 4006133759, +STORE, 4006133760, 4006137855, +STORE, 4006137856, 4006141951, +STORE, 4006141952, 4006150143, +STORE, 4006150144, 4006391807, +STORE, 4006391808, 4006445055, +STORE, 4006445056, 4006563839, +STORE, 4006563840, 4006572031, +STORE, 4006572032, 4006576127, +STORE, 4006576128, 4006584319, +STORE, 4006584320, 4006694911, +STORE, 4006694912, 4006739967, +STORE, 4006739968, 4006776831, +STORE, 4006776832, 4006785023, +STORE, 4006785024, 4006789119, +STORE, 4006789120, 4006797311, +STORE, 4006797312, 4006813695, +STORE, 4006813696, 4006846463, +STORE, 4006846464, 4006977535, +STORE, 4006977536, 4007006207, +STORE, 4007006208, 4007010303, +STORE, 4007010304, 4007067647, +STORE, 4007067648, 4007075839, +STORE, 4007075840, 4007084031, +STORE, 4007084032, 4007100415, +STORE, 4007100416, 4007116799, +STORE, 4007116800, 4007133183, +STORE, 4007133184, 4007153663, +STORE, 4007153664, 4007178239, +STORE, 4007178240, 4007202815, +STORE, 4007202816, 4007206911, +STORE, 4007206912, 4007272447, +STORE, 4007272448, 4007276543, +STORE, 4007276544, 4007280639, +STORE, 4007280640, 4007284735, +STORE, 4007284736, 4007292927, +STORE, 4007292928, 4007423999, +STORE, 4007424000, 4007448575, +STORE, 4007448576, 4007452671, +STORE, 4007452672, 4007505919, +STORE, 4007505920, 4007510015, +STORE, 4007510016, 4007514111, +STORE, 4007514112, 4007645183, +STORE, 4007645184, 4007776255, +STORE, 4007776256, 4007780351, +STORE, 4007780352, 4007784447, +STORE, 4007784448, 4007788543, +STORE, 4007788544, 4007809023, +STORE, 4007809024, 4007829503, +STORE, 4007829504, 4007960575, +STORE, 4007960576, 4008091647, +STORE, 4008091648, 4008296447, +STORE, 4008296448, 4008890367, +STORE, 4008890368, 4008898559, +STORE, 4008898560, 4008902655, +STORE, 4008902656, 4008996863, +STORE, 4008996864, 4009041919, +STORE, 4009041920, 4009082879, +STORE, 4009082880, 4009091071, +STORE, 4009091072, 4009107455, +STORE, 4009107456, 4009349119, +STORE, 4009349120, 4009373695, +STORE, 4009373696, 4009414655, +STORE, 4009414656, 4009422847, +STORE, 4009422848, 4009426943, +STORE, 4009426944, 4009447423, +STORE, 4009447424, 4009471999, +STORE, 4009472000, 4009512959, +STORE, 4009512960, 4009594879, +STORE, 4009594880, 4009598975, +STORE, 4009598976, 4009697279, +STORE, 4009697280, 4009713663, +STORE, 4009713664, 4009717759, +STORE, 4009717760, 4009721855, +STORE, 4009721856, 4009730047, +STORE, 4009730048, 4009861119, +STORE, 4009861120, 4009951231, +STORE, 4009951232, 4010131455, +STORE, 4010131456, 4010135551, +STORE, 4010135552, 4010139647, +STORE, 4010139648, 4010143743, +STORE, 4010143744, 4010164223, +STORE, 4010164224, 4010295295, +STORE, 4010295296, 4010299391, +STORE, 4010299392, 4010491903, +STORE, 4010491904, 4010495999, +STORE, 4010496000, 4010668031, +STORE, 4010668032, 4011028479, +STORE, 4011028480, 4011053055, +STORE, 4011053056, 4011057151, +STORE, 4011057152, 4011118591, +STORE, 4011118592, 4011126783, +STORE, 4011126784, 4011130879, +STORE, 4011130880, 4011143167, +STORE, 4011143168, 4011147263, +STORE, 4011147264, 4011167743, +STORE, 4011167744, 4011171839, +STORE, 4011171840, 4011360255, +STORE, 4011360256, 4011364351, +STORE, 4011364352, 4011626495, +STORE, 4011626496, 4012216319, +STORE, 
4012216320, 4012228607, +STORE, 4012228608, 4012232703, +STORE, 4012232704, 4012236799, +STORE, 4012236800, 4012240895, +STORE, 4012240896, 4012261375, +STORE, 4012261376, 4012392447, +STORE, 4012392448, 4012466175, +STORE, 4012466176, 4012597247, +STORE, 4012597248, 4012601343, +STORE, 4012601344, 4012605439, +STORE, 4012605440, 4012609535, +STORE, 4012609536, 4012679167, +STORE, 4012679168, 4013563903, +STORE, 4013563904, 4015366143, +STORE, 4015366144, 4015411199, +STORE, 4015411200, 4015415295, +STORE, 4015415296, 4015419391, +STORE, 4015419392, 4015542271, +STORE, 4015542272, 4015550463, +STORE, 4015550464, 4015558655, +STORE, 4015558656, 4015562751, +STORE, 4015562752, 4015583231, +STORE, 4015583232, 4015587327, +STORE, 4015587328, 4015603711, +STORE, 4015665152, 4015669247, +STORE, 4015669248, 4015812607, +STORE, 4015812608, 4015816703, +STORE, 4015816704, 4016111615, +STORE, 4016111616, 4016467967, +STORE, 4016467968, 4016508927, +STORE, 4016508928, 4016517119, +STORE, 4016517120, 4016525311, +STORE, 4016525312, 4016586751, +STORE, 4016586752, 4016664575, +STORE, 4016664576, 4016697343, +STORE, 4016697344, 4016742399, +STORE, 4016742400, 4016746495, +STORE, 4016746496, 4016750591, +STORE, 4016750592, 4016758783, +STORE, 4016799744, 4016844799, +STORE, 4016844800, 4016902143, +STORE, 4016902144, 4016992255, +STORE, 4016992256, 4017000447, +STORE, 4017000448, 4017004543, +STORE, 4017004544, 4017008639, +STORE, 4017008640, 4017016831, +STORE, 4017016832, 4017020927, +STORE, 4017020928, 4017127423, +STORE, 4017127424, 4017131519, +STORE, 4017131520, 4017229823, +STORE, 4017229824, 4017422335, +STORE, 4017422336, 4017438719, +STORE, 4017438720, 4017442815, +STORE, 4017442816, 4017446911, +STORE, 4017446912, 4017455103, +STORE, 4017455104, 4017766399, +STORE, 4017766400, 4017909759, +STORE, 4017909760, 4018081791, +STORE, 4018081792, 4018089983, +STORE, 4018089984, 4018094079, +STORE, 4018094080, 4018098175, +STORE, 4018098176, 4018327551, +STORE, 4018327552, 4018331647, +STORE, 4018331648, 4018339839, +STORE, 4018339840, 4018348031, +STORE, 4018348032, 4018610175, +STORE, 4018610176, 4018626559, +STORE, 4018626560, 4018647039, +STORE, 4018647040, 4018651135, +STORE, 4018651136, 4018749439, +STORE, 4018749440, 4018761727, +STORE, 4018761728, 4018802687, +STORE, 4018802688, 4018806783, +STORE, 4018806784, 4018810879, +STORE, 4018810880, 4018814975, +STORE, 4018814976, 4018823167, +STORE, 4018823168, 4018954239, +STORE, 4018954240, 4019007487, +STORE, 4019007488, 4019068927, +STORE, 4019068928, 4019077119, +STORE, 4019077120, 4019081215, +STORE, 4019081216, 4019093503, +STORE, 4019093504, 4019208191, +STORE, 4019208192, 4019232767, +STORE, 4019232768, 4019265535, +STORE, 4019265536, 4019269631, +STORE, 4019269632, 4019277823, +STORE, 4019277824, 4019458047, +STORE, 4019458048, 4019519487, +STORE, 4019519488, 4019613695, +STORE, 4019613696, 4019621887, +STORE, 4019621888, 4019625983, +STORE, 4019625984, 4019630079, +STORE, 4019630080, 4019744767, +STORE, 4019744768, 4019822591, +STORE, 4019822592, 4019929087, +STORE, 4019929088, 4019941375, +STORE, 4019941376, 4019945471, +STORE, 4019945472, 4019961855, +STORE, 4019961856, 4019994623, +STORE, 4019994624, 4019998719, +STORE, 4019998720, 4020002815, +STORE, 4020002816, 4020006911, +STORE, 4020006912, 4020011007, +STORE, 4020011008, 4020256767, +STORE, 4020256768, 4020326399, +STORE, 4020326400, 4020457471, +STORE, 4020457472, 4020469759, +STORE, 4020469760, 4020473855, +STORE, 4020473856, 4020482047, +STORE, 4020482048, 4020711423, +STORE, 
4020711424, 4020715519, +STORE, 4020715520, 4020719615, +STORE, 4020719616, 4020723711, +STORE, 4020723712, 4020805631, +STORE, 4020805632, 4021051391, +STORE, 4021051392, 4021460991, +STORE, 4021460992, 4021469183, +STORE, 4021469184, 4021473279, +STORE, 4021473280, 4021571583, +STORE, 4021571584, 4021633023, +STORE, 4021633024, 4021727231, +STORE, 4021727232, 4021735423, +STORE, 4021735424, 4021739519, +STORE, 4021739520, 4021747711, +STORE, 4021747712, 4021829631, +STORE, 4021829632, 4021866495, +STORE, 4021866496, 4021919743, +STORE, 4021919744, 4021927935, +STORE, 4021927936, 4021932031, +STORE, 4021932032, 4021944319, +STORE, 4021944320, 4022157311, +STORE, 4022157312, 4022161407, +STORE, 4022161408, 4022173695, +STORE, 4022173696, 4022177791, +STORE, 4022177792, 4022472703, +STORE, 4022472704, 4022509567, +STORE, 4022509568, 4022583295, +STORE, 4022583296, 4022587391, +STORE, 4022587392, 4022591487, +STORE, 4022591488, 4022607871, +STORE, 4022607872, 4022657023, +STORE, 4022657024, 4022722559, +STORE, 4022722560, 4022730751, +STORE, 4022730752, 4022734847, +STORE, 4022734848, 4022865919, +STORE, 4022865920, 4022943743, +STORE, 4022943744, 4023062527, +STORE, 4023062528, 4023074815, +STORE, 4023074816, 4023078911, +STORE, 4023078912, 4023128063, +STORE, 4023128064, 4023218175, +STORE, 4023218176, 4023361535, +STORE, 4023361536, 4023373823, +STORE, 4023373824, 4023377919, +STORE, 4023377920, 4023558143, +STORE, 4023558144, 4023631871, +STORE, 4023631872, 4023816191, +STORE, 4023816192, 4023820287, +STORE, 4023820288, 4023824383, +STORE, 4023824384, 4023832575, +STORE, 4023832576, 4024078335, +STORE, 4024078336, 4024197119, +STORE, 4024197120, 4024389631, +STORE, 4024389632, 4024406015, +STORE, 4024406016, 4024410111, +STORE, 4024410112, 4024422399, +STORE, 4024422400, 4024619007, +STORE, 4024619008, 4024639487, +STORE, 4024639488, 4024655871, +STORE, 4024655872, 4024664063, +STORE, 4024664064, 4024668159, +STORE, 4024668160, 4024676351, +STORE, 4024676352, 4024905727, +STORE, 4024905728, 4024909823, +STORE, 4024909824, 4024918015, +STORE, 4024918016, 4024922111, +STORE, 4024922112, 4024930303, +STORE, 4024930304, 4025110527, +STORE, 4025110528, 4025176063, +STORE, 4025176064, 4025208831, +STORE, 4025208832, 4025212927, +STORE, 4025212928, 4025217023, +STORE, 4025217024, 4025348095, +STORE, 4025348096, 4025372671, +STORE, 4025372672, 4025458687, +STORE, 4025458688, 4025466879, +STORE, 4025466880, 4025565183, +STORE, 4025565184, 4025757695, +STORE, 4025757696, 4026249215, +STORE, 4026249216, 4026261503, +STORE, 4026261504, 4026265599, +STORE, 4026265600, 4026269695, +STORE, 4026269696, 4026302463, +STORE, 4026302464, 4026306559, +STORE, 4026306560, 4026314751, +STORE, 4026314752, 4026318847, +STORE, 4026318848, 4026322943, +STORE, 4026322944, 4026327039, +STORE, 4026327040, 4026654719, +STORE, 4026654720, 4026671103, +STORE, 4026671104, 4026720255, +STORE, 4026720256, 4026724351, +STORE, 4026724352, 4026728447, +STORE, 4026728448, 4026732543, +STORE, 4026732544, 4026863615, +STORE, 4026863616, 4027027455, +STORE, 4027027456, 4027031551, +STORE, 4027031552, 4027514879, +STORE, 4027514880, 4027531263, +STORE, 4027531264, 4027535359, +STORE, 4027535360, 4027539455, +STORE, 4027539456, 4027785215, +STORE, 4027785216, 4027789311, +STORE, 4027789312, 4027793407, +STORE, 4027793408, 4027797503, +STORE, 4027797504, 4027863039, +STORE, 4027863040, 4027899903, +STORE, 4027899904, 4027949055, +STORE, 4027949056, 4027957247, +STORE, 4027957248, 4027961343, +STORE, 4027961344, 4027965439, +STORE, 
4027965440, 4028194815, +STORE, 4028194816, 4028252159, +STORE, 4028252160, 4028338175, +STORE, 4028338176, 4028350463, +STORE, 4028350464, 4028354559, +STORE, 4028354560, 4028452863, +STORE, 4028452864, 4028489727, +STORE, 4028489728, 4028530687, +STORE, 4028530688, 4028538879, +STORE, 4028538880, 4028542975, +STORE, 4028542976, 4028551167, +STORE, 4028551168, 4028665855, +STORE, 4028665856, 4029349887, +STORE, 4029349888, 4030468095, +STORE, 4030468096, 4030513151, +STORE, 4030513152, 4030517247, +STORE, 4030517248, 4030525439, +STORE, 4030525440, 4030529535, +STORE, 4030529536, 4030758911, +STORE, 4030758912, 4030828543, +STORE, 4030828544, 4030943231, +STORE, 4030943232, 4030951423, +STORE, 4030951424, 4030955519, +STORE, 4030955520, 4030967807, +STORE, 4030967808, 4031131647, +STORE, 4031131648, 4031135743, +STORE, 4031135744, 4031139839, +STORE, 4031139840, 4031148031, +STORE, 4031148032, 4031152127, +STORE, 4031152128, 4031160319, +STORE, 4031160320, 4031504383, +STORE, 4031504384, 4031598591, +STORE, 4031598592, 4031754239, +STORE, 4031754240, 4031766527, +STORE, 4031766528, 4031770623, +STORE, 4031770624, 4031774719, +STORE, 4031774720, 4031782911, +STORE, 4031782912, 4031799295, +STORE, 4031799296, 4031856639, +STORE, 4031856640, 4031983615, +STORE, 4031983616, 4031987711, +STORE, 4031987712, 4031991807, +STORE, 4031991808, 4032270335, +STORE, 4032270336, 4032274431, +STORE, 4032274432, 4032282623, +STORE, 4032282624, 4032286719, +STORE, 4032286720, 4032290815, +STORE, 4032290816, 4032389119, +STORE, 4032389120, 4032397311, +STORE, 4032397312, 4032405503, +STORE, 4032405504, 4032413695, +STORE, 4032413696, 4032417791, +STORE, 4032417792, 4032565247, +STORE, 4032565248, 4032593919, +STORE, 4032593920, 4032737279, +STORE, 4032737280, 4032741375, +STORE, 4032741376, 4032745471, +STORE, 4032745472, 4032770047, +STORE, 4032770048, 4032933887, +STORE, 4032933888, 4032999423, +STORE, 4032999424, 4033032191, +STORE, 4033032192, 4033036287, +STORE, 4033036288, 4033040383, +STORE, 4033040384, 4033105919, +STORE, 4033105920, 4033396735, +STORE, 4033396736, 4033822719, +STORE, 4033822720, 4033839103, +STORE, 4033839104, 4033843199, +STORE, 4033843200, 4033851391, +STORE, 4033851392, 4033863679, +STORE, 4033863680, 4033880063, +STORE, 4033880064, 4033933311, +STORE, 4033933312, 4034023423, +STORE, 4034023424, 4034031615, +STORE, 4034031616, 4034035711, +STORE, 4034035712, 4034043903, +STORE, 4034043904, 4034142207, +STORE, 4034142208, 4034191359, +STORE, 4034191360, 4034260991, +STORE, 4034260992, 4034269183, +STORE, 4034269184, 4034273279, +STORE, 4034273280, 4034281471, +STORE, 4034281472, 4034412543, +STORE, 4034412544, 4034445311, +STORE, 4034445312, 4034490367, +STORE, 4034490368, 4034494463, +STORE, 4034494464, 4034498559, +STORE, 4034498560, 4034662399, +STORE, 4034662400, 4034666495, +STORE, 4034666496, 4034670591, +STORE, 4034670592, 4034674687, +STORE, 4034674688, 4034678783, +STORE, 4034678784, 4034682879, +STORE, 4034682880, 4034781183, +STORE, 4034781184, 4035043327, +STORE, 4035043328, 4035047423, +STORE, 4035047424, 4035055615, +STORE, 4035055616, 4035059711, +STORE, 4035059712, 4035063807, +STORE, 4035063808, 4035067903, +STORE, 4035067904, 4035100671, +STORE, 4035100672, 4035375103, +STORE, 4035375104, 4035383295, +STORE, 4035383296, 4035395583, +STORE, 4035395584, 4035399679, +STORE, 4035399680, 4035403775, +STORE, 4035403776, 4035407871, +STORE, 4035407872, 4035411967, +STORE, 4035411968, 4035477503, +STORE, 4035477504, 4035608575, +STORE, 4035608576, 4035641343, +STORE, 
4035641344, 4035682303, +STORE, 4035682304, 4035686399, +STORE, 4035686400, 4035690495, +STORE, 4035690496, 4035694591, +STORE, 4035694592, 4035743743, +STORE, 4035743744, 4035784703, +STORE, 4035784704, 4035829759, +STORE, 4035829760, 4035837951, +STORE, 4035837952, 4035842047, +STORE, 4035842048, 4035846143, +STORE, 4035846144, 4035850239, +STORE, 4035850240, 4036001791, +STORE, 4036001792, 4036005887, +STORE, 4036005888, 4036214783, +STORE, 4036214784, 4036218879, +STORE, 4036218880, 4036603903, +STORE, 4036603904, 4036648959, +STORE, 4036648960, 4036653055, +STORE, 4036653056, 4036657151, +STORE, 4036657152, 4036665343, +STORE, 4036665344, 4036780031, +STORE, 4036780032, 4036829183, +STORE, 4036829184, 4036984831, +STORE, 4036984832, 4036993023, +STORE, 4036993024, 4036997119, +STORE, 4036997120, 4037001215, +STORE, 4037001216, 4037009407, +STORE, 4037009408, 4037025791, +STORE, 4037025792, 4037095423, +STORE, 4037095424, 4037181439, +STORE, 4037181440, 4037193727, +STORE, 4037193728, 4037197823, +STORE, 4037197824, 4037206015, +STORE, 4037206016, 4037320703, +STORE, 4037320704, 4037337087, +STORE, 4037337088, 4037349375, +STORE, 4037349376, 4037357567, +STORE, 4037357568, 4037361663, +STORE, 4037369856, 4037386239, +STORE, 4037386240, 4037672959, +STORE, 4037672960, 4037689343, +STORE, 4037689344, 4037730303, +STORE, 4037730304, 4037734399, +STORE, 4037734400, 4037738495, +STORE, 4037738496, 4037742591, +STORE, 4037742592, 4037758975, +STORE, 4037758976, 4037890047, +STORE, 4037890048, 4037931007, +STORE, 4037931008, 4037976063, +STORE, 4037976064, 4037984255, +STORE, 4037984256, 4037988351, +STORE, 4037988352, 4038053887, +STORE, 4038053888, 4038184959, +STORE, 4038184960, 4038189055, +STORE, 4038189056, 4038197247, +STORE, 4038197248, 4038201343, +STORE, 4038201344, 4038205439, +STORE, 4038205440, 4038209535, +STORE, 4038217728, 4038250495, +STORE, 4038250496, 4038512639, +STORE, 4038512640, 4038516735, +STORE, 4038516736, 4038520831, +STORE, 4038520832, 4038524927, +STORE, 4038524928, 4038529023, +STORE, 4038529024, 4038533119, +STORE, 4038541312, 4038623231, +STORE, 4038623232, 4038754303, +STORE, 4038754304, 4038885375, +STORE, 4038885376, 4038889471, +STORE, 4038897664, 4038963199, +STORE, 4038963200, 4038967295, +STORE, 4038967296, 4038983679, +STORE, 4038983680, 4039114751, +STORE, 4039114752, 4039245823, +STORE, 4039245824, 4039376895, +STORE, 4039376896, 4040687615, +STORE, 4040687616, 4040691711, +STORE, 4040691712, 4040806399, +STORE, 4040806400, 4040937471, +STORE, 4040937472, 4040941567, +STORE, 4040945664, 4040949759, +STORE, 4040949760, 4041080831, +STORE, 4041080832, 4041211903, +STORE, 4041211904, 4043046911, +STORE, 4043046912, 4043051007, +STORE, 4043051008, 4043055103, +STORE, 4043055104, 4043137023, +STORE, 4043137024, 4043141119, +STORE, 4043141120, 4043145215, +STORE, 4043145216, 4043153407, +STORE, 4043153408, 4043186175, +STORE, 4043186176, 4043317247, +STORE, 4043317248, 4043448319, +STORE, 4043448320, 4043579391, +STORE, 4043579392, 4043583487, +STORE, 4043583488, 4043599871, +STORE, 4043599872, 4043661311, +STORE, 4043661312, 4043792383, +STORE, 4043792384, 4043796479, +STORE, 4043796480, 4043800575, +STORE, 4043800576, 4043816959, +STORE, 4043816960, 4043821055, +STORE, 4043821056, 4043825151, +STORE, 4043825152, 4043829247, +STORE, 4043829248, 4043833343, +STORE, 4043833344, 4047241215, +STORE, 4047241216, 4047249407, +STORE, 4047249408, 4047253503, +STORE, 4047253504, 4047323135, +STORE, 4047323136, 4047327231, +STORE, 4047327232, 4047458303, +STORE, 
4047458304, 4047589375, +STORE, 4047589376, 4047720447, +STORE, 4047720448, 4047773695, +STORE, 4047773696, 4047790079, +STORE, 4047790080, 4047921151, +STORE, 4047921152, 4048052223, +STORE, 4048052224, 4048183295, +STORE, 4048183296, 4049002495, +STORE, 4049002496, 4049133567, +STORE, 4049133568, 4049154047, +STORE, 4049154048, 4049158143, +STORE, 4049158144, 4049162239, +STORE, 4049162240, 4049166335, +STORE, 4049166336, 4049174527, +STORE, 4049174528, 4049182719, +STORE, 4049182720, 4049186815, +STORE, 4049186816, 4049190911, +STORE, 4049190912, 4049195007, +STORE, 4049195008, 4049203199, +STORE, 4049203200, 4049207295, +STORE, 4049207296, 4049211391, +STORE, 4049211392, 4049215487, +STORE, 4049215488, 4049219583, +STORE, 4049219584, 4049227775, +STORE, 4049227776, 4049231871, +STORE, 4049231872, 4049235967, +STORE, 4049235968, 4049244159, +STORE, 4049244160, 4049248255, +STORE, 4049248256, 4049252351, +STORE, 4049252352, 4049256447, +STORE, 4049256448, 4049268735, +STORE, 4049268736, 4049272831, +STORE, 4049272832, 4049313791, +STORE, 4049313792, 4049723391, +STORE, 4049723392, 4049727487, +STORE, 4049727488, 4049858559, +STORE, 4049858560, 4049989631, +STORE, 4049989632, 4049993727, +STORE, 4049993728, 4050026495, +STORE, 4050026496, 4050030591, +STORE, 4050030592, 4050161663, +STORE, 4050161664, 4050169855, +STORE, 4050169856, 4050223103, +STORE, 4050223104, 4050632703, +STORE, 4050632704, 4050636799, +STORE, 4050636800, 4050640895, +STORE, 4050640896, 4050644991, +STORE, 4050644992, 4050661375, +STORE, 4050661376, 4050665471, +STORE, 4050665472, 4050673663, +STORE, 4050673664, 4050677759, +STORE, 4050677760, 4050694143, +STORE, 4050694144, 4050702335, +STORE, 4050702336, 4050956287, +STORE, 4050956288, 4051963903, +STORE, 4051963904, 4051980287, +STORE, 4051980288, 4051988479, +STORE, 4051988480, 4052000767, +STORE, 4052000768, 4052004863, +STORE, 4052004864, 4052029439, +STORE, 4284014592, 4284018687, +STORE, 4284018688, 4292403199, +SNULL, 4041080832, 4041211903, +SNULL, 3795763200, 3795894271, +STORE, 3629522944, 3696631807, +SNULL, 3663077375, 3696631807, +STORE, 3629522944, 3663077375, +STORE, 3663077376, 3696631807, +SNULL, 3663077376, 3696631807, +STORE, 3663077376, 3696631807, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3626471424, 3627524095, +SNULL, 3626471424, 3626475519, +STORE, 3626475520, 3627524095, +STORE, 3626471424, 3626475519, +SNULL, 3627519999, 3627524095, +STORE, 3626475520, 3627519999, +STORE, 3627520000, 3627524095, +STORE, 3625418752, 3626475519, +SNULL, 3625418752, 3625422847, +STORE, 3625422848, 3626475519, +STORE, 3625418752, 3625422847, +SNULL, 3626467327, 3626475519, +STORE, 3625422848, 3626467327, +STORE, 3626467328, 3626475519, +STORE, 3624366080, 3625422847, +SNULL, 3624366080, 3624370175, +STORE, 3624370176, 3625422847, +STORE, 3624366080, 3624370175, +SNULL, 3625414655, 3625422847, +STORE, 3624370176, 3625414655, +STORE, 3625414656, 3625422847, +STORE, 4041191424, 4041211903, +SNULL, 4041195519, 4041211903, +STORE, 4041191424, 4041195519, +STORE, 4041195520, 4041211903, +STORE, 4041170944, 4041191423, +SNULL, 4041175039, 4041191423, +STORE, 4041170944, 4041175039, +STORE, 4041175040, 4041191423, +SNULL, 3625426943, 3626467327, +STORE, 3625422848, 3625426943, +STORE, 3625426944, 3626467327, +STORE, 4041162752, 4041170943, +SNULL, 3626479615, 3627519999, +STORE, 3626475520, 3626479615, +STORE, 
3626479616, 3627519999, +STORE, 4041154560, 4041162751, +STORE, 4041154560, 4041170943, +STORE, 4041134080, 4041154559, +SNULL, 4041138175, 4041154559, +STORE, 4041134080, 4041138175, +STORE, 4041138176, 4041154559, +SNULL, 3624374271, 3625414655, +STORE, 3624370176, 3624374271, +STORE, 3624374272, 3625414655, +STORE, 4041125888, 4041134079, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +STORE, 3487174656, 3487584255, +STORE, 4041121792, 4041125887, +SNULL, 4041121792, 4041125887, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 3487174656, 3487584255, +STORE, 3222274048, 3223326719, +SNULL, 3222274048, 3222278143, +STORE, 3222278144, 3223326719, +STORE, 3222274048, 3222278143, +SNULL, 3223322623, 3223326719, +STORE, 3222278144, 3223322623, +STORE, 3223322624, 3223326719, +STORE, 3221221376, 3222278143, +SNULL, 3221221376, 3221225471, +STORE, 3221225472, 3222278143, +STORE, 3221221376, 3221225471, +SNULL, 3222269951, 3222278143, +STORE, 3221225472, 3222269951, +STORE, 3222269952, 3222278143, +STORE, 3220168704, 3221225471, +SNULL, 3220168704, 3220172799, +STORE, 3220172800, 3221225471, +STORE, 3220168704, 3220172799, +SNULL, 3221217279, 3221225471, +STORE, 3220172800, 3221217279, +STORE, 3221217280, 3221225471, +STORE, 4041117696, 4041125887, +STORE, 4041117696, 4041134079, +STORE, 3219083264, 3220172799, +SNULL, 3219083264, 3219087359, +STORE, 3219087360, 3220172799, +STORE, 3219083264, 3219087359, +SNULL, 3220164607, 3220172799, +STORE, 3219087360, 3220164607, +STORE, 3220164608, 3220172799, +STORE, 4041109504, 4041117695, +STORE, 4041109504, 4041134079, +STORE, 3217997824, 3219087359, +SNULL, 3217997824, 3218001919, +STORE, 3218001920, 3219087359, +STORE, 3217997824, 3218001919, +SNULL, 3219079167, 3219087359, +STORE, 3218001920, 3219079167, +STORE, 3219079168, 3219087359, +STORE, 4041101312, 4041109503, +STORE, 4041101312, 4041134079, +STORE, 3216912384, 3218001919, +SNULL, 3216912384, 3216916479, +STORE, 3216916480, 3218001919, +STORE, 3216912384, 3216916479, +SNULL, 3217993727, 3218001919, +STORE, 3216916480, 3217993727, +STORE, 3217993728, 3218001919, +STORE, 4041093120, 4041101311, +STORE, 4041093120, 4041134079, +STORE, 3215826944, 3216916479, +SNULL, 3215826944, 3215831039, +STORE, 3215831040, 3216916479, +STORE, 3215826944, 3215831039, +SNULL, 3216908287, 3216916479, +STORE, 3215831040, 3216908287, +STORE, 3216908288, 3216916479, +STORE, 4016779264, 4016799743, +SNULL, 4016783359, 4016799743, +STORE, 4016779264, 4016783359, +STORE, 4016783360, 4016799743, +STORE, 4016758784, 4016779263, +SNULL, 4016762879, 4016779263, +STORE, 4016758784, 4016762879, +STORE, 4016762880, 4016779263, +SNULL, 3222282239, 3223322623, +STORE, 3222278144, 3222282239, +STORE, 3222282240, 3223322623, +STORE, 4041084928, 4041093119, +STORE, 4041084928, 4041134079, +SNULL, 3221229567, 3222269951, +STORE, 3221225472, 3221229567, +STORE, 3221229568, 3222269951, +STORE, 4015644672, 4015665151, +STORE, 4038889472, 4038897663, +SNULL, 4015648767, 4015665151, +STORE, 4015644672, 4015648767, +STORE, 4015648768, 4015665151, +STORE, 4015624192, 4015644671, +SNULL, 4015628287, 4015644671, +STORE, 4015624192, 4015628287, +STORE, 4015628288, 4015644671, +SNULL, 3219091455, 3220164607, +STORE, 3219087360, 3219091455, +STORE, 3219091456, 3220164607, +STORE, 4015603712, 4015624191, +SNULL, 4015607807, 4015624191, +STORE, 4015603712, 4015607807, +STORE, 
4015607808, 4015624191, +SNULL, 3218006015, 3219079167, +STORE, 3218001920, 3218006015, +STORE, 3218006016, 3219079167, +STORE, 3949674496, 3949694975, +SNULL, 3949678591, 3949694975, +STORE, 3949674496, 3949678591, +STORE, 3949678592, 3949694975, +SNULL, 3216920575, 3217993727, +STORE, 3216916480, 3216920575, +STORE, 3216920576, 3217993727, +STORE, 3948924928, 3948945407, +SNULL, 3948929023, 3948945407, +STORE, 3948924928, 3948929023, +STORE, 3948929024, 3948945407, +SNULL, 3215835135, 3216908287, +STORE, 3215831040, 3215835135, +STORE, 3215835136, 3216908287, +SNULL, 3220176895, 3221217279, +STORE, 3220172800, 3220176895, +STORE, 3220176896, 3221217279, +STORE, 3214786560, 3215826943, +STORE, 3213733888, 3214786559, +SNULL, 3213733888, 3213737983, +STORE, 3213737984, 3214786559, +STORE, 3213733888, 3213737983, +SNULL, 3214782463, 3214786559, +STORE, 3213737984, 3214782463, +STORE, 3214782464, 3214786559, +STORE, 4038533120, 4038541311, +STORE, 3948421120, 3948441599, +SNULL, 3948425215, 3948441599, +STORE, 3948421120, 3948425215, +STORE, 3948425216, 3948441599, +SNULL, 3213742079, 3214782463, +STORE, 3213737984, 3213742079, +STORE, 3213742080, 3214782463, +STORE, 4038209536, 4038217727, +STORE, 3212681216, 3213737983, +SNULL, 3212681216, 3212685311, +STORE, 3212685312, 3213737983, +STORE, 3212681216, 3212685311, +SNULL, 3213729791, 3213737983, +STORE, 3212685312, 3213729791, +STORE, 3213729792, 3213737983, +STORE, 3795763200, 3795894271, +STORE, 3946872832, 3946893311, +SNULL, 3946876927, 3946893311, +STORE, 3946872832, 3946876927, +STORE, 3946876928, 3946893311, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +STORE, 3487174656, 3487584255, +SNULL, 3212689407, 3213729791, +STORE, 3212685312, 3212689407, +STORE, 3212689408, 3213729791, +STORE, 4041080832, 4041084927, +STORE, 4040941568, 4040945663, +STORE, 4037361664, 4037369855, +STORE, 4000817152, 4000821247, +STORE, 3999440896, 3999444991, +STORE, 3212161024, 3212681215, +SNULL, 3212161024, 3212439551, +STORE, 3212439552, 3212681215, +STORE, 3212161024, 3212439551, +SNULL, 3212161024, 3212439551, +SNULL, 3212464127, 3212681215, +STORE, 3212439552, 3212464127, +STORE, 3212464128, 3212681215, +SNULL, 3212464128, 3212681215, +SNULL, 3212439552, 3212451839, +STORE, 3212451840, 3212464127, +STORE, 3212439552, 3212451839, +SNULL, 3212439552, 3212451839, +STORE, 3212439552, 3212451839, +SNULL, 3212451840, 3212455935, +STORE, 3212455936, 3212464127, +STORE, 3212451840, 3212455935, +SNULL, 3212451840, 3212455935, +STORE, 3212451840, 3212455935, +SNULL, 3212455936, 3212460031, +STORE, 3212460032, 3212464127, +STORE, 3212455936, 3212460031, +SNULL, 3212455936, 3212460031, +STORE, 3212455936, 3212460031, +SNULL, 3212460032, 3212464127, +STORE, 3212460032, 3212464127, +STORE, 3997679616, 3997683711, +SNULL, 4049235968, 4049240063, +STORE, 4049240064, 4049244159, +STORE, 4049235968, 4049240063, +SNULL, 4049240064, 4049244159, +STORE, 4049240064, 4049244159, +SNULL, 3997679616, 3997683711, +SNULL, 3999440896, 3999444991, +SNULL, 4000817152, 4000821247, +SNULL, 4040941568, 4040945663, +SNULL, 4041080832, 4041084927, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 3487174656, 3487584255, +SNULL, 3212451840, 3212455935, +STORE, 3212451840, 3212455935, +STORE, 4041080832, 4041084927, +STORE, 3623890944, 3624169471, +SNULL, 4041080832, 4041084927, +STORE, 4041080832, 4041084927, +SNULL, 
4041080832, 4041084927, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +STORE, 4041080832, 4041084927, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +STORE, 3211386880, 3212439551, +SNULL, 3211386880, 3211390975, +STORE, 3211390976, 3212439551, +STORE, 3211386880, 3211390975, +SNULL, 3212435455, 3212439551, +STORE, 3211390976, 3212435455, +STORE, 3212435456, 3212439551, +STORE, 4040941568, 4040945663, +STORE, 3937169408, 3937189887, +STORE, 3623485440, 3623616511, +SNULL, 717225983, 1388314623, +STORE, 314572800, 717225983, +STORE, 717225984, 1388314623, +SNULL, 717225984, 1388314623, +STORE, 3937112064, 3937132543, +SNULL, 3937116159, 3937132543, +STORE, 3937112064, 3937116159, +STORE, 3937116160, 3937132543, +SNULL, 3211395071, 3212435455, +STORE, 3211390976, 3211395071, +STORE, 3211395072, 3212435455, +STORE, 4000817152, 4000821247, +STORE, 3974823936, 3974832127, +STORE, 3595284480, 3595431935, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +STORE, 3487174656, 3487584255, +STORE, 3999440896, 3999444991, +STORE, 3997679616, 3997683711, +STORE, 3996295168, 3996299263, +STORE, 3996090368, 3996094463, +STORE, 3210866688, 3211386879, +SNULL, 3210866688, 3211001855, +STORE, 3211001856, 3211386879, +STORE, 3210866688, 3211001855, +SNULL, 3210866688, 3211001855, +SNULL, 3211038719, 3211386879, +STORE, 3211001856, 3211038719, +STORE, 3211038720, 3211386879, +SNULL, 3211038720, 3211386879, +SNULL, 3211001856, 3211022335, +STORE, 3211022336, 3211038719, +STORE, 3211001856, 3211022335, +SNULL, 3211001856, 3211022335, +STORE, 3211001856, 3211022335, +SNULL, 3211022336, 3211030527, +STORE, 3211030528, 3211038719, +STORE, 3211022336, 3211030527, +SNULL, 3211022336, 3211030527, +STORE, 3211022336, 3211030527, +SNULL, 3211030528, 3211034623, +STORE, 3211034624, 3211038719, +STORE, 3211030528, 3211034623, +SNULL, 3211030528, 3211034623, +STORE, 3211030528, 3211034623, +SNULL, 3211034624, 3211038719, +STORE, 3211034624, 3211038719, +STORE, 3994906624, 3994910719, +SNULL, 4049240064, 4049244159, +STORE, 4049240064, 4049244159, +SNULL, 3994906624, 3994910719, +SNULL, 3996090368, 3996094463, +SNULL, 3996295168, 3996299263, +SNULL, 3997679616, 3997683711, +SNULL, 3999440896, 3999444991, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 
4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 3487174656, 3487584255, +SNULL, 3211022336, 3211030527, +STORE, 3211022336, 3211030527, +STORE, 3999440896, 3999444991, +STORE, 3210199040, 3211001855, +SNULL, 3999440896, 3999444991, +STORE, 3999440896, 3999444991, +SNULL, 3999440896, 3999444991, +STORE, 3594821632, 3594952703, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 4048183296, 4048592895, +STORE, 4048592896, 4049002495, +STORE, 4048183296, 4048592895, +STORE, 4048183296, 4049002495, +SNULL, 1914101759, 1969434623, +STORE, 1914097664, 1914101759, +STORE, 1914101760, 1969434623, +STORE, 3567108096, 3567239167, +STORE, 3973832704, 3973840895, +STORE, 3209113600, 3210199039, +SNULL, 3209113600, 3209117695, +STORE, 3209117696, 3210199039, +STORE, 3209113600, 3209117695, +SNULL, 3210194943, 3210199039, +STORE, 3209117696, 3210194943, +STORE, 3210194944, 3210199039, +STORE, 3935858688, 3935879167, +SNULL, 3935862783, 3935879167, +STORE, 3935858688, 3935862783, +STORE, 3935862784, 3935879167, +SNULL, 3209121791, 3210194943, +STORE, 3209117696, 3209121791, +STORE, 3209121792, 3210194943, +STORE, 3528749056, 3528880127, +STORE, 3968200704, 3968208895, +STORE, 3208028160, 3209117695, +SNULL, 3208028160, 3208032255, +STORE, 3208032256, 3209117695, +STORE, 3208028160, 3208032255, +SNULL, 3209109503, 3209117695, +STORE, 3208032256, 3209109503, +STORE, 3209109504, 3209117695, +STORE, 3888123904, 3888144383, +SNULL, 3888127999, 3888144383, +STORE, 3888123904, 3888127999, +STORE, 3888128000, 3888144383, +SNULL, 3208036351, 3209109503, +STORE, 3208032256, 3208036351, +STORE, 3208036352, 3209109503, +SNULL, 3968200704, 3968208895, +SNULL, 3888123904, 3888144383, +SNULL, 3209109504, 3209113599, +STORE, 3209113600, 3209117695, +STORE, 3209109504, 3209113599, +SNULL, 3208028160, 3209113599, +STORE, 3208060928, 3209117695, +SNULL, 3208060928, 3208065023, +STORE, 3208065024, 3209117695, +STORE, 3208060928, 3208065023, +SNULL, 3209109503, 3209117695, +STORE, 3208065024, 3209109503, +STORE, 3209109504, 3209117695, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 
3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3888123904, 3888144383, +SNULL, 3888127999, 3888144383, +STORE, 3888123904, 3888127999, +STORE, 3888128000, 3888144383, +SNULL, 3208069119, 3209109503, +STORE, 3208065024, 3208069119, +STORE, 3208069120, 3209109503, +STORE, 3968200704, 3968208895, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3527778304, 3527909375, +STORE, 3999440896, 3999444991, +STORE, 3997679616, 3997683711, +STORE, 1914097664, 1914105855, +STORE, 1914105856, 1969434623, +STORE, 3957583872, 3957592063, +STORE, 3206975488, 3208065023, +SNULL, 3206975488, 3206979583, +STORE, 3206979584, 3208065023, +STORE, 3206975488, 3206979583, +SNULL, 3208056831, 3208065023, +STORE, 3206979584, 3208056831, +STORE, 3208056832, 3208065023, +STORE, 3956736000, 3956744191, +STORE, 3205890048, 3206979583, +SNULL, 3205890048, 3205894143, +STORE, 3205894144, 3206979583, +STORE, 3205890048, 3205894143, +SNULL, 3206971391, 3206979583, +STORE, 3205894144, 3206971391, +STORE, 3206971392, 3206979583, +STORE, 3806101504, 3806121983, +SNULL, 3806105599, 3806121983, +STORE, 3806101504, 3806105599, +STORE, 3806105600, 3806121983, +SNULL, 3206983679, 3208056831, +STORE, 3206979584, 3206983679, +STORE, 3206983680, 3208056831, +STORE, 3806081024, 3806101503, +SNULL, 3806085119, 3806101503, +STORE, 3806081024, 3806085119, +STORE, 3806085120, 3806101503, +SNULL, 3205898239, 3206971391, +STORE, 3205894144, 3205898239, +STORE, 3205898240, 3206971391, +STORE, 3956015104, 3956023295, +STORE, 3204804608, 3205894143, +SNULL, 3204804608, 3204808703, +STORE, 3204808704, 3205894143, +STORE, 3204804608, 3204808703, +SNULL, 3205885951, 3205894143, +STORE, 3204808704, 3205885951, +STORE, 3205885952, 3205894143, +STORE, 3803471872, 3803492351, +STORE, 3803451392, 3803471871, +STORE, 3803451392, 3803492351, +SNULL, 3957583872, 3957592063, +SNULL, 3806101504, 3806121983, +SNULL, 3206975487, 3206979583, +STORE, 3206971392, 3206975487, +STORE, 3206975488, 3206979583, +SNULL, 3208056832, 3208060927, +STORE, 3208060928, 3208065023, +STORE, 3208056832, 3208060927, +SNULL, 3206975488, 3208060927, +STORE, 3801845760, 3801878527, +STORE, 3806101504, 3806121983, +SNULL, 3806105599, 3806121983, +STORE, 3806101504, 3806105599, +STORE, 3806105600, 3806121983, +SNULL, 3204812799, 3205885951, +STORE, 3204808704, 3204812799, +STORE, 3204812800, 3205885951, +STORE, 1914097664, 1914109951, +STORE, 1914109952, 1969434623, +STORE, 3957583872, 3957592063, +STORE, 3206971392, 3208065023, +SNULL, 
3206971392, 3206979583, +STORE, 3206979584, 3208065023, +STORE, 3206971392, 3206979583, +SNULL, 3208056831, 3208065023, +STORE, 3206979584, 3208056831, +STORE, 3208056832, 3208065023, +STORE, 3801825280, 3801845759, +SNULL, 3801829375, 3801845759, +STORE, 3801825280, 3801829375, +STORE, 3801829376, 3801845759, +SNULL, 3206983679, 3208056831, +STORE, 3206979584, 3206983679, +STORE, 3206983680, 3208056831, +STORE, 3202707456, 3204804607, +SNULL, 3202707456, 3204804607, +STORE, 3202707456, 3204804607, +STORE, 3200610304, 3202707455, +SNULL, 3202707456, 3204804607, +SNULL, 3200610304, 3202707455, +STORE, 3202707456, 3204804607, +SNULL, 3202707456, 3204804607, +STORE, 3202707456, 3204804607, +SNULL, 3202707456, 3204804607, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3527647232, 3527778303, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +STORE, 3487059968, 3487584255, +SNULL, 3487059968, 3487301631, +STORE, 3487301632, 3487584255, +STORE, 3487059968, 3487301631, +SNULL, 3487059968, 3487301631, +SNULL, 3487563775, 3487584255, +STORE, 3487301632, 3487563775, +STORE, 3487563776, 3487584255, +SNULL, 3487563776, 3487584255, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3524046848, 3524177919, +STORE, 3487170560, 3487301631, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 
3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3487039488, 3487170559, +STORE, 3487039488, 3487301631, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3204280320, 3204804607, +SNULL, 3204280320, 3204448255, +STORE, 3204448256, 3204804607, +STORE, 3204280320, 3204448255, +SNULL, 3204280320, 3204448255, +SNULL, 3204710399, 3204804607, +STORE, 3204448256, 3204710399, +STORE, 3204710400, 3204804607, +SNULL, 3204710400, 3204804607, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3996295168, 3996299263, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +SNULL, 3996295168, 3996299263, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3486908416, 3487039487, +STORE, 3486908416, 3487301631, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3223326720, 3290435583, +SNULL, 3223326720, 3256881151, +STORE, 3256881152, 3290435583, +STORE, 3223326720, 3256881151, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +STORE, 3201826816, 3202351103, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +STORE, 3202351104, 3204448255, +SNULL, 3202351104, 3204448255, +SNULL, 3803471871, 3803492351, +STORE, 3803451392, 3803471871, +STORE, 3803471872, 3803492351, +SNULL, 3803471872, 3803492351, +SNULL, 3803451392, 3803471871, +STORE, 3798999040, 3799101439, +SNULL, 3798999040, 3799101439, +STORE, 3952644096, 3952652287, +STORE, 3203362816, 3204448255, +SNULL, 3203362816, 3203366911, +STORE, 3203366912, 3204448255, +STORE, 3203362816, 3203366911, +SNULL, 3204444159, 3204448255, +STORE, 
3203366912, 3204444159, +STORE, 3204444160, 3204448255, +STORE, 3803471872, 3803492351, +SNULL, 3803475967, 3803492351, +STORE, 3803471872, 3803475967, +STORE, 3803475968, 3803492351, +SNULL, 3203371007, 3204444159, +STORE, 3203366912, 3203371007, +STORE, 3203371008, 3204444159, +STORE, 3199729664, 3201826815, +SNULL, 3199729664, 3201826815, +STORE, 3199729664, 3201826815, +SNULL, 3199729664, 3201826815, +STORE, 3199729664, 3201826815, +SNULL, 3199729664, 3201826815, +STORE, 3199729664, 3201826815, +SNULL, 3199729664, 3201826815, +STORE, 3199729664, 3201826815, +SNULL, 3199729664, 3201826815, +STORE, 3200774144, 3201826815, +SNULL, 3200774144, 3200778239, +STORE, 3200778240, 3201826815, +STORE, 3200774144, 3200778239, +SNULL, 3201822719, 3201826815, +STORE, 3200778240, 3201822719, +STORE, 3201822720, 3201826815, +STORE, 3803451392, 3803471871, +SNULL, 3803455487, 3803471871, +STORE, 3803451392, 3803455487, +STORE, 3803455488, 3803471871, +SNULL, 3200782335, 3201822719, +STORE, 3200778240, 3200782335, +STORE, 3200782336, 3201822719, +STORE, 3949666304, 3949674495, +STORE, 3949408256, 3949416447, +STORE, 3199688704, 3200778239, +SNULL, 3199688704, 3199692799, +STORE, 3199692800, 3200778239, +STORE, 3199688704, 3199692799, +SNULL, 3200770047, 3200778239, +STORE, 3199692800, 3200770047, +STORE, 3200770048, 3200778239, +STORE, 3799306240, 3799326719, +SNULL, 3799310335, 3799326719, +STORE, 3799306240, 3799310335, +STORE, 3799310336, 3799326719, +SNULL, 3199696895, 3200770047, +STORE, 3199692800, 3199696895, +STORE, 3199696896, 3200770047, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +STORE, 3799277568, 3799306239, +SNULL, 3799277568, 3799306239, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +SNULL, 4041162751, 4041170943, +STORE, 4041154560, 4041162751, +STORE, 4041162752, 4041170943, +SNULL, 4041162752, 4041170943, +SNULL, 4041154560, 4041162751, +SNULL, 4041191424, 4041211903, +SNULL, 4041170944, 4041191423, +SNULL, 3626471423, 3626475519, +STORE, 3626467328, 3626471423, +STORE, 3626471424, 3626475519, +SNULL, 3626471424, 3627524095, +SNULL, 3625418751, 3625422847, +STORE, 3625414656, 3625418751, +STORE, 3625418752, 3625422847, +SNULL, 3625418752, 3626471423, +STORE, 3627393024, 3627524095, +STORE, 3627261952, 3627393023, +STORE, 3627261952, 3627524095, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +STORE, 3195494400, 3197591551, +SNULL, 3197591552, 3199688703, +SNULL, 3195494400, 3197591551, +STORE, 3197591552, 3199688703, +SNULL, 3197591552, 3199688703, +STORE, 3197591552, 3199688703, +STORE, 3195494400, 3197591551, +SNULL, 
3197591552, 3199688703, +SNULL, 3195494400, 3197591551, +STORE, 3798999040, 3799101439, +SNULL, 3798999040, 3799101439, +/* + * mmap: unmapped_area_topdown: ffff9a9f14ddaa80 + * Gap was found: mt 4041162752 gap_end 4041183232 + * mmap: window was 4052029440 - 4096 size 28672 + * mmap: mas.min 4041154560 max 4041191423 mas.last 4041191423 + * mmap: mas.index 4041162752 align mask 0 offset 0 + * mmap: rb_find_vma find on 4041162752 => ffff9a9f03d19678 (ffff9a9f03d19678) + */ + }; + + unsigned long set43[] = { +STORE, 140737488347136, 140737488351231, // ffff8c33d17490a0 +STORE, 140734187720704, 140737488351231, // ffff8c33d17490a0 +SNULL, 140734187724800, 140737488351231, // 0000000000000000 +STORE, 140734187589632, 140734187724799, // ffff8c33d17490a0 +STORE, 4194304, 6443007, // ffff8c33d1748980 +STORE, 4337664, 6443007, // ffff8c33d1748980 +STORE, 4194304, 4337663, // ffff8c33d1749690 +SNULL, 4337664, 6443007, // 0000000000000000 +STORE, 6430720, 6443007, // ffff8c3385968be0 +STORE, 206158430208, 206160674815, // ffff8c3385968e40 +STORE, 206158569472, 206160674815, // ffff8c3385968e40 +STORE, 206158430208, 206158569471, // ffff8c3385968098 +SNULL, 206158569472, 206160674815, // 0000000000000000 +STORE, 206160662528, 206160670719, // ffff8c3385968e40 +STORE, 206160670720, 206160674815, // ffff8c3385968850 +STORE, 140734188756992, 140734188765183, // ffff8c3385969d18 +STORE, 140734188740608, 140734188756991, // ffff8c3385969430 +STORE, 140501948112896, 140501948116991, // ffff8c3385968c78 + }; + + int count = 0; + void *ptr = NULL; + + MA_STATE(mas, mt, 0, 0); + + mt_set_non_kernel(3); + check_erase2_testset(mt, set, ARRAY_SIZE(set)); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set2, ARRAY_SIZE(set2)); + start = 140735933894656; + MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL)); + mtree_destroy(mt); + + mt_set_non_kernel(2); + mt_init_flags(mt, 0); + check_erase2_testset(mt, set3, ARRAY_SIZE(set3)); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_init_flags(mt, 0); + check_erase2_testset(mt, set4, ARRAY_SIZE(set4)); + rcu_read_lock(); + mas_for_each(&mas, entry, ULONG_MAX) { + if (xa_is_zero(entry)) + continue; + } + rcu_read_unlock(); + rcu_barrier(); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mt_set_non_kernel(100); + check_erase2_testset(mt, set5, ARRAY_SIZE(set5)); + rcu_barrier(); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set6, ARRAY_SIZE(set6)); + rcu_barrier(); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set7, ARRAY_SIZE(set7)); + rcu_barrier(); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set8, ARRAY_SIZE(set8)); + rcu_barrier(); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set9, ARRAY_SIZE(set9)); + rcu_barrier(); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set10, ARRAY_SIZE(set10)); + rcu_barrier(); + mtree_destroy(mt); + + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set11, ARRAY_SIZE(set11)); + rcu_barrier(); + mas_empty_area_rev(&mas, 12288, 140014592737280, 0x2000); + MT_BUG_ON(mt, mas.last != 140014592573439); + mtree_destroy(mt); + + mas_reset(&mas); + mas.tree = mt; + count = 0; + mas.index = 0; + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + 
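+	/*
+	 * The set* arrays above are recorded traces of VMA updates, stored as
+	 * (op, start, end) triples.  check_erase2_testset() presumably replays
+	 * each trace into the tree, and the assertions that follow each replay
+	 * reproduce the lookup or reverse gap search that originally
+	 * misbehaved.
+	 */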
+	check_erase2_testset(mt, set12, ARRAY_SIZE(set12));
+	rcu_barrier();
+	mas_for_each(&mas, entry, ULONG_MAX) {
+		if (xa_is_zero(entry))
+			continue;
+		BUG_ON(count > 12);
+		count++;
+	}
+	mtree_destroy(mt);
+
+	mas_reset(&mas);
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set13, ARRAY_SIZE(set13));
+	mtree_erase(mt, 140373516443648);
+	rcu_read_lock();
+	mas_empty_area_rev(&mas, 0, 140373518663680, 4096);
+	rcu_read_unlock();
+	mtree_destroy(mt);
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set14, ARRAY_SIZE(set14));
+	rcu_barrier();
+	mtree_destroy(mt);
+
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set15, ARRAY_SIZE(set15));
+	rcu_barrier();
+	mtree_destroy(mt);
+
+	/* set16 was to find a bug on limit updating at slot 0. */
+	mt_set_non_kernel(99);
+	mas_reset(&mas);
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set16, ARRAY_SIZE(set16));
+	rcu_barrier();
+	mas_empty_area_rev(&mas, 4096, 139921865637888, 0x6000);
+	MT_BUG_ON(mt, mas.last != 139921865547775);
+	mt_set_non_kernel(0);
+	mtree_destroy(mt);
+
+	/*
+	 * set17 found a bug in walking backwards and not counting nulls at
+	 * the end. This could cause a gap to be missed if the null had any
+	 * size.
+	 */
+	mt_set_non_kernel(99);
+	mas_reset(&mas);
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set17, ARRAY_SIZE(set17));
+	rcu_barrier();
+	mas_empty_area_rev(&mas, 4096, 139953197334528, 0x1000);
+	MT_BUG_ON(mt, mas.last != 139953197322239);
+/* MT_BUG_ON(mt, mas.index != 139953197318144); */
+	mt_set_non_kernel(0);
+	mtree_destroy(mt);
+
+	/*
+	 * set18 found a bug in walking backwards and not setting the max from
+	 * the node, but using the parent node. This was only an issue if the
+	 * next slot in the parent had what we needed.
+	 */
+	mt_set_non_kernel(99);
+	mas_reset(&mas);
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set18, ARRAY_SIZE(set18));
+	rcu_barrier();
+	mas_empty_area_rev(&mas, 4096, 140222972858368, 2215936);
+	MT_BUG_ON(mt, mas.last != 140222968475647);
+	/*MT_BUG_ON(mt, mas.index != 140222966259712); */
+	mt_set_non_kernel(0);
+	mtree_destroy(mt);
+
+	/*
+	 * set19 found 2 bugs in prev.
+	 * 1. If we hit root without finding anything, then there was an
+	 *    infinite loop.
+	 * 2. The first ascending wasn't using the correct slot which may have
+	 *    caused missed entries.
+	 */
+	mt_set_non_kernel(99);
+	mas_reset(&mas);
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	check_erase2_testset(mt, set19, ARRAY_SIZE(set19));
+	rcu_barrier();
+	mas.index = 140656779083776;
+	entry = mas_find(&mas, ULONG_MAX);
+	MT_BUG_ON(mt, entry != xa_mk_value(140656779083776));
+	entry = mas_prev(&mas, 0);
+	MT_BUG_ON(mt, entry != xa_mk_value(140656766251008));
+	mt_set_non_kernel(0);
+	mtree_destroy(mt);
+
+	/*
+	 * set20 found a bug in mas_may_move_gap due to the slot being
+	 * overwritten during the __mas_add operation and setting it to zero.
+ */ + mt_set_non_kernel(99); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set20, ARRAY_SIZE(set20)); + rcu_barrier(); + check_load(mt, 94849009414144, NULL); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_set_non_kernel(99); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set21, ARRAY_SIZE(set21)); + rcu_barrier(); + mt_validate(mt); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_set_non_kernel(999); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set22, ARRAY_SIZE(set22)); + rcu_barrier(); + mt_validate(mt); + ptr = mtree_load(mt, 140551363362816); + MT_BUG_ON(mt, ptr == mtree_load(mt, 140551363420159)); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_set_non_kernel(99); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set23, ARRAY_SIZE(set23)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + + mt_set_non_kernel(99); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set24, ARRAY_SIZE(set24)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + mt_set_non_kernel(99); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set25, ARRAY_SIZE(set25)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* Split on NULL followed by delete - causes gap issues. */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set26, ARRAY_SIZE(set26)); + rcu_barrier(); + mas_empty_area_rev(&mas, 4096, 140109042671616, 409600); + MT_BUG_ON(mt, mas.last != 140109040959487); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* Split on NULL followed by delete - causes gap issues. */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set27, ARRAY_SIZE(set27)); + rcu_barrier(); + MT_BUG_ON(mt, 0 != mtree_load(mt, 140415537422336)); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set28, ARRAY_SIZE(set28)); + rcu_barrier(); + mas_empty_area_rev(&mas, 4096, 139918413357056, 2097152); + /* Search for the size of gap then align it (offset 0) */ + mas.index = (mas.last + 1 - 2097152 - 0) & (~2093056); + MT_BUG_ON(mt, mas.index != 139918401601536); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* This test found issues with retry moving rebalanced nodes so the + * incorrect parent pivot was updated. + */ + mt_set_non_kernel(999); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set29, ARRAY_SIZE(set29)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* This test found issues with deleting all entries in a node when + * surrounded by entries in the next nodes, then deleting the entries + * surrounding the node filled with deleted entries. + */ + mt_set_non_kernel(999); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set30, ARRAY_SIZE(set30)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* This test found an issue with deleting all entries in a node that was + * the end node and mas_gap incorrectly set next = curr, and curr = prev + * then moved next to the left, losing data. 
+ */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set31, ARRAY_SIZE(set31)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set32, ARRAY_SIZE(set32)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + +/* + * mmap: empty_area_topdown: ffff88821c9cb600 Gap was found: + * mt 140582827569152 gap_end 140582869532672 + * mmap: window was 140583656296448 - 4096 size 134217728 + * mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879 + * mmap: mas.index 140582827569152 align mask 0 offset 0 + * mmap: rb_find_vma find on + * 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00) + */ + + /* move gap failed due to an entirely empty node */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set33, ARRAY_SIZE(set33)); + rcu_barrier(); + mas_empty_area_rev(&mas, 4096, 140583656296448, 134217728); + MT_BUG_ON(mt, mas.last != 140583003750399); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* + * Incorrect gap in tree caused by mas_prev not setting the limits + * correctly while walking down. + */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set34, ARRAY_SIZE(set34)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* Empty leaf at the end of a parent caused incorrect gap. */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set35, ARRAY_SIZE(set35)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + mt_set_non_kernel(99); + /* Empty leaf at the end of a parent caused incorrect gap. */ + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set36, ARRAY_SIZE(set36)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set37, ARRAY_SIZE(set37)); + rcu_barrier(); + MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712)); + mt_validate(mt); + mtree_destroy(mt); + + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set38, ARRAY_SIZE(set38)); + rcu_barrier(); + MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712)); + mt_validate(mt); + mtree_destroy(mt); + + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set39, ARRAY_SIZE(set39)); + rcu_barrier(); + mt_validate(mt); + mtree_destroy(mt); + + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set40, ARRAY_SIZE(set40)); + rcu_barrier(); + mt_validate(mt); + mtree_destroy(mt); + + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set41, ARRAY_SIZE(set41)); + rcu_barrier(); + mt_validate(mt); + mtree_destroy(mt); + + /* move gap failed due to an entirely empty node. 
*/ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set42, ARRAY_SIZE(set42)); + rcu_barrier(); + mas_empty_area_rev(&mas, 4096, 4052029440, 28672); + MT_BUG_ON(mt, mas.last != 4041211903); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); + + /* gap calc off by one */ + mt_set_non_kernel(99); + mas_reset(&mas); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_erase2_testset(mt, set43, ARRAY_SIZE(set43)); + rcu_barrier(); + mt_set_non_kernel(0); + mt_validate(mt); + mtree_destroy(mt); +} + +static noinline void check_alloc_rev_range(struct maple_tree *mt) +{ + /* + * Generated by: + * cat /proc/self/maps | awk '{print $1}'| + * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}' + */ + + unsigned long range[] = { + /* Inclusive , Exclusive. */ + 0x565234af2000, 0x565234af4000, + 0x565234af4000, 0x565234af9000, + 0x565234af9000, 0x565234afb000, + 0x565234afc000, 0x565234afd000, + 0x565234afd000, 0x565234afe000, + 0x565235def000, 0x565235e10000, + 0x7f36d4bfd000, 0x7f36d4ee2000, + 0x7f36d4ee2000, 0x7f36d4f04000, + 0x7f36d4f04000, 0x7f36d504c000, + 0x7f36d504c000, 0x7f36d5098000, + 0x7f36d5098000, 0x7f36d5099000, + 0x7f36d5099000, 0x7f36d509d000, + 0x7f36d509d000, 0x7f36d509f000, + 0x7f36d509f000, 0x7f36d50a5000, + 0x7f36d50b9000, 0x7f36d50db000, + 0x7f36d50db000, 0x7f36d50dc000, + 0x7f36d50dc000, 0x7f36d50fa000, + 0x7f36d50fa000, 0x7f36d5102000, + 0x7f36d5102000, 0x7f36d5103000, + 0x7f36d5103000, 0x7f36d5104000, + 0x7f36d5104000, 0x7f36d5105000, + 0x7fff5876b000, 0x7fff5878d000, + 0x7fff5878e000, 0x7fff58791000, + 0x7fff58791000, 0x7fff58793000, + }; + + unsigned long holes[] = { + /* + * Note: start of hole is INCLUSIVE + * end of hole is EXCLUSIVE + * (opposite of the above table.) + * Start of hole, end of hole, size of hole (+1) + */ + 0x565234afb000, 0x565234afc000, 0x1000, + 0x565234afe000, 0x565235def000, 0x12F1000, + 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000, + }; + + /* + * req_range consists of 4 values. + * 1. min index + * 2. max index + * 3. size + * 4. number that should be returned. + * 5. return value + */ + unsigned long req_range[] = { + 0x565234af9000, /* Min */ + 0x7fff58791000, /* Max */ + 0x1000, /* Size */ + 0x7fff5878d << 12, /* First rev hole of size 0x1000 */ + 0, /* Return value success. */ + + 0x0, /* Min */ + 0x565234AF1 << 12, /* Max */ + 0x3000, /* Size */ + 0x565234AEE << 12, /* max - 3. */ + 0, /* Return value success. */ + + 0x0, /* Min */ + -1, /* Max */ + 0x1000, /* Size */ + 562949953421311 << 12,/* First rev hole of size 0x1000 */ + 0, /* Return value success. */ + + 0x0, /* Min */ + 0x7F36D510A << 12, /* Max */ + 0x4000, /* Size */ + 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */ + 0, /* Return value success. */ + + /* Ascend test. */ + 0x0, + 34148798629 << 12, + 19 << 12, + 34148797418 << 12, + 0x0, + + /* Too big test. 
*/ + 0x0, + 18446744073709551615UL, + 562915594369134UL << 12, + 0x0, + -EBUSY, + + }; + + int i, range_count = ARRAY_SIZE(range); + int req_range_count = ARRAY_SIZE(req_range); + unsigned long min = 0; + + MA_STATE(mas, mt, 0, 0); + + mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY, + GFP_KERNEL); +#define DEBUG_REV_RANGE 0 + for (i = 0; i < range_count; i += 2) { + /* Inclusive, Inclusive (with the -1) */ + +#if DEBUG_REV_RANGE + pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12, + (range[i + 1] >> 12) - 1); +#endif + check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, + xa_mk_value(range[i] >> 12), 0); + mt_validate(mt); + } + + + for (i = 0; i < ARRAY_SIZE(holes); i += 3) { +#if DEBUG_REV_RANGE + pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n", + min, holes[i+1]>>12, holes[i+2]>>12, + holes[i] >> 12); +#endif + MT_BUG_ON(mt, mas_empty_area_rev(&mas, min, + holes[i+1] >> 12, + holes[i+2] >> 12)); +#if DEBUG_REV_RANGE + pr_debug("Found %lu %lu\n", mas.index, mas.last); + pr_debug("gap %lu %lu\n", (holes[i] >> 12), + (holes[i+1] >> 12)); +#endif + MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12)); + MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12)); + min = holes[i+1] >> 12; + mas_reset(&mas); + } + + for (i = 0; i < req_range_count; i += 5) { +#if DEBUG_REV_RANGE + pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n", + req_range[i] >> 12, + (req_range[i + 1] >> 12) - 1, + req_range[i+2] >> 12, + req_range[i+3] >> 12); +#endif + check_mtree_alloc_rrange(mt, + req_range[i] >> 12, /* start */ + req_range[i+1] >> 12, /* end */ + req_range[i+2] >> 12, /* size */ + req_range[i+3] >> 12, /* expected address */ + req_range[i+4], /* expected return */ + xa_mk_value(req_range[i] >> 12)); /* pointer */ + mt_validate(mt); + } + + mt_set_non_kernel(1); + mtree_erase(mt, 34148798727); /* create a deleted range. */ + check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414, + 34148798725, 0, mt); + + mtree_destroy(mt); +} + +static noinline void check_alloc_range(struct maple_tree *mt) +{ + /* + * Generated by: + * cat /proc/self/maps|awk '{print $1}'| + * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}' + */ + + unsigned long range[] = { + /* Inclusive , Exclusive. */ + 0x565234af2000, 0x565234af4000, + 0x565234af4000, 0x565234af9000, + 0x565234af9000, 0x565234afb000, + 0x565234afc000, 0x565234afd000, + 0x565234afd000, 0x565234afe000, + 0x565235def000, 0x565235e10000, + 0x7f36d4bfd000, 0x7f36d4ee2000, + 0x7f36d4ee2000, 0x7f36d4f04000, + 0x7f36d4f04000, 0x7f36d504c000, + 0x7f36d504c000, 0x7f36d5098000, + 0x7f36d5098000, 0x7f36d5099000, + 0x7f36d5099000, 0x7f36d509d000, + 0x7f36d509d000, 0x7f36d509f000, + 0x7f36d509f000, 0x7f36d50a5000, + 0x7f36d50b9000, 0x7f36d50db000, + 0x7f36d50db000, 0x7f36d50dc000, + 0x7f36d50dc000, 0x7f36d50fa000, + 0x7f36d50fa000, 0x7f36d5102000, + 0x7f36d5102000, 0x7f36d5103000, + 0x7f36d5103000, 0x7f36d5104000, + 0x7f36d5104000, 0x7f36d5105000, + 0x7fff5876b000, 0x7fff5878d000, + 0x7fff5878e000, 0x7fff58791000, + 0x7fff58791000, 0x7fff58793000, + }; + unsigned long holes[] = { + /* Start of hole, end of hole, size of hole (+1) */ + 0x565234afb000, 0x565234afc000, 0x1000, + 0x565234afe000, 0x565235def000, 0x12F1000, + 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000, + }; + + /* + * req_range consists of 4 values. + * 1. min index + * 2. max index + * 3. size + * 4. number that should be returned. + * 5. 
return value + */ + unsigned long req_range[] = { + 0x565234af9000, /* Min */ + 0x7fff58791000, /* Max */ + 0x1000, /* Size */ + 0x565234afb000, /* First hole in our data of size 1000. */ + 0, /* Return value success. */ + + 0x0, /* Min */ + 0x7fff58791000, /* Max */ + 0x1F00, /* Size */ + 0x0, /* First hole in our data of size 2000. */ + 0, /* Return value success. */ + + /* Test ascend. */ + 34148797436 << 12, /* Min */ + 0x7fff587AF000, /* Max */ + 0x3000, /* Size */ + 34148798629 << 12, /* Expected location */ + 0, /* Return value success. */ + + /* Test failing. */ + 34148798623 << 12, /* Min */ + 34148798683 << 12, /* Max */ + 0x15000, /* Size */ + 0, /* Expected location */ + -EBUSY, /* Return value failed. */ + + /* Test filling entire gap. */ + 34148798623 << 12, /* Min */ + 0x7fff587AF000, /* Max */ + 0x10000, /* Size */ + 34148798632 << 12, /* Expected location */ + 0, /* Return value success. */ + + /* Test walking off the end of root. */ + 0, /* Min */ + -1, /* Max */ + -1, /* Size */ + 0, /* Expected location */ + -EBUSY, /* Return value failure. */ + + /* Test looking for too large a hole across entire range. */ + 0, /* Min */ + -1, /* Max */ + 4503599618982063UL << 12, /* Size */ + 34359052178 << 12, /* Expected location */ + -EBUSY, /* Return failure. */ + }; + int i, range_count = ARRAY_SIZE(range); + int req_range_count = ARRAY_SIZE(req_range); + unsigned long min = 0x565234af2000; + + mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY, + GFP_KERNEL); + for (i = 0; i < range_count; i += 2) { +#define DEBUG_ALLOC_RANGE 0 +#if DEBUG_ALLOC_RANGE + pr_debug("\tInsert %lu-%lu\n", range[i] >> 12, + (range[i + 1] >> 12) - 1); + mt_dump(mt); +#endif + check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, + xa_mk_value(range[i] >> 12), 0); + mt_validate(mt); + } + + + MA_STATE(mas, mt, 0, 0); + + for (i = 0; i < ARRAY_SIZE(holes); i += 3) { + +#if DEBUG_ALLOC_RANGE + pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min >> 12, + holes[i+1] >> 12, holes[i+2] >> 12, + min, holes[i+1]); +#endif + MT_BUG_ON(mt, mas_empty_area(&mas, min >> 12, + holes[i+1] >> 12, + holes[i+2] >> 12)); + MT_BUG_ON(mt, mas.index != holes[i] >> 12); + min = holes[i+1]; + mas_reset(&mas); + } + for (i = 0; i < req_range_count; i += 5) { +#if DEBUG_ALLOC_RANGE + pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n", + i/5, req_range[i] >> 12, req_range[i + 1] >> 12, + req_range[i + 2] >> 12, req_range[i + 3] >> 12, + req_range[i], req_range[i+1]); +#endif + check_mtree_alloc_range(mt, + req_range[i] >> 12, /* start */ + req_range[i+1] >> 12, /* end */ + req_range[i+2] >> 12, /* size */ + req_range[i+3] >> 12, /* expected address */ + req_range[i+4], /* expected return */ + xa_mk_value(req_range[i] >> 12)); /* pointer */ + mt_validate(mt); +#if DEBUG_ALLOC_RANGE + mt_dump(mt); +#endif + } + + mtree_destroy(mt); +} + +static noinline void check_ranges(struct maple_tree *mt) +{ + int i, val, val2; + unsigned long r[] = { + 10, 15, + 20, 25, + 17, 22, /* Overlaps previous range. */ + 9, 1000, /* Huge. 
*/ + 100, 200, + 45, 168, + 118, 128, + }; + + MT_BUG_ON(mt, !mtree_empty(mt)); + check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0); + check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0); + check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST); + MT_BUG_ON(mt, !mt_height(mt)); + /* Store */ + check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0); + check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0); + check_store_range(mt, r[8], r[9], xa_mk_value(r[8]), 0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + MT_BUG_ON(mt, mt_height(mt)); + + check_seq(mt, 50, false); + mt_set_non_kernel(4); + check_store_range(mt, 5, 47, xa_mk_value(47), 0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + /* Create tree of 1-100 */ + check_seq(mt, 100, false); + /* Store 45-168 */ + mt_set_non_kernel(10); + check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + /* Create tree of 1-200 */ + check_seq(mt, 200, false); + /* Store 45-168 */ + check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + check_seq(mt, 30, false); + check_store_range(mt, 6, 18, xa_mk_value(6), 0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + /* Overwrite across multiple levels. */ + /* Create tree of 1-400 */ + check_seq(mt, 400, false); + mt_set_non_kernel(50); + /* Store 118-128 */ + check_store_range(mt, r[12], r[13], xa_mk_value(r[12]), 0); + mt_set_non_kernel(50); + mtree_test_erase(mt, 140); + mtree_test_erase(mt, 141); + mtree_test_erase(mt, 142); + mtree_test_erase(mt, 143); + mtree_test_erase(mt, 130); + mtree_test_erase(mt, 131); + mtree_test_erase(mt, 132); + mtree_test_erase(mt, 133); + mtree_test_erase(mt, 134); + mtree_test_erase(mt, 135); + check_load(mt, r[12], xa_mk_value(r[12])); + check_load(mt, r[13], xa_mk_value(r[12])); + check_load(mt, r[13] - 1, xa_mk_value(r[12])); + check_load(mt, r[13] + 1, xa_mk_value(r[13] + 1)); + check_load(mt, 135, NULL); + check_load(mt, 140, NULL); + mt_set_non_kernel(0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + + + /* Overwrite multiple levels at the end of the tree (slot 7) */ + mt_set_non_kernel(50); + check_seq(mt, 400, false); + check_store_range(mt, 353, 361, xa_mk_value(353), 0); + check_store_range(mt, 347, 352, xa_mk_value(347), 0); + + check_load(mt, 346, xa_mk_value(346)); + for (i = 347; i <= 352; i++) + check_load(mt, i, xa_mk_value(347)); + for (i = 353; i <= 361; i++) + check_load(mt, i, xa_mk_value(353)); + check_load(mt, 362, xa_mk_value(362)); + mt_set_non_kernel(0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + mt_set_non_kernel(50); + check_seq(mt, 400, false); + check_store_range(mt, 352, 364, NULL, 0); + check_store_range(mt, 351, 363, xa_mk_value(352), 0); + check_load(mt, 350, xa_mk_value(350)); + check_load(mt, 351, xa_mk_value(352)); + for (i = 352; i <= 363; i++) + check_load(mt, i, xa_mk_value(352)); + check_load(mt, 364, NULL); + check_load(mt, 365, xa_mk_value(365)); + mt_set_non_kernel(0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + mt_set_non_kernel(5); + check_seq(mt, 400, false); + check_store_range(mt, 352, 364, NULL, 0); + check_store_range(mt, 351, 364, xa_mk_value(352), 0); + check_load(mt, 350, xa_mk_value(350)); + check_load(mt, 351, xa_mk_value(352)); + for (i = 352; i <= 364; i++) + check_load(mt, i, xa_mk_value(352)); + check_load(mt, 365, xa_mk_value(365)); + mt_set_non_kernel(0); + MT_BUG_ON(mt, !mt_height(mt)); + 
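+	/*
+	 * The store above covers 351-364 entirely (unlike the previous block,
+	 * which rewrote only 351-363 and left the NULL at 364 in place), so
+	 * every index in that span must read back xa_mk_value(352) while 365
+	 * keeps its original sequence value.
+	 */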
mtree_destroy(mt); + + + mt_set_non_kernel(50); + check_seq(mt, 400, false); + check_store_range(mt, 362, 367, xa_mk_value(362), 0); + check_store_range(mt, 353, 361, xa_mk_value(353), 0); + mt_set_non_kernel(0); + mt_validate(mt); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + /* + * Interesting cases: + * 1. Overwrite the end of a node and end in the first entry of the next + * node. + * 2. Split a single range + * 3. Overwrite the start of a range + * 4. Overwrite the end of a range + * 5. Overwrite the entire range + * 6. Overwrite a range that causes multiple parent nodes to be + * combined + * 7. Overwrite a range that causes multiple parent nodes and part of + * root to be combined + * 8. Overwrite the whole tree + * 9. Try to overwrite the zero entry of an alloc tree. + * 10. Write a range larger than a nodes current pivot + */ + + mt_set_non_kernel(50); + for (i = 0; i <= 500; i++) { + val = i*5; + val2 = (i+1)*5; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0); + check_store_range(mt, 2411, 2411, xa_mk_value(2411), 0); + check_store_range(mt, 2412, 2412, xa_mk_value(2412), 0); + check_store_range(mt, 2396, 2400, xa_mk_value(4052020), 0); + check_store_range(mt, 2402, 2402, xa_mk_value(2402), 0); + mtree_destroy(mt); + mt_set_non_kernel(0); + + mt_set_non_kernel(50); + for (i = 0; i <= 500; i++) { + val = i*5; + val2 = (i+1)*5; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0); + check_store_range(mt, 2424, 2424, xa_mk_value(2424), 0); + check_store_range(mt, 2425, 2425, xa_mk_value(2), 0); + check_store_range(mt, 2460, 2470, NULL, 0); + check_store_range(mt, 2435, 2460, xa_mk_value(2435), 0); + check_store_range(mt, 2461, 2470, xa_mk_value(2461), 0); + mt_set_non_kernel(0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + /* Test rebalance gaps */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mt_set_non_kernel(50); + for (i = 0; i <= 50; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + check_store_range(mt, 161, 161, xa_mk_value(161), 0); + check_store_range(mt, 162, 162, xa_mk_value(162), 0); + check_store_range(mt, 163, 163, xa_mk_value(163), 0); + check_store_range(mt, 240, 249, NULL, 0); + mtree_erase(mt, 200); + mtree_erase(mt, 210); + mtree_erase(mt, 220); + mtree_erase(mt, 230); + mt_set_non_kernel(0); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= 500; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + check_store_range(mt, 4600, 4959, xa_mk_value(1), 0); + mt_validate(mt); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= 500; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0); + check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0); + check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0); + check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0); + check_store_range(mt, 4842, 4849, NULL, 0); + mt_validate(mt); + MT_BUG_ON(mt, !mt_height(mt)); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= 1300; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + MT_BUG_ON(mt, mt_height(mt) >= 4); + 
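+		/* Adjacent 10-wide stores alone must never grow the tree to height 4. */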
} + /* Cause a 3 child split all the way up the tree. */ + for (i = 5; i < 215; i += 10) + check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0); + for (i = 5; i < 65; i += 10) + check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0); + + MT_BUG_ON(mt, mt_height(mt) >= 4); + for (i = 5; i < 45; i += 10) + check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0); + MT_BUG_ON(mt, mt_height(mt) < 4); + mtree_destroy(mt); + + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= 1200; i++) { + val = i*10; + val2 = (i+1)*10; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + MT_BUG_ON(mt, mt_height(mt) >= 4); + } + /* Fill parents and leaves before split. */ + for (i = 5; i < 455; i += 10) + check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0); + + for (i = 1; i < 16; i++) + check_store_range(mt, 8185 + i, 8185 + i + 1, + xa_mk_value(8185+i), 0); + MT_BUG_ON(mt, mt_height(mt) >= 4); + /* triple split across multiple levels. */ + check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0); + MT_BUG_ON(mt, mt_height(mt) != 4); +} + +static noinline void check_next_entry(struct maple_tree *mt) +{ + void *entry = NULL; + unsigned long limit = 30, i = 0; + + MT_BUG_ON(mt, !mtree_empty(mt)); + MA_STATE(mas, mt, i, i); + + check_seq(mt, limit, false); + rcu_read_lock(); + + // Check the first one and get ma_state in the correct state. + MT_BUG_ON(mt, mas_walk(&mas) != xa_mk_value(i++)); + for ( ; i <= limit + 1; i++) { + entry = mas_next(&mas, limit); + if (i > limit) + MT_BUG_ON(mt, entry != NULL); + else + MT_BUG_ON(mt, xa_mk_value(i) != entry); + } + rcu_read_unlock(); + mtree_destroy(mt); +} + +static noinline void check_prev_entry(struct maple_tree *mt) +{ + unsigned long index = 16; + void *value; + int i; + + MA_STATE(mas, mt, index, index); + + MT_BUG_ON(mt, !mtree_empty(mt)); + check_seq(mt, 30, false); + + rcu_read_lock(); + value = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, value != xa_mk_value(index)); + value = mas_prev(&mas, 0); + MT_BUG_ON(mt, value != xa_mk_value(index - 1)); + rcu_read_unlock(); + mtree_destroy(mt); + + /* Check limits on prev */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mas_lock(&mas); + for (i = 0; i <= index; i++) { + mas_set_range(&mas, i*10, i*10+5); + mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL); + } + + mas_set(&mas, 20); + value = mas_walk(&mas); + MT_BUG_ON(mt, value != xa_mk_value(2)); + + value = mas_prev(&mas, 19); + MT_BUG_ON(mt, value != NULL); + + mas_set(&mas, 80); + value = mas_walk(&mas); + MT_BUG_ON(mt, value != xa_mk_value(8)); + + value = mas_prev(&mas, 76); + MT_BUG_ON(mt, value != NULL); + + mas_unlock(&mas); +} + +static noinline void check_root_expand(struct maple_tree *mt) +{ + MA_STATE(mas, mt, 0, 0); + void *ptr; + + + mas_lock(&mas); + mas_set(&mas, 3); + ptr = mas_walk(&mas); + MT_BUG_ON(mt, ptr != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + + ptr = &check_prev_entry; + mas_set(&mas, 1); + mas_store_gfp(&mas, ptr, GFP_KERNEL); + + mas_set(&mas, 0); + ptr = mas_walk(&mas); + MT_BUG_ON(mt, ptr != NULL); + + mas_set(&mas, 1); + ptr = mas_walk(&mas); + MT_BUG_ON(mt, ptr != &check_prev_entry); + + mas_set(&mas, 2); + ptr = mas_walk(&mas); + MT_BUG_ON(mt, ptr != NULL); + mas_unlock(&mas); + mtree_destroy(mt); + + + mt_init_flags(mt, 0); + mas_lock(&mas); + + mas_set(&mas, 0); + ptr = &check_prev_entry; + mas_store_gfp(&mas, ptr, GFP_KERNEL); + + mas_set(&mas, 5); + ptr = mas_walk(&mas); + MT_BUG_ON(mt, ptr != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != 
ULONG_MAX); + + mas_set_range(&mas, 0, 100); + ptr = mas_walk(&mas); + MT_BUG_ON(mt, ptr != &check_prev_entry); + MT_BUG_ON(mt, mas.last != 0); + mas_unlock(&mas); + mtree_destroy(mt); + + mt_init_flags(mt, 0); + mas_lock(&mas); + + mas_set(&mas, 0); + ptr = (void*)((unsigned long) check_prev_entry | 1UL); + mas_store_gfp(&mas, ptr, GFP_KERNEL); + ptr = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, ptr != NULL); + MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX)); + + mas_set(&mas, 1); + ptr = mas_prev(&mas, 0); + MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0)); + MT_BUG_ON(mt, ptr != (void*)((unsigned long) check_prev_entry | 1UL)); + + mas_unlock(&mas); + + mtree_destroy(mt); + + mt_init_flags(mt, 0); + mas_lock(&mas); + mas_set(&mas, 0); + ptr = (void*)((unsigned long) check_prev_entry | 2UL); + mas_store_gfp(&mas, ptr, GFP_KERNEL); + ptr = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, ptr != NULL); + MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX)); + + mas_set(&mas, 1); + ptr = mas_prev(&mas, 0); + MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0)); + MT_BUG_ON(mt, ptr != (void*)((unsigned long) check_prev_entry | 2UL)); + + + mas_unlock(&mas); +} + +static noinline void check_prealloc(struct maple_tree *mt) +{ + unsigned long i, max = 100; + unsigned long allocated; + unsigned char height; + struct maple_node *mn; + void *ptr= check_prealloc; + MA_STATE(mas, mt, 10, 20); + + mt_set_non_kernel(1000); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mas_destroy(&mas); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 0); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + mas_destroy(&mas); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 0); + + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); + ma_free_rcu(mn); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + mas_destroy(&mas); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 0); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + mas_destroy(&mas); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 0); + ma_free_rcu(mn); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mn = mas_pop_node(&mas); + MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); + mas_push_node(&mas, mn); + MT_BUG_ON(mt, mas_allocated(&mas) != allocated); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + 
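+	/*
+	 * The repeated "allocated != 1 + height * 3" checks encode the
+	 * preallocation worst case: presumably up to three new nodes per tree
+	 * level for a split or rebalance, plus one extra node, which is why
+	 * the expected count scales with mas_mt_height().
+	 */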
mas_destroy(&mas); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 0); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mas_store_prealloc(&mas, ptr); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mas_store_prealloc(&mas, ptr); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mas_store_prealloc(&mas, ptr); + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mas_store_prealloc(&mas, ptr); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + mt_set_non_kernel(1); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated != 0); + mas_destroy(&mas); + + + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated == 0); + MT_BUG_ON(mt, allocated != 1 + height * 3); + mas_store_prealloc(&mas, ptr); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + mt_set_non_kernel(1); + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); + allocated = mas_allocated(&mas); + height = mas_mt_height(&mas); + MT_BUG_ON(mt, allocated != 0); +} + +static noinline void check_spanning_write(struct maple_tree *mt) +{ + unsigned long i, max = 5000; + MA_STATE(mas, mt, 1200, 2380); + + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 1205); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test spanning store that requires a right cousin rebalance */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mas_set_range(&mas, 0, 12900); /* Spans more than 2 levels */ + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 1205); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test non-alloc tree spanning store */ + mt_init_flags(mt, 0); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mas_set_range(&mas, 0, 300); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 15); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test spanning store that requires a right sibling rebalance */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mas_set_range(&mas, 0, 12865); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 15); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test spanning store that requires a left sibling rebalance */ + mt_init_flags(mt, 
MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mas_set_range(&mas, 90, 13665); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 95); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test spanning store that requires a left cousin rebalance */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mas_set_range(&mas, 46805, 49995); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 46815); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* + * Test spanning store that requires a left cousin rebalance all the way + * to root + */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mas_set_range(&mas, 32395, 49995); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 46815); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* + * Test spanning store that requires a right cousin rebalance all the + * way to root + */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + mas_set_range(&mas, 38875, 43190); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 38900); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test spanning store ending at full node (depth 2)*/ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + mtree_lock(mt); + mas_set(&mas, 47606); + mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL); + mas_set(&mas, 47607); + mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL); + mas_set(&mas, 47608); + mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL); + mas_set(&mas, 47609); + mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL); + /* Ensure the parent node is full */ + mas_ascend(&mas); + MT_BUG_ON(mt, (mas_data_end(&mas)) != mt_slot_count(mas.node) - 1); + mas_set_range(&mas, 11516,48940); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mtree_unlock(mt); + mtree_destroy(mt); + + /* Test spanning write with many many levels of no siblings */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + mas_set_range(&mas, 43200, 49999); + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 43200); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mtree_destroy(mt); +} + +static noinline void check_null_expand(struct maple_tree *mt) +{ + unsigned long i, max = 100; + unsigned char data_end; + MA_STATE(mas, mt, 959, 959); + + for (i = 0; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + /* Test expanding null at start. */ + mas_walk(&mas); + data_end = mas_data_end(&mas); + mas_set_range(&mas, 959, 963); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + MT_BUG_ON(mt, mtree_load(mt, 963) != NULL); + MT_BUG_ON(mt, data_end != mas_data_end(&mas)); + + /* Test expanding null at end. 
*/ + mas_set(&mas, 880); + mas_walk(&mas); + data_end = mas_data_end(&mas); + mas_set_range(&mas, 884, 887); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + MT_BUG_ON(mt, mtree_load(mt, 884) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 889) != NULL); + MT_BUG_ON(mt, data_end != mas_data_end(&mas)); + + /* Test expanding null at start and end. */ + mas_set(&mas, 890); + mas_walk(&mas); + data_end = mas_data_end(&mas); + mas_set_range(&mas, 900, 905); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + MT_BUG_ON(mt, mtree_load(mt, 899) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 900) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 905) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 906) != NULL); + MT_BUG_ON(mt, data_end - 2 != mas_data_end(&mas)); + + /* Test expanding null across multiple slots. */ + mas_set(&mas, 800); + mas_walk(&mas); + data_end = mas_data_end(&mas); + mas_set_range(&mas, 810, 825); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + MT_BUG_ON(mt, mtree_load(mt, 809) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 810) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 825) != NULL); + MT_BUG_ON(mt, mtree_load(mt, 826) != NULL); + MT_BUG_ON(mt, data_end - 4 != mas_data_end(&mas)); +} + +static noinline void check_gap_combining(struct maple_tree *mt) +{ + struct maple_enode *mn1, *mn2; + void *entry; + + unsigned long seq100[] = { + /* 0-5 */ + 74, 75, 76, + 50, 100, 2, + + /* 6-12 */ + 44, 45, 46, 43, + 20, 50, 3, + + /* 13-20*/ + 80, 81, 82, + 76, 2, 79, 85, 4, + }; + unsigned long seq2000[] = { + 1152, 1151, + 1100, 1200, 2, + }; + unsigned long seq400[] = { + 286, 318, + 256, 260, 266, 270, 275, 280, 290, 398, + 286, 310, + }; + + unsigned long index = seq100[0]; + + MA_STATE(mas, mt, index, index); + + MT_BUG_ON(mt, !mtree_empty(mt)); + check_seq(mt, 100, false); /* create 100 singletons. */ + + mt_set_non_kernel(1); + mtree_test_erase(mt, seq100[2]); + check_load(mt, seq100[2], NULL); + mtree_test_erase(mt, seq100[1]); + check_load(mt, seq100[1], NULL); + + rcu_read_lock(); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != xa_mk_value(index)); + mn1 = mas.node; + mas_next(&mas, ULONG_MAX); + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != xa_mk_value(index + 4)); + mn2 = mas.node; + MT_BUG_ON(mt, mn1 == mn2); /* test the test. */ + + /* + * At this point, there is a gap of 2 at index + 1 between seq100[3] and + * seq100[4]. Search for the gap. + */ + mt_set_non_kernel(1); + mas_reset(&mas); + MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[3], seq100[4], + seq100[5])); + MT_BUG_ON(mt, mas.index != index + 1); + rcu_read_unlock(); + + mtree_test_erase(mt, seq100[6]); + check_load(mt, seq100[6], NULL); + mtree_test_erase(mt, seq100[7]); + check_load(mt, seq100[7], NULL); + mtree_test_erase(mt, seq100[8]); + index = seq100[9]; + + rcu_read_lock(); + mas.index = index; + mas.last = index; + mas_reset(&mas); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != xa_mk_value(index)); + mn1 = mas.node; + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != xa_mk_value(index + 4)); + mas_next(&mas, ULONG_MAX); /* go to the next entry. */ + mn2 = mas.node; + MT_BUG_ON(mt, mn1 == mn2); /* test the next entry is in the next node. */ + + /* + * At this point, there is a gap of 3 at seq100[6]. Find it by + * searching 20 - 50 for size 3. 
+ */ + mas_reset(&mas); + MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[10], seq100[11], + seq100[12])); + MT_BUG_ON(mt, mas.index != seq100[6]); + rcu_read_unlock(); + + mt_set_non_kernel(1); + mtree_store(mt, seq100[13], NULL, GFP_KERNEL); + check_load(mt, seq100[13], NULL); + check_load(mt, seq100[14], xa_mk_value(seq100[14])); + mtree_store(mt, seq100[14], NULL, GFP_KERNEL); + check_load(mt, seq100[13], NULL); + check_load(mt, seq100[14], NULL); + + mas_reset(&mas); + rcu_read_lock(); + MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[15], + seq100[17])); + MT_BUG_ON(mt, mas.index != seq100[13]); + mt_validate(mt); + rcu_read_unlock(); + + /* + * *DEPRECATED: no retries anymore* Test retry entry in the start of a + * gap. */ + mt_set_non_kernel(2); + mtree_test_store_range(mt, seq100[18], seq100[14], NULL); + mtree_test_erase(mt, seq100[15]); + mas_reset(&mas); + rcu_read_lock(); + MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[19], + seq100[20])); + rcu_read_unlock(); + MT_BUG_ON(mt, mas.index != seq100[18]); + mt_validate(mt); + mtree_destroy(mt); + + /* seq 2000 tests are for multi-level tree gaps */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_seq(mt, 2000, false); + mt_set_non_kernel(1); + mtree_test_erase(mt, seq2000[0]); + mtree_test_erase(mt, seq2000[1]); + + mt_set_non_kernel(2); + mas_reset(&mas); + rcu_read_lock(); + MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq2000[2], seq2000[3], + seq2000[4])); + MT_BUG_ON(mt, mas.index != seq2000[1]); + rcu_read_unlock(); + mt_validate(mt); + mtree_destroy(mt); + + /* seq 400 tests rebalancing over two levels. */ + mt_set_non_kernel(99); + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_seq(mt, 400, false); + mtree_test_store_range(mt, seq400[0], seq400[1], NULL); + mt_set_non_kernel(0); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + check_seq(mt, 400, false); + mt_set_non_kernel(50); + mtree_test_store_range(mt, seq400[2], seq400[9], + xa_mk_value(seq400[2])); + mtree_test_store_range(mt, seq400[3], seq400[9], + xa_mk_value(seq400[3])); + mtree_test_store_range(mt, seq400[4], seq400[9], + xa_mk_value(seq400[4])); + mtree_test_store_range(mt, seq400[5], seq400[9], + xa_mk_value(seq400[5])); + mtree_test_store_range(mt, seq400[0], seq400[9], + xa_mk_value(seq400[0])); + mtree_test_store_range(mt, seq400[6], seq400[9], + xa_mk_value(seq400[6])); + mtree_test_store_range(mt, seq400[7], seq400[9], + xa_mk_value(seq400[7])); + mtree_test_store_range(mt, seq400[8], seq400[9], + xa_mk_value(seq400[8])); + mtree_test_store_range(mt, seq400[10], seq400[11], + xa_mk_value(seq400[10])); + mt_validate(mt); + mt_set_non_kernel(0); + mtree_destroy(mt); +} +static noinline void check_node_overwrite(struct maple_tree *mt) +{ + int i, max = 4000; + + for (i = 0; i < max; i++) { + mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100)); + } + + mtree_test_store_range(mt, 319951, 367950, NULL); + /*mt_dump(mt); */ + mt_validate(mt); +} + +static void mas_dfs_preorder(struct ma_state *mas) +{ + + struct maple_enode *prev; + unsigned char end, slot = 0; + + if (mas_is_start(mas)) { + mas_start(mas); + return; + } + + if (mte_is_leaf(mas->node) && mte_is_root(mas->node)) + goto done; + +walk_up: + end = mas_data_end(mas); + if (mte_is_leaf(mas->node) || + (slot > end)) { + if (mte_is_root(mas->node)) + goto done; + + slot = mte_parent_slot(mas->node) + 1; + mas_ascend(mas); + goto walk_up; + } + + prev = mas->node; + mas->node = mas_get_slot(mas, slot); + if (!mas->node || slot > end) { + if 
(mte_is_root(prev)) + goto done; + + mas->node = prev; + slot = mte_parent_slot(mas->node) + 1; + mas_ascend(mas); + goto walk_up; + } + + return; +done: + mas->node = MAS_NONE; +} + + +static void check_dfs_preorder(struct maple_tree *mt) +{ + unsigned long count = 0, max = 1000; + + MA_STATE(mas, mt, 0, 0); + + check_seq(mt, max, false); + do { + count++; + mas_dfs_preorder(&mas); + } while (!mas_is_none(&mas)); + MT_BUG_ON(mt, count != 74); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mas_reset(&mas); + count = 0; + check_seq(mt, max, false); + do { + count++; + mas_dfs_preorder(&mas); + } while (!mas_is_none(&mas)); + /*printk("count %lu\n", count); */ + MT_BUG_ON(mt, count != 77); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mas_reset(&mas); + count = 0; + check_rev_seq(mt, max, false); + do { + count++; + mas_dfs_preorder(&mas); + } while (!mas_is_none(&mas)); + /*printk("count %lu\n", count); */ + MT_BUG_ON(mt, count != 77); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mas_reset(&mas); + mt_zero_nr_tallocated(); + mt_set_non_kernel(200); + mas_expected_entries(&mas, max); + for (count = 0; count <= max; count++) { + mas.index = mas.last = count; + mas_store(&mas, xa_mk_value(count)); + MT_BUG_ON(mt, mas_is_err(&mas)); + } + mas_destroy(&mas); + rcu_barrier(); + /* + * pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n", + * max, mt_get_alloc_size()/1024, mt_nr_allocated(), + * mt_nr_tallocated()); + */ + +} + +#if defined(BENCH_SLOT_STORE) +static noinline void bench_slot_store(struct maple_tree *mt) +{ + int i, brk = 105, max = 1040, brk_start = 100, count = 20000000; + + for (i = 0; i < max; i += 10) + mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i < count; i++) { + mtree_store_range(mt, brk, brk, NULL, GFP_KERNEL); + mtree_store_range(mt, brk_start, brk, xa_mk_value(brk), + GFP_KERNEL); + } +} +#endif + +#if defined(BENCH_NODE_STORE) +static noinline void bench_node_store(struct maple_tree *mt) +{ + int i, overwrite = 76, max = 240, count = 20000000; + + for (i = 0; i < max; i += 10) + mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i < count; i++) { + mtree_store_range(mt, overwrite, overwrite + 15, + xa_mk_value(overwrite), GFP_KERNEL); + + overwrite += 5; + if (overwrite >= 135) + overwrite = 76; + } +} +#endif + +#if defined(BENCH_AWALK) +static noinline void bench_awalk(struct maple_tree *mt) +{ + int i, max = 2500, count = 50000000; + MA_STATE(mas, mt, 1470, 1470); + + for (i = 0; i < max; i += 10) + mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL); + + mtree_store_range(mt, 1470, 1475, NULL, GFP_KERNEL); + + for (i = 0; i < count; i++) { + mas_empty_area_rev(&mas, 0, 2000, 10); + mas_reset(&mas); + } +} +#endif +#if defined(BENCH_WALK) +static noinline void bench_walk(struct maple_tree *mt) +{ + int i, max = 2500, count = 550000000; + MA_STATE(mas, mt, 1470, 1470); + + for (i = 0; i < max; i += 10) + mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i < count; i++) { +#if 0 + mtree_load(mt, 1470); +#else + mas_walk(&mas); +#endif + mas_reset(&mas); + } + +} +#endif + +#if defined(BENCH_MT_FOR_EACH) +static noinline void bench_mt_for_each(struct maple_tree *mt) +{ + int i, count = 1000000; + unsigned long max = 2500, index = 0; + void *entry; + + for (i = 0; i < max; i += 5) + mtree_store_range(mt, i, i + 4, xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i < count; i++) { + unsigned long j = 0; + 
mt_for_each(mt, entry, index, max) { + MT_BUG_ON(mt, entry != xa_mk_value(j)); + j+=5; + } + + index = 0; + } + +} +#endif + +static noinline void check_forking(struct maple_tree *mt) +{ + + struct maple_tree newmt; + int i, nr_entries = 134; + void *val; + MA_STATE(mas, mt, 0, 0); + MA_STATE(newmas, mt, 0, 0); + + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + mt_set_non_kernel(99999); + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); + newmas.tree = &newmt; + mas_reset(&newmas); + mas_reset(&mas); + mas.index = 0; + mas.last = 0; + if (mas_expected_entries(&newmas, nr_entries)) { + pr_err("OOM!"); + BUG_ON(1); + } + mas_for_each(&mas, val, ULONG_MAX) { + newmas.index = mas.index; + newmas.last = mas.last; + mas_store(&newmas, val); + } + mas_destroy(&newmas); + mt_validate(&newmt); + mt_set_non_kernel(0); + mtree_destroy(&newmt); +} + +static noinline void check_mas_store_gfp(struct maple_tree *mt) +{ + + struct maple_tree newmt; + int i, nr_entries = 135; + void *val; + MA_STATE(mas, mt, 0, 0); + MA_STATE(newmas, mt, 0, 0); + + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + mt_set_non_kernel(99999); + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); + newmas.tree = &newmt; + mas_reset(&newmas); + mas_set(&mas, 0); + mas_for_each(&mas, val, ULONG_MAX) { + newmas.index = mas.index; + newmas.last = mas.last; + mas_store_gfp(&newmas, val, GFP_KERNEL); + } + + mt_validate(&newmt); + mt_set_non_kernel(0); + mtree_destroy(&newmt); +} + +#if defined(BENCH_FORK) +static noinline void bench_forking(struct maple_tree *mt) +{ + + struct maple_tree newmt; + int i, nr_entries = 134, nr_fork = 80000; + void *val; + MA_STATE(mas, mt, 0, 0); + MA_STATE(newmas, mt, 0, 0); + + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i < nr_fork; i++) { + mt_set_non_kernel(99999); + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); + newmas.tree = &newmt; + mas_reset(&newmas); + mas_reset(&mas); + mas.index = 0; + mas.last = 0; + if (mas_expected_entries(&newmas, nr_entries)) { + printk("OOM!"); + BUG_ON(1); + } + mas_for_each(&mas, val, ULONG_MAX) { + newmas.index = mas.index; + newmas.last = mas.last; + mas_store(&newmas, val); + } + mas_destroy(&newmas); + mt_validate(&newmt); + mt_set_non_kernel(0); + mtree_destroy(&newmt); + } +} +#endif + +static noinline void next_prev_test(struct maple_tree *mt) +{ + int i, nr_entries = 200; + void *val; + MA_STATE(mas, mt, 0, 0); + struct maple_enode *mn; + + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + for (i = 0; i <= nr_entries / 2; i++) { + mas_next(&mas, 1000); + if (mas_is_none(&mas)) + break; + + } + mas_reset(&mas); + mas_set(&mas, 0); + i = 0; + mas_for_each(&mas, val, 1000) { + i++; + } + + mas_reset(&mas); + mas_set(&mas, 0); + i = 0; + mas_for_each(&mas, val, 1000) { + mas_pause(&mas); + i++; + } + + /* + * 680 - 685 = 0x61a00001930c + * 686 - 689 = NULL; + * 690 - 695 = 0x61a00001930c + * Check simple next/prev + */ + mas_set(&mas, 686); + val = mas_walk(&mas); + MT_BUG_ON(mt, val != NULL); + + val = mas_next(&mas, 1000); + MT_BUG_ON(mt, val != xa_mk_value(690 / 10)); + MT_BUG_ON(mt, mas.index != 690); + MT_BUG_ON(mt, mas.last != 695); + + val = mas_prev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(680 / 10)); + MT_BUG_ON(mt, mas.index != 680); + MT_BUG_ON(mt, mas.last != 685); + + val = mas_next(&mas, 1000); + 
MT_BUG_ON(mt, val != xa_mk_value(690 / 10)); + MT_BUG_ON(mt, mas.index != 690); + MT_BUG_ON(mt, mas.last != 695); + + val = mas_next(&mas, 1000); + MT_BUG_ON(mt, val != xa_mk_value(700 / 10)); + MT_BUG_ON(mt, mas.index != 700); + MT_BUG_ON(mt, mas.last != 705); + + /* Check across node boundaries of the tree */ + mas_set(&mas, 70); + val = mas_walk(&mas); + MT_BUG_ON(mt, val != xa_mk_value(70/ 10)); + MT_BUG_ON(mt, mas.index != 70); + MT_BUG_ON(mt, mas.last != 75); + + val = mas_next(&mas, 1000); + MT_BUG_ON(mt, val != xa_mk_value(80 / 10)); + MT_BUG_ON(mt, mas.index != 80); + MT_BUG_ON(mt, mas.last != 85); + + val = mas_prev(&mas, 70); + MT_BUG_ON(mt, val != xa_mk_value(70 / 10)); + MT_BUG_ON(mt, mas.index != 70); + MT_BUG_ON(mt, mas.last != 75); + + /* Check across two levels of the tree */ + mas_reset(&mas); + mas_set(&mas, 707); + val = mas_walk(&mas); + MT_BUG_ON(mt, val != NULL); + val = mas_next(&mas, 1000); + MT_BUG_ON(mt, val != xa_mk_value(710 / 10)); + MT_BUG_ON(mt, mas.index != 710); + MT_BUG_ON(mt, mas.last != 715); + mn = mas.node; + + val = mas_next(&mas, 1000); + MT_BUG_ON(mt, val != xa_mk_value(720 / 10)); + MT_BUG_ON(mt, mas.index != 720); + MT_BUG_ON(mt, mas.last != 725); + MT_BUG_ON(mt, mn == mas.node); + + val = mas_prev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(710 / 10)); + MT_BUG_ON(mt, mas.index != 710); + MT_BUG_ON(mt, mas.last != 715); + + /* Check running off the end and back on */ + mas_reset(&mas); + mas_set(&mas, 2000); + val = mas_walk(&mas); + MT_BUG_ON(mt, val != xa_mk_value(2000 / 10)); + MT_BUG_ON(mt, mas.index != 2000); + MT_BUG_ON(mt, mas.last != 2005); + + val = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, val != NULL); + MT_BUG_ON(mt, mas.index != ULONG_MAX); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + + val = mas_prev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(2000 / 10)); + MT_BUG_ON(mt, mas.index != 2000); + MT_BUG_ON(mt, mas.last != 2005); + + /* Check running off the start and back on */ + mas_reset(&mas); + mas_set(&mas, 10); + val = mas_walk(&mas); + MT_BUG_ON(mt, val != xa_mk_value(1)); + MT_BUG_ON(mt, mas.index != 10); + MT_BUG_ON(mt, mas.last != 15); + + val = mas_prev(&mas, 0); + MT_BUG_ON(mt, val != xa_mk_value(0)); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 5); + + val = mas_prev(&mas, 0); + MT_BUG_ON(mt, val != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + + mas.index = 0; + mas.last = 5; + mas_store(&mas, NULL); + mas_reset(&mas); + mas_set(&mas, 10); + mas_walk(&mas); + + val = mas_prev(&mas, 0); + MT_BUG_ON(mt, val != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + + mtree_destroy(mt); + + mt_init(mt); + mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL); + mtree_store_range(mt, 5, 5, xa_mk_value(5), GFP_KERNEL); + mas_set(&mas, 5); + val = mas_prev(&mas, 4); + MT_BUG_ON(mt, val != NULL); +} + +#define RCU_RANGE_COUNT 1000 +#define RCU_MT_BUG_ON(test, y) {if (y) { test->stop = true;} MT_BUG_ON(test->mt, y);} +struct rcu_test_struct2 { + struct maple_tree *mt; + + bool start; + bool stop; + unsigned int thread_count; + + unsigned int seen_toggle; + unsigned int seen_added; + unsigned int seen_modified; + unsigned int seen_deleted; + int pause; + + unsigned long index[RCU_RANGE_COUNT]; + unsigned long last[RCU_RANGE_COUNT]; +}; + +struct rcu_reader_struct { + unsigned int id; + int mod; + int del; + int flip; + int add; + int next; + struct rcu_test_struct2 *test; +}; + +/* RCU reader helper function */ +static void rcu_reader_register(struct rcu_test_struct2 
*test) +{ + rcu_register_thread(); + uatomic_inc(&test->thread_count); + + while (!test->start) + usleep(test->pause * 100); +} + +static void rcu_reader_setup(struct rcu_reader_struct *reader, + unsigned int id ,struct rcu_test_struct2 *test) +{ + reader->id = id; + reader->test = test; + reader->mod = reader->id % 10; + reader->del = (reader->mod + 1) % 10; + reader->flip = (reader->mod + 2) % 10; + reader->add = (reader->mod + 3) % 10; + reader->next = (reader->mod + 4) % 10; +} + +/* RCU reader in increasing index */ +static void *rcu_reader_fwd(void *ptr) +{ + struct rcu_reader_struct *reader = (struct rcu_reader_struct*)ptr; + struct rcu_test_struct2 *test = reader->test; + unsigned long index = reader->id; + bool toggled, modified,deleted, added; + int i; + void *entry, *prev = NULL; + MA_STATE(mas, test->mt, 0, 0); + + rcu_reader_register(test); + toggled = modified = deleted = added = false; + + while(!test->stop) { + i = 0; + /* mas_for_each ?*/ + rcu_read_lock(); + mas_set(&mas, test->index[index]); + mas_for_each(&mas, entry, test->last[index + 9]) { + unsigned long r_start, r_end, alt_start; + void *expected, *alt; + + r_start = test->index[index + i]; + r_end = test->last[index + i]; + expected = xa_mk_value(r_start); + + if (i == reader->del) { + if (!deleted) { + alt_start = test->index[index + reader->flip]; + // delete occurred. + if (mas.index == alt_start) { + uatomic_inc(&test->seen_deleted); + deleted = true; + } + } + if (deleted) { + i = reader->flip; + r_start = test->index[index + i]; + r_end = test->last[index + i]; + expected = xa_mk_value(r_start); + } + } + + if (!added && (i == reader->add)) { + alt_start = test->index[index + reader->next]; + if (mas.index == r_start) { + uatomic_inc(&test->seen_added); + added = true; + } else if (mas.index == alt_start) { + i = reader->next; + r_start = test->index[index + i]; + r_end = test->last[index + i]; + expected = xa_mk_value(r_start); + } + } + + RCU_MT_BUG_ON(test, mas.index != r_start); + RCU_MT_BUG_ON(test, mas.last != r_end); + + if (i == reader->flip) { + alt = xa_mk_value(index + i + RCU_RANGE_COUNT); + if (prev) { + if (toggled && entry == expected) + uatomic_inc(&test->seen_toggle); + else if (!toggled && entry == alt) + uatomic_inc(&test->seen_toggle); + } + + if (entry == expected) + toggled = false; + else if (entry == alt) + toggled = true; + else { + printk("!!%lu-%lu -> %p not %p or %p\n", mas.index, mas.last, entry, expected, alt); + RCU_MT_BUG_ON(test, 1); + } + + prev = entry; + } else if (i == reader->mod) { + alt = xa_mk_value(index + i * 2 + 1 + + RCU_RANGE_COUNT); + if (entry != expected) { + if (!modified) + uatomic_inc(&test->seen_modified); + modified = true; + } else { + if (modified) + uatomic_inc(&test->seen_modified); + modified = false; + } + + if (modified) + RCU_MT_BUG_ON(test, entry != alt); + + } else { + if (entry != expected) + printk("!!%lu-%lu -> %p not %p\n", mas.index, mas.last, entry, expected); + RCU_MT_BUG_ON(test, entry != expected); + } + + i++; + } + rcu_read_unlock(); + usleep(test->pause); + } + + rcu_unregister_thread(); + return NULL; +} + +/* RCU reader in decreasing index */ +static void *rcu_reader_rev(void *ptr) +{ + struct rcu_reader_struct *reader = (struct rcu_reader_struct*)ptr; + struct rcu_test_struct2 *test = reader->test; + unsigned long index = reader->id; + bool toggled, modified,deleted, added; + int i; + void *prev = NULL; + MA_STATE(mas, test->mt, 0, 0); + + rcu_reader_register(test); + toggled = modified = deleted = added = false; + + + 
while(!test->stop) { + void *entry; + i = 9; + mas_set(&mas, test->index[index + i]); + + rcu_read_lock(); + while(i--) { + unsigned long r_start, r_end, alt_start; + void *expected, *alt; + int line = __LINE__; + + entry = mas_prev(&mas, test->index[index]); + r_start = test->index[index + i]; + r_end = test->last[index + i]; + expected = xa_mk_value(r_start); + + if (i == reader->del) { + alt_start = test->index[index + reader->mod]; + if (mas.index == alt_start) { + line = __LINE__; + if (!deleted) + uatomic_inc(&test->seen_deleted); + deleted = true; + } + if (deleted) { + line = __LINE__; + i = reader->mod; + r_start = test->index[index + i]; + r_end = test->last[index + i]; + expected = xa_mk_value(r_start); + } + } + if (!added && (i == reader->add)) { + alt_start = test->index[index + reader->flip]; + if (mas.index == r_start) { + line = __LINE__; + uatomic_inc(&test->seen_added); + added = true; + } else if (mas.index == alt_start) { + line = __LINE__; + i = reader->flip; + r_start = test->index[index + i]; + r_end = test->last[index + i]; + expected = xa_mk_value(r_start); + } + } + + if (i == reader->mod) { + line = __LINE__; + } + else if (i == reader->flip) + line = __LINE__; + + if (mas.index != r_start) { + alt = xa_mk_value(index + i * 2 + 1 + + RCU_RANGE_COUNT); + mt_dump(test->mt); + printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n", + mas.index, mas.last, entry, + r_start, r_end, expected, alt, + line, i); + } + RCU_MT_BUG_ON(test, mas.index != r_start); + RCU_MT_BUG_ON(test, mas.last != r_end); + + if (i == reader->mod) { + alt = xa_mk_value(index + i * 2 + 1 + + RCU_RANGE_COUNT); + + if (entry != expected) { + if (!modified) + uatomic_inc(&test->seen_modified); + modified = true; + } else { + if (modified) + uatomic_inc(&test->seen_modified); + modified = false; + } + if (modified) + RCU_MT_BUG_ON(test, entry != alt); + + + } else if (i == reader->flip) { + alt = xa_mk_value(index + i + + RCU_RANGE_COUNT); + if (prev) { + if (toggled && entry == expected) + uatomic_inc(&test->seen_toggle); + else if (!toggled && entry == alt) + uatomic_inc(&test->seen_toggle); + } + + if (entry == expected) + toggled = false; + else if (entry == alt) + toggled = true; + else { + printk("%lu-%lu %p != %p or %p\n", mas.index, mas.last, + entry, expected, alt); + RCU_MT_BUG_ON(test, 1); + } + + prev = entry; + } else { + if (entry != expected) + printk("%lu-%lu %p != %p\n", mas.index, mas.last, + entry, expected); + RCU_MT_BUG_ON(test, entry != expected); + } + } + rcu_read_unlock(); + usleep(test->pause); + } + + rcu_unregister_thread(); + return NULL; +} + +static void rcu_stress_rev(struct maple_tree *mt, struct rcu_test_struct2 *test, + int count, struct rcu_reader_struct *test_reader) +{ + int i, j = 10000; + bool toggle = true; + + test->start = true; /* Release the hounds! */ + usleep(5); + + while(j--) { + toggle = !toggle; + i = count; + while(i--) { + unsigned long start, end; + struct rcu_reader_struct *this = &test_reader[i]; + + /* Mod offset */ + if (j == 600) { + start = test->index[this->id + this->mod]; + end = test->last[this->id + this->mod]; + mtree_store_range(mt, start, end, + xa_mk_value(this->id + this->mod * 2 + + 1 + RCU_RANGE_COUNT), + GFP_KERNEL); + } + + /* Toggle */ + if (!(j % 5)) { + start = test->index[this->id + this->flip]; + end = test->last[this->id + this->flip]; + mtree_store_range(mt, start, end, + xa_mk_value((toggle ? 
start : + this->id + this->flip + + RCU_RANGE_COUNT)), + GFP_KERNEL); + } + + /* delete */ + if (j == 400) { + start = test->index[this->id + this->del]; + end = test->last[this->id + this->del]; + mtree_store_range(mt, start, end, NULL, GFP_KERNEL); + } + + /* add */ + if (j == 500) { + start = test->index[this->id + this->add]; + end = test->last[this->id + this->add]; + mtree_store_range(mt, start, end, + xa_mk_value(start), GFP_KERNEL); + } + } + usleep(test->pause); + /* If a test fails, don't flood the console */ + if (test->stop) + break; + } +} + +static void rcu_stress_fwd(struct maple_tree *mt, struct rcu_test_struct2 *test, + int count, struct rcu_reader_struct *test_reader) +{ + int j, i; + bool toggle = true; + + test->start = true; /* Release the hounds! */ + usleep(5); + for (j = 0; j < 10000; j++) { + toggle = !toggle; + for (i = 0; i < count; i++) { + unsigned long start, end; + struct rcu_reader_struct *this = &test_reader[i]; + + /* Mod offset */ + if (j == 600) { + start = test->index[this->id + this->mod]; + end = test->last[this->id + this->mod]; + mtree_store_range(mt, start, end, + xa_mk_value(this->id + this->mod * 2 + + 1 + RCU_RANGE_COUNT), + GFP_KERNEL); + } + + /* Toggle */ + if (!(j % 5)) { + start = test->index[this->id + this->flip]; + end = test->last[this->id + this->flip]; + mtree_store_range(mt, start, end, + xa_mk_value((toggle ? start : + this->id + this->flip + + RCU_RANGE_COUNT)), + GFP_KERNEL); + } + + /* delete */ + if (j == 400) { + start = test->index[this->id + this->del]; + end = test->last[this->id + this->del]; + mtree_store_range(mt, start, end, NULL, GFP_KERNEL); + } + + /* add */ + if (j == 500) { + start = test->index[this->id + this->add]; + end = test->last[this->id + this->add]; + mtree_store_range(mt, start, end, + xa_mk_value(start), GFP_KERNEL); + } + } + usleep(test->pause); + /* If a test fails, don't flood the console */ + if (test->stop) + break; + } +} + +/* + * This is to check: + * 1. Range that is not ever present + * 2. Range that is always present + * 3. Things being added but not removed. + * 4. Things being removed but not added. + * 5. Things are being added and removed, searches my succeed or fail + * + * This sets up two readers for every 10 entries; one forward and one reverse + * reading. 
+ */ +static void rcu_stress(struct maple_tree *mt, bool forward) +{ + unsigned int count, i; + unsigned long r, seed; + pthread_t readers[RCU_RANGE_COUNT / 5]; + struct rcu_test_struct2 test; + struct rcu_reader_struct test_reader[RCU_RANGE_COUNT / 5]; + void *(*function)(void*); + + /* Test setup */ + test.mt = mt; + test.pause = 5; + test.seen_toggle = 0; + test.seen_deleted = 0; + test.seen_added = 0; + test.seen_modified = 0; + test.thread_count = 0; + test.start = test.stop = false; + seed = time(NULL); + srand(seed); + for (i = 0; i < RCU_RANGE_COUNT;i++) { + r = seed + rand(); + mtree_store_range(mt, seed, r, + xa_mk_value(seed), GFP_KERNEL); + + /* Record start and end of entry */ + test.index[i] = seed; + test.last[i] = r; + seed = 1 + r + rand() % 10; + } + + i = count = ARRAY_SIZE(readers); + while(i--) { + unsigned long id; + id = i / 2 * 10; + if (i % 2) + function = rcu_reader_fwd; + else + function = rcu_reader_rev; + + rcu_reader_setup(&test_reader[i], id, &test); + if (pthread_create(&readers[i], NULL, *function, + &test_reader[i])) { + perror("creating reader thread"); + exit(1); + } + } + + for (i = 0; i < ARRAY_SIZE(readers); i++) { + struct rcu_reader_struct *this = &test_reader[i]; + int add = this->id + this->add; + + /* Remove add entries from the tree for later addition */ + mtree_store_range(mt, test.index[add], test.last[add], + NULL, GFP_KERNEL); + } + + mt_set_in_rcu(mt); + do { + usleep(5); + } while(test.thread_count > ARRAY_SIZE(readers)); + + if (forward) + rcu_stress_fwd(mt, &test, count, test_reader); + else + rcu_stress_rev(mt, &test, count, test_reader); + + test.stop = true; + while(count--) + pthread_join(readers[count], NULL); + + mt_validate(mt); +} + + +struct rcu_test_struct { + struct maple_tree *mt; /* the maple tree */ + int count; /* Number of times to check value(s) */ + unsigned long index; /* The first index to check */ + void *entry1; /* The first entry value */ + void *entry2; /* The second entry value */ + void *entry3; /* The third entry value */ + + bool update_2; + bool update_3; + unsigned long range_start; + unsigned long range_end; + unsigned int loop_sleep; + unsigned int val_sleep; + + unsigned int failed; /* failed detection for other threads */ + unsigned int seen_entry2; /* Number of threads that have seen the new value */ + unsigned int seen_entry3; /* Number of threads that have seen the new value */ + unsigned int seen_both; /* Number of threads that have seen both new values */ + unsigned int seen_toggle; + unsigned int seen_added; + unsigned int seen_removed; + unsigned long last; /* The end of the range to write. 
*/ + + unsigned long removed; /* The index of the removed entry */ + unsigned long added; /* The index of the removed entry */ + unsigned long toggle; /* The index of the removed entry */ +}; + +static inline +int eval_rcu_entry(struct rcu_test_struct *test, void *entry, bool *update_2, + bool *update_3) +{ + if (entry == test->entry1) + return 0; + + if (entry == test->entry2) { + if (!(*update_2)) { + uatomic_inc(&test->seen_entry2); + *update_2 = true; + if (update_3) + uatomic_inc(&test->seen_both); + } + return 0; + } + + if (entry == test->entry3) { + if (!(*update_3)) { + uatomic_inc(&test->seen_entry3); + *update_3 = true; + if (update_2) + uatomic_inc(&test->seen_both); + } + return 0; + } + + return 1; +} + +/* + * rcu_val() - Read a given value in the tree test->count times using the + * regular API + * + * @ptr: The pointer to the rcu_test_struct + */ +static void *rcu_val(void *ptr) +{ + struct rcu_test_struct *test = (struct rcu_test_struct*)ptr; + unsigned long count = test->count; + bool update_2 = false; + bool update_3 = false; + void *entry; + + rcu_register_thread(); + while (count--) { + usleep(test->val_sleep); + /* + * No locking required, regular API locking is handled in the + * maple tree code + */ + entry = mtree_load(test->mt, test->index); + MT_BUG_ON(test->mt, eval_rcu_entry(test, entry, &update_2, + &update_3)); + } + rcu_unregister_thread(); + return NULL; +} + +/* + * rcu_loop() - Loop over a section of the maple tree, checking for an expected + * value using the advanced API + * + * @ptr - The pointer to the rcu_test_struct + */ +static void *rcu_loop(void *ptr) +{ + struct rcu_test_struct *test = (struct rcu_test_struct*)ptr; + unsigned long count = test->count; + void *entry, *expected; + bool update_2 = false; + bool update_3 = false; + MA_STATE(mas, test->mt, test->range_start, test->range_start); + + rcu_register_thread(); + + /* + * Loop through the test->range_start - test->range_end test->count + * times + */ + while (count--) { + usleep(test->loop_sleep); + rcu_read_lock(); + mas_for_each(&mas, entry, test->range_end) { + /* The expected value is based on the start range. */ + expected = xa_mk_value(mas.index ? mas.index / 10 : 0); + + /* Out of the interesting range */ + if (mas.index < test->index || mas.index > test->last) { + if (entry != expected) { + printk("%lx - %lx = %p not %p\n", + mas.index, mas.last, entry, expected); + } + MT_BUG_ON(test->mt, entry != expected); + continue; + } + + if (entry == expected) + continue; /* Not seen. */ + + /* In the interesting range */ + MT_BUG_ON(test->mt, eval_rcu_entry(test, entry, + &update_2, + &update_3)); + } + rcu_read_unlock(); + mas_set(&mas, test->range_start); + } + + rcu_unregister_thread(); + return NULL; +} + +static noinline +void run_check_rcu(struct maple_tree *mt, struct rcu_test_struct *vals) +{ + + int i; + void *(*function)(void*); + pthread_t readers[20]; + + mt_set_in_rcu(mt); + MT_BUG_ON(mt, !mt_in_rcu(mt)); + + for (i = 0; i < ARRAY_SIZE(readers); i++) { + if (i % 2) + function = rcu_loop; + else + function = rcu_val; + + if (pthread_create(&readers[i], NULL, *function, vals)) { + perror("creating reader thread"); + exit(1); + } + } + + usleep(5); /* small yield to ensure all threads are at least started. */ + mtree_store_range(mt, vals->index, vals->last, vals->entry2, + GFP_KERNEL); + while (i--) + pthread_join(readers[i], NULL); + + /* Make sure the test caught at least one update. 
*/ + MT_BUG_ON(mt, !vals->seen_entry2); +} + +static noinline +void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals) +{ + + int i; + void *(*function)(void*); + pthread_t readers[20]; + unsigned int index = vals->index; + + mt_set_in_rcu(mt); + MT_BUG_ON(mt, !mt_in_rcu(mt)); + + for (i = 0; i < ARRAY_SIZE(readers); i++) { + if (i % 2) + function = rcu_loop; + else + function = rcu_val; + + if (pthread_create(&readers[i], NULL, *function, vals)) { + perror("creating reader thread"); + exit(1); + } + } + + usleep(5); /* small yield to ensure all threads are at least started. */ + + while (index <= vals->last) { + mtree_store(mt, index, + (index % 2 ? vals->entry2 : vals->entry3), + GFP_KERNEL); + index++; + usleep(5); + } + + while (i--) + pthread_join(readers[i], NULL); + + /* Make sure the test caught at least one update. */ + MT_BUG_ON(mt, !vals->seen_entry2); + MT_BUG_ON(mt, !vals->seen_entry3); + MT_BUG_ON(mt, !vals->seen_both); +} +static noinline void check_rcu_simulated(struct maple_tree *mt) +{ + unsigned long i, nr_entries = 1000; + unsigned long target = 4320; + unsigned long val = 0xDEAD; + + MA_STATE(mas_writer, mt, 0, 0); + MA_STATE(mas_reader, mt, target, target); + + rcu_register_thread(); + + mt_set_in_rcu(mt); + mas_lock(&mas_writer); + for (i = 0; i <= nr_entries; i++) { + mas_writer.index = i * 10; + mas_writer.last = i * 10 + 5; + mas_store_gfp(&mas_writer, xa_mk_value(i), GFP_KERNEL); + } + mas_unlock(&mas_writer); + + /* Overwrite one entry with a new value. */ + mas_set_range(&mas_writer, target, target + 5); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val)); + rcu_read_unlock(); + + /* Restore value. */ + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL); + mas_unlock(&mas_writer); + mas_reset(&mas_reader); + + + /* Overwrite 1/2 the entry */ + mas_set_range(&mas_writer, target, target + 2); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val)); + rcu_read_unlock(); + + + /* Restore value. */ + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL); + mas_unlock(&mas_writer); + mas_reset(&mas_reader); + + /* Overwrite last 1/2 the entry */ + mas_set_range(&mas_writer, target + 2, target + 5); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + rcu_read_unlock(); + + + /* Restore value. */ + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL); + mas_unlock(&mas_writer); + mas_reset(&mas_reader); + + /* Overwrite more than the entry */ + mas_set_range(&mas_writer, target - 5, target + 15); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val)); + rcu_read_unlock(); + + /* Restore value. 
*/ + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL); + mas_unlock(&mas_writer); + mas_reset(&mas_reader); + + /* Overwrite more than the node. */ + mas_set_range(&mas_writer, target - 400, target + 400); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val)); + rcu_read_unlock(); + + /* Restore value. */ + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL); + mas_unlock(&mas_writer); + mas_reset(&mas_reader); + + /* Overwrite the tree */ + mas_set_range(&mas_writer, 0, ULONG_MAX); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val)); + rcu_read_unlock(); + + /* Clear out tree & recreate it */ + mas_lock(&mas_writer); + mas_set_range(&mas_writer, 0, ULONG_MAX); + mas_store_gfp(&mas_writer, NULL, GFP_KERNEL); + mas_set_range(&mas_writer, 0, 0); + for (i = 0; i <= nr_entries; i++) { + mas_writer.index = i * 10; + mas_writer.last = i * 10 + 5; + mas_store_gfp(&mas_writer, xa_mk_value(i), GFP_KERNEL); + } + mas_unlock(&mas_writer); + + /* next check */ + /* Overwrite one entry with a new value. */ + mas_reset(&mas_reader); + mas_set_range(&mas_writer, target, target + 5); + mas_set_range(&mas_reader, target, target); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_prev(&mas_reader, 0); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_next(&mas_reader, ULONG_MAX) != xa_mk_value(val)); + rcu_read_unlock(); + + /* Restore value. */ + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL); + mas_unlock(&mas_writer); + + /* prev check */ + /* Overwrite one entry with a new value. */ + mas_reset(&mas_reader); + mas_set_range(&mas_writer, target, target + 5); + mas_set_range(&mas_reader, target, target); + rcu_read_lock(); + MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10)); + mas_next(&mas_reader, ULONG_MAX); + mas_lock(&mas_writer); + mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL); + mas_unlock(&mas_writer); + MT_BUG_ON(mt, mas_prev(&mas_reader, 0) != xa_mk_value(val)); + rcu_read_unlock(); + + rcu_unregister_thread(); +} + +static noinline void check_rcu_threaded(struct maple_tree *mt) +{ + unsigned long i, nr_entries = 1000; + struct rcu_test_struct vals; + + vals.val_sleep = 200; + vals.loop_sleep = 110; + + rcu_register_thread(); + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + /* Store across several slots. */ + vals.count = 1000; + vals.mt = mt; + vals.index = 8650; + vals.last = 8666; + vals.entry1 = xa_mk_value(865); + vals.entry2 = xa_mk_value(8650); + vals.entry3 = xa_mk_value(8650); + vals.range_start = 0; + vals.range_end = ULONG_MAX; + vals.seen_entry2 = 0; + vals.seen_entry3 = 0; + + run_check_rcu(mt, &vals); + mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + /* 4390-4395: value 439 (0x1b7) [0x36f] */ + /* Store across several slots. 
*/ + /* Spanning store. */ + vals.count = 10000; + vals.mt = mt; + vals.index = 4390; + vals.last = 4398; + vals.entry1 = xa_mk_value(4390); + vals.entry2 = xa_mk_value(439); + vals.entry3 = xa_mk_value(439); + vals.seen_entry2 = 0; + vals.range_start = 4316; + vals.range_end = 5035; + run_check_rcu(mt, &vals); + mtree_destroy(mt); + + + /* Forward writer for rcu stress */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + rcu_stress(mt, true); + mtree_destroy(mt); + + /* Reverse writer for rcu stress */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + rcu_stress(mt, false); + mtree_destroy(mt); + + /* Slow reader test with spanning store. */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + /* 4390-4395: value 439 (0x1b7) [0x36f] */ + /* Store across several slots. */ + /* Spanning store. */ + vals.count = 15000; + vals.mt = mt; + vals.index = 4390; + vals.last = 4398; + vals.entry1 = xa_mk_value(4390); + vals.entry2 = xa_mk_value(439); + vals.entry3 = xa_mk_value(4391); + vals.seen_toggle = 0; + vals.seen_added = 0; + vals.seen_removed = 0; + vals.range_start = 4316; + vals.range_end = 5035; + vals.removed = 4360; + vals.added = 4396; + vals.toggle = 4347; + vals.val_sleep = 400; + vals.loop_sleep = 200; + vals.seen_entry2 = 0; + vals.seen_entry3 = 0; + vals.seen_both = 0; + vals.entry3 = xa_mk_value(438); + + run_check_rcu_slowread(mt, &vals); + rcu_unregister_thread(); +} + +extern void test_kmem_cache_bulk(void); + +/* Test spanning writes that require balancing right sibling or right cousin */ +static noinline void check_spanning_relatives(struct maple_tree *mt) +{ + + unsigned long i, nr_entries = 1000; + for (i = 0; i <= nr_entries; i++) + mtree_store_range(mt, i*10, i*10 + 5, + xa_mk_value(i), GFP_KERNEL); + + + mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL); +} + +static noinline void check_fuzzer(struct maple_tree *mt) +{ + /* + * 1. Causes a spanning rebalance of a single root node. + * Fixed by setting the correct limit in mast_cp_to_nodes() when the + * entire right side is consumed. + */ + mtree_test_insert(mt, 88, (void*)0xb1); // 0 + mtree_test_insert(mt, 84, (void*)0xa9); // 0 + mtree_test_insert(mt, 2, (void*)0x5); // 0 + mtree_test_insert(mt, 4, (void*)0x9); // 0 + mtree_test_insert(mt, 14, (void*)0x1d); // 0 + mtree_test_insert(mt, 7, (void*)0xf); // 0 + mtree_test_insert(mt, 12, (void*)0x19); // 0 + mtree_test_insert(mt, 18, (void*)0x25); // 0 + mtree_test_store_range(mt, 8, 18, (void*)0x11); + mtree_destroy(mt); + + + /* + * 2. Cause a spanning rebalance of two nodes in root. + * Fixed by setting mast->r->max correctly. + */ + mt_init_flags(mt, 0); + mtree_test_store(mt, 87, (void*)0xaf); // 0 + mtree_test_store(mt, 0, (void*)0x1); // 0 + mtree_test_load(mt, 4); // (nil) + mtree_test_insert(mt, 4, (void*)0x9); // 0 + mtree_test_store(mt, 8, (void*)0x11); // 0 + mtree_test_store(mt, 44, (void*)0x59); // 0 + mtree_test_store(mt, 68, (void*)0x89); // 0 + mtree_test_store(mt, 2, (void*)0x5); // 0 + mtree_test_insert(mt, 43, (void*)0x57); // 0 + mtree_test_insert(mt, 24, (void*)0x31); // 0 + mtree_test_insert(mt, 844, (void*)0x699); // 0 + mtree_test_store(mt, 84, (void*)0xa9); // 0 + mtree_test_store(mt, 4, (void*)0x9); // 0 + mtree_test_erase(mt, 4); // 0x9 + mtree_test_load(mt, 5); // (nil) + mtree_test_erase(mt, 0); // 0x1 + mtree_destroy(mt); + + /* + * 3. 
Cause a node overflow on copy + * Fixed by using the correct check for node size in mas_wr_modify() + * Also discovered issue with metadata setting. + */ + mt_init_flags(mt, 0); + mtree_test_store_range(mt, 0, 18446744073709551615UL, (void*)0x1); // 0 + mtree_test_store(mt, 4, (void*)0x9); // 0 + mtree_test_erase(mt, 5); // 0x1 + mtree_test_erase(mt, 0); // 0x1 + mtree_test_erase(mt, 4); // 0x9 + mtree_test_store(mt, 5, (void*)0xb); // 0 + mtree_test_erase(mt, 5); // 0xb + mtree_test_store(mt, 5, (void*)0xb); // 0 + mtree_test_erase(mt, 5); // 0xb + mtree_test_erase(mt, 4); // (nil) + mtree_test_store(mt, 4, (void*)0x9); // 0 + mtree_test_store(mt, 444, (void*)0x379); // 0 + mtree_test_store(mt, 0, (void*)0x1); // 0 + mtree_test_load(mt, 0); // 0x1 + mtree_test_store(mt, 5, (void*)0xb); // 0 + mtree_test_erase(mt, 0); + mtree_destroy(mt); + + /* + * 4. spanning store failure due to writing incorrect pivot value at + * last slot. + * Fixed by setting mast->r->max correctly in mast_cp_to_nodes() + * + */ + mt_init_flags(mt, 0); + mtree_test_insert(mt, 261, (void*)0x20b); // 0 + mtree_test_store(mt , 516, (void*)0x409); // 0 + mtree_test_store(mt , 6, (void*)0xd); // 0 + mtree_test_insert(mt, 5, (void*)0xb); // 0 + mtree_test_insert(mt, 1256, (void*)0x9d1); // 0 + mtree_test_store(mt , 4, (void*)0x9); // 0 + mtree_test_erase(mt, 1); // (nil) + mtree_test_store(mt , 56, (void*)0x71); // 0 + mtree_test_insert(mt, 1, (void*)0x3); // 0 + mtree_test_store(mt , 24, (void*)0x31); // 0 + mtree_test_erase(mt, 1); // (nil) + mtree_test_insert(mt, 2263, (void*)0x11af); // 0 + mtree_test_insert(mt, 446, (void*)0x37d); // 0 + mtree_test_store_range(mt, 6, 45, (void*)0xd); // 0 + mtree_test_store_range(mt, 3, 446, (void*)0x7); // 0 + mtree_destroy(mt); + + /* + * 5. mas_wr_extend_null() may overflow slots. + * Fix by checking against wr_mas->node_end. + */ + mt_init_flags(mt, 0); + mtree_test_store(mt , 48, (void *)0x61); // 0 + mtree_test_store(mt , 3, (void *)0x7); // 0 + mtree_test_load(mt, 0); // (nil) + mtree_test_store(mt , 88, (void *)0xb1); // 0 + mtree_test_store(mt , 81, (void *)0xa3); // 0 + mtree_test_insert(mt, 0, (void *)0x1); // 0 + mtree_test_insert(mt, 8, (void *)0x11); // 0 + mtree_test_insert(mt, 4, (void *)0x9); // 0 + mtree_test_insert(mt, 2480, (void *)0x1361); // 0 + mtree_test_insert(mt, 18446744073709551615UL, + (void *)0xffffffffffffffff); // 0 + mtree_test_erase(mt, 18446744073709551615UL); // (nil) + mtree_destroy(mt); + + /* + * 6. When reusing a node with an implied pivot and the node is + * shrinking, old data would be left in the implied slot + * Fixed by checking the last pivot for the mas->max and clear + * accordingly. This only affected the left-most node as that node is + * the only one allowed to end in NULL. 
+ */ + mt_init_flags(mt, 0); + mtree_test_erase(mt, 3); // (nil) + mtree_test_insert(mt, 22, (void *)0x2d); // 0 + mtree_test_insert(mt, 15, (void *)0x1f); // 0 + mtree_test_load(mt, 2); // (nil) + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_insert(mt, 5, (void *)0xb); // 0 + mtree_test_erase(mt, 1); // (nil) + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_insert(mt, 4, (void *)0x9); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_erase(mt, 1); // 0x3 + mtree_test_insert(mt, 2, (void *)0x5); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_erase(mt, 3); // 0x3 + mtree_test_insert(mt, 22, (void *)0x2d); // 0 + mtree_test_insert(mt, 15, (void *)0x1f); // -17 + mtree_test_insert(mt, 2, (void *)0x5); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_insert(mt, 8, (void *)0x11); // -17 + mtree_test_load(mt, 2); // (nil) + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_store(mt , 1, (void *)0x3); // -17 + mtree_test_insert(mt, 5, (void *)0xb); // 0 + mtree_test_erase(mt, 1); // 0x5 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_insert(mt, 4, (void *)0x9); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_erase(mt, 1); // 0x3 + mtree_test_insert(mt, 2, (void *)0x5); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_erase(mt, 3); // 0x3 + mtree_test_insert(mt, 22, (void *)0x2d); // 0 + mtree_test_insert(mt, 15, (void *)0x1f); // -17 + mtree_test_insert(mt, 2, (void *)0x5); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_insert(mt, 8, (void *)0x11); // -17 + mtree_test_insert(mt, 12, (void *)0x19); // -17 + mtree_test_erase(mt, 1); // (nil) + mtree_test_store_range(mt, 4, 62, (void *)0x9); // 0 + mtree_test_erase(mt, 62); // 0x3 + mtree_test_store_range(mt, 1, 0, (void *)0x3); // 0 + mtree_test_insert(mt, 11, (void *)0x17); // -22 + mtree_test_insert(mt, 3, (void *)0x7); // 0 + mtree_test_insert(mt, 3, (void *)0x7); // 0 + mtree_test_store(mt , 62, (void *)0x7d); // -17 + mtree_test_erase(mt, 62); // 0x9 + mtree_test_store_range(mt, 1, 15, (void *)0x3); // 0 + mtree_test_erase(mt, 1); // 0x7d + mtree_test_insert(mt, 22, (void *)0x2d); // 0 + mtree_test_insert(mt, 12, (void *)0x19); // 0 + mtree_test_erase(mt, 1); // 0x3 + mtree_test_insert(mt, 3, (void *)0x7); // 0 + mtree_test_store(mt , 62, (void *)0x7d); // 0 + mtree_test_erase(mt, 62); // (nil) + mtree_test_insert(mt, 122, (void *)0xf5); // 0 + mtree_test_store(mt , 3, (void *)0x7); // 0 + mtree_test_insert(mt, 0, (void *)0x1); // 0 + mtree_test_store_range(mt, 0, 1, (void *)0x1); // 0 + mtree_test_insert(mt, 85, (void *)0xab); // 0 + mtree_test_insert(mt, 72, (void *)0x91); // 0 + mtree_test_insert(mt, 81, (void *)0xa3); // 0 + mtree_test_insert(mt, 726, (void *)0x5ad); // 0 + mtree_test_insert(mt, 0, (void *)0x1); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_store(mt , 51, (void *)0x67); // -17 + mtree_test_insert(mt, 611, (void *)0x4c7); // 0 + mtree_test_insert(mt, 485, (void *)0x3cb); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_erase(mt, 1); // 0x7d + mtree_test_insert(mt, 0, (void *)0x1); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_insert_range(mt, 26, 1, (void *)0x35); // 0 + mtree_test_load(mt, 1); // 0x1 + mtree_test_store_range(mt, 1, 22, (void *)0x3); // -22 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_erase(mt, 1); 
// 0x3 + mtree_test_load(mt, 53); // 0x3 + mtree_test_load(mt, 1); // 0xab + mtree_test_store_range(mt, 1, 1, (void *)0x3); // -17 + mtree_test_insert(mt, 222, (void *)0x1bd); // 0 + mtree_test_insert(mt, 485, (void *)0x3cb); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_erase(mt, 1); // (nil) + mtree_test_load(mt, 0); // 0x3 + mtree_test_insert(mt, 21, (void *)0x2b); // -17 + mtree_test_insert(mt, 3, (void *)0x7); // 0 + mtree_test_store(mt , 621, (void *)0x4db); // 0 + mtree_test_insert(mt, 0, (void *)0x1); // 0 + mtree_test_erase(mt, 5); // 0x1 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_store(mt , 62, (void *)0x7d); // 0 + mtree_test_erase(mt, 62); // (nil) + mtree_test_store_range(mt, 1, 0, (void *)0x3); // 0 + mtree_test_insert(mt, 22, (void *)0x2d); // -22 + mtree_test_insert(mt, 12, (void *)0x19); // 0 + mtree_test_erase(mt, 1); // 0x7d + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_store_range(mt, 4, 62, (void *)0x9); // 0 + mtree_test_erase(mt, 62); // 0x3 + mtree_test_erase(mt, 1); // 0x9 + mtree_test_load(mt, 1); // 0x3 + mtree_test_store_range(mt, 1, 22, (void *)0x3); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_erase(mt, 1); // (nil) + mtree_test_load(mt, 53); // 0x3 + mtree_test_load(mt, 1); // (nil) + mtree_test_store_range(mt, 1, 1, (void *)0x3); // -17 + mtree_test_insert(mt, 222, (void *)0x1bd); // 0 + mtree_test_insert(mt, 485, (void *)0x3cb); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_erase(mt, 1); // (nil) + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_load(mt, 0); // 0x3 + mtree_test_load(mt, 0); // 0x1 + mtree_destroy(mt); + + /* + * 7. Previous fix was incomplete, fix mas_resuse_node() clearing of old + * data by overwriting it first - that way metadata is of no concern. 
+ */ + mt_init_flags(mt, 0); + mtree_test_load(mt, 1); // (nil) + mtree_test_insert(mt, 102, (void *)0xcd); // -17 + mtree_test_erase(mt, 2); // (nil) + mtree_test_erase(mt, 0); // (nil) + mtree_test_load(mt, 0); // (nil) + mtree_test_insert(mt, 4, (void *)0x9); // 0 + mtree_test_insert(mt, 2, (void *)0x5); // 0 + mtree_test_insert(mt, 110, (void *)0xdd); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_insert_range(mt, 5, 0, (void *)0xb); // 0 + mtree_test_erase(mt, 2); // (nil) + mtree_test_store(mt , 0, (void *)0x1); // -22 + mtree_test_store(mt , 112, (void *)0xe1); // 0 + mtree_test_insert(mt, 21, (void *)0x2b); // 0 + mtree_test_store(mt , 1, (void *)0x3); // 0 + mtree_test_insert_range(mt, 110, 2, (void *)0xdd); // 0 + mtree_test_store(mt , 2, (void *)0x5); // -22 + mtree_test_load(mt, 22); // 0x5 + mtree_test_erase(mt, 2); // (nil) + mtree_test_store(mt , 210, (void *)0x1a5); // 0 + mtree_test_store_range(mt, 0, 2, (void *)0x1); // 0 + mtree_test_store(mt , 2, (void *)0x5); // 0 + mtree_test_erase(mt, 2); // 0x5 + mtree_test_erase(mt, 22); // 0x5 + mtree_test_erase(mt, 1); // (nil) + mtree_test_erase(mt, 2); // (nil) + mtree_test_store(mt , 0, (void *)0x1); // 0 + mtree_test_load(mt, 112); // (nil) + mtree_test_insert(mt, 2, (void *)0x5); // 0 + mtree_test_erase(mt, 2); // (nil) + mtree_test_store(mt , 1, (void *)0x3); // 0 + mtree_test_insert_range(mt, 1, 2, (void *)0x3); // 0 + mtree_test_erase(mt, 0); // 0x5 + mtree_test_erase(mt, 2); // 0x1 + mtree_test_store(mt , 2, (void *)0x5); // -17 + mtree_test_erase(mt, 0); // 0x5 + mtree_test_erase(mt, 2); // (nil) + mtree_test_store(mt , 0, (void *)0x1); // 0 + mtree_test_store(mt , 0, (void *)0x1); // 0 + mtree_test_erase(mt, 2); // 0x5 + mtree_test_store(mt , 2, (void *)0x5); // 0 + mtree_test_erase(mt, 2); // (nil) + mtree_test_insert(mt, 2, (void *)0x5); // 0 + mtree_test_insert_range(mt, 1, 2, (void *)0x3); // 0 + mtree_test_erase(mt, 0); // 0x5 + mtree_test_erase(mt, 2); // 0x1 + mtree_test_store(mt , 0, (void *)0x1); // -17 + mtree_test_load(mt, 112); // 0x5 + mtree_test_store_range(mt, 110, 12, (void *)0xdd); // 0 + mtree_test_store(mt , 2, (void *)0x5); // -22 + mtree_test_load(mt, 110); // (nil) + mtree_test_insert_range(mt, 4, 71, (void *)0x9); // 0 + mtree_test_load(mt, 2); // (nil) + mtree_test_store(mt , 2, (void *)0x5); // 0 + mtree_test_insert_range(mt, 11, 22, (void *)0x17); // 0 + mtree_test_erase(mt, 12); // 0x5 + mtree_test_store(mt , 2, (void *)0x5); // -17 + mtree_test_load(mt, 22); // 0x9 + mtree_destroy(mt); + + + /* + * 8. When rebalancing or spanning_rebalance(), the max of the new node + * may be set incorrectly to the final pivot and not the right max. + * Fix by setting the left max to orig right max if the entire node is + * consumed. 
+ */ + mt_init_flags(mt, 0); + mtree_test_store(mt , 6, (void *)0xd); // 0 + mtree_test_store(mt , 67, (void *)0x87); // 0 + mtree_test_insert(mt, 15, (void *)0x1f); // 0 + mtree_test_insert(mt, 6716, (void *)0x3479); // 0 + mtree_test_store(mt , 61, (void *)0x7b); // 0 + mtree_test_insert(mt, 13, (void *)0x1b); // 0 + mtree_test_store(mt , 8, (void *)0x11); // 0 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_load(mt, 0); // (nil) + mtree_test_erase(mt, 67167); // (nil) + mtree_test_insert_range(mt, 6, 7167, (void *)0xd); // 0 + mtree_test_insert(mt, 6, (void *)0xd); // -17 + mtree_test_erase(mt, 67); // (nil) + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_erase(mt, 667167); // 0x87 + mtree_test_insert(mt, 6, (void *)0xd); // -17 + mtree_test_store(mt , 67, (void *)0x87); // -17 + mtree_test_insert(mt, 5, (void *)0xb); // 0 + mtree_test_erase(mt, 1); // 0x3479 + mtree_test_insert(mt, 6, (void *)0xd); // 0 + mtree_test_erase(mt, 67); // 0x3 + mtree_test_insert(mt, 15, (void *)0x1f); // -17 + mtree_test_insert(mt, 67167, (void *)0x20cbf); // -17 + mtree_test_insert(mt, 1, (void *)0x3); // 0 + mtree_test_load(mt, 7); // 0x87 + mtree_test_insert(mt, 16, (void *)0x21); // 0 + mtree_test_insert(mt, 36, (void *)0x49); // 0 + mtree_test_store(mt , 67, (void *)0x87); // 0 + mtree_test_store(mt , 6, (void *)0xd); // 0 + mtree_test_insert(mt, 367, (void *)0x2df); // 0 + mtree_test_insert(mt, 115, (void *)0xe7); // 0 + mtree_test_store(mt , 0, (void *)0x1); // 0 + mtree_test_store_range(mt, 1, 3, (void *)0x3); // 0 + mtree_test_store(mt , 1, (void *)0x3); // 0 + mtree_test_erase(mt, 67167); // (nil) + mtree_test_insert_range(mt, 6, 47, (void *)0xd); // 0 + mtree_test_store(mt , 1, (void *)0x3); // -17 + mtree_test_insert_range(mt, 1, 67, (void *)0x3); // 0 + mtree_test_load(mt, 67); // 0x20cbf + mtree_test_insert(mt, 1, (void *)0x3); // -17 + mtree_test_erase(mt, 67167); // 0x87 + mtree_destroy(mt); + + /* + * 9. spanning store to the end of data caused an invalid metadata + * length which resulted in a crash eventually. + * Fix by checking if there is a value in pivot before incrementing the + * metadata end in mab_mas_cp(). To ensure this doesn't happen again, + * abstract the two locations this happens into a function called + * mas_leaf_set_meta(). 
+ */ + mt_init_flags(mt, 0); + mtree_test_insert(mt, 21, (void *)0x2b); // 0 + mtree_test_insert(mt, 12, (void *)0x19); // 0 + mtree_test_insert(mt, 6, (void *)0xd); // 0 + mtree_test_insert(mt, 8, (void *)0x11); // 0 + mtree_test_insert(mt, 2, (void *)0x5); // 0 + mtree_test_insert(mt, 91, (void *)0xb7); // 0 + mtree_test_insert(mt, 18, (void *)0x25); // 0 + mtree_test_insert(mt, 81, (void *)0xa3); // 0 + mtree_test_store_range(mt, 0, 128, (void *)0x1); // 0 + mtree_test_store(mt , 1, (void *)0x3); // 0 + mtree_test_erase(mt, 8); // (nil) + mtree_test_insert(mt, 11, (void *)0x17); // 0 + mtree_test_insert(mt, 8, (void *)0x11); // 0 + mtree_test_insert(mt, 21, (void *)0x2b); // 0 + mtree_test_insert(mt, 2, (void *)0x5); // 0 + mtree_test_insert(mt, 18446744073709551605UL, (void *)0xffffffffffffffeb); // 0 + mtree_test_erase(mt, 18446744073709551605UL); // 0x1 + mtree_test_store_range(mt, 0, 281, (void *)0x1); // 0 + mtree_test_erase(mt, 2); // 0xffffffffffffffeb + mtree_test_insert(mt, 1211, (void *)0x977); // 0 + mtree_test_insert(mt, 111, (void *)0xdf); // 0 + mtree_test_insert(mt, 13, (void *)0x1b); // 0 + mtree_test_insert(mt, 211, (void *)0x1a7); // 0 + mtree_test_insert(mt, 11, (void *)0x17); // 0 + mtree_test_insert(mt, 5, (void *)0xb); // 0 + mtree_test_insert(mt, 1218, (void *)0x985); // 0 + mtree_test_insert(mt, 61, (void *)0x7b); // 0 + mtree_test_store(mt , 1, (void *)0x3); // 0 + mtree_test_insert(mt, 121, (void *)0xf3); // 0 + mtree_test_insert(mt, 8, (void *)0x11); // 0 + mtree_test_insert(mt, 21, (void *)0x2b); // 0 + mtree_test_insert(mt, 2, (void *)0x5); // 0 + mtree_test_insert(mt, 18446744073709551605UL, (void *)0xffffffffffffffeb); // 0 + mtree_test_erase(mt, 18446744073709551605UL); // 0x1 +} + +static DEFINE_MTREE(tree); +static int maple_tree_seed(void) +{ + unsigned long set[] = {5015, 5014, 5017, 25, 1000, + 1001, 1002, 1003, 1005, 0, + 5003, 5002}; + void *ptr = &set; + + pr_info("\nTEST STARTING\n\n"); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_root_expand(&tree); + mtree_destroy(&tree); + +#if defined(BENCH_SLOT_STORE) +#define BENCH + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + bench_slot_store(&tree); + mtree_destroy(&tree); + goto skip; +#endif +#if defined(BENCH_NODE_STORE) +#define BENCH + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + bench_node_store(&tree); + mtree_destroy(&tree); + goto skip; +#endif +#if defined(BENCH_AWALK) +#define BENCH + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + bench_awalk(&tree); + mtree_destroy(&tree); + goto skip; +#endif +#if defined(BENCH_WALK) +#define BENCH + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + bench_walk(&tree); + mtree_destroy(&tree); + goto skip; +#endif +#if defined(BENCH_FORK) +#define BENCH + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + bench_forking(&tree); + mtree_destroy(&tree); + goto skip; +#endif +#if defined(BENCH_MT_FOR_EACH) +#define BENCH + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + bench_mt_for_each(&tree); + mtree_destroy(&tree); + goto skip; +#endif + + test_kmem_cache_bulk(); + + mt_init_flags(&tree, 0); + check_new_node(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_prealloc(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_spanning_write(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_null_expand(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + check_dfs_preorder(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, 
MT_FLAGS_ALLOC_RANGE); + check_forking(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_mas_store_gfp(&tree); + mtree_destroy(&tree); + + /* Test ranges (store and insert) */ + mt_init_flags(&tree, 0); + check_ranges(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_alloc_range(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_alloc_rev_range(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + + check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */ + + check_insert(&tree, set[9], &tree); /* Insert 0 */ + check_load(&tree, set[9], &tree); /* See if 0 -> &tree */ + check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */ + + check_insert(&tree, set[10], ptr); /* Insert 5003 */ + check_load(&tree, set[9], &tree); /* See if 0 -> &tree */ + check_load(&tree, set[11], NULL); /* See if 5002 -> NULL */ + check_load(&tree, set[10], ptr); /* See if 5003 -> ptr */ + + /* Clear out the tree */ + mtree_destroy(&tree); + + /* Try to insert, insert a dup, and load back what was inserted. */ + mt_init_flags(&tree, 0); + check_insert(&tree, set[0], &tree); /* Insert 5015 */ + check_dup_insert(&tree, set[0], &tree); /* Insert 5015 again */ + check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */ + + /* + * Second set of tests try to load a value that doesn't exist, inserts + * a second value, then loads the value again + */ + check_load(&tree, set[1], NULL); /* See if 5014 -> NULL */ + check_insert(&tree, set[1], ptr); /* insert 5014 -> ptr */ + check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */ + check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */ + /* + * Tree currently contains: + * p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil) + */ + check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */ + check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */ + + check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */ + check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */ + check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */ + check_load(&tree, set[7], &tree); /* 1003 = &tree ? */ + + /* Clear out tree */ + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + /* Test inserting into a NULL hole. */ + check_insert(&tree, set[5], ptr); /* insert 1001 -> ptr */ + check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */ + check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */ + check_load(&tree, set[5], ptr); /* See if 1001 -> ptr */ + check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */ + check_load(&tree, set[7], &tree); /* See if 1003 -> &tree */ + + /* Clear out the tree */ + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + check_erase_testset(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + /* + * set[] = {5015, 5014, 5017, 25, 1000, + * 1001, 1002, 1003, 1005, 0, + * 5003, 5002}; + */ + + check_insert(&tree, set[0], ptr); /* 5015 */ + check_insert(&tree, set[1], &tree); /* 5014 */ + check_insert(&tree, set[2], ptr); /* 5017 */ + check_insert(&tree, set[3], &tree); /* 25 */ + check_load(&tree, set[0], ptr); + check_load(&tree, set[1], &tree); + check_load(&tree, set[2], ptr); + check_load(&tree, set[3], &tree); + check_insert(&tree, set[4], ptr); /* 1000 < Should split. 
*/ + check_load(&tree, set[0], ptr); + check_load(&tree, set[1], &tree); + check_load(&tree, set[2], ptr); + check_load(&tree, set[3], &tree); /*25 */ + check_load(&tree, set[4], ptr); + check_insert(&tree, set[5], &tree); /* 1001 */ + check_load(&tree, set[0], ptr); + check_load(&tree, set[1], &tree); + check_load(&tree, set[2], ptr); + check_load(&tree, set[3], &tree); + check_load(&tree, set[4], ptr); + check_load(&tree, set[5], &tree); + check_insert(&tree, set[6], ptr); + check_load(&tree, set[0], ptr); + check_load(&tree, set[1], &tree); + check_load(&tree, set[2], ptr); + check_load(&tree, set[3], &tree); + check_load(&tree, set[4], ptr); + check_load(&tree, set[5], &tree); + check_load(&tree, set[6], ptr); + check_insert(&tree, set[7], &tree); + check_load(&tree, set[0], ptr); + check_insert(&tree, set[8], ptr); + + check_insert(&tree, set[9], &tree); + + check_load(&tree, set[0], ptr); + check_load(&tree, set[1], &tree); + check_load(&tree, set[2], ptr); + check_load(&tree, set[3], &tree); + check_load(&tree, set[4], ptr); + check_load(&tree, set[5], &tree); + check_load(&tree, set[6], ptr); + check_load(&tree, set[9], &tree); + mtree_destroy(&tree); + + check_nomem(&tree); + mt_init_flags(&tree, 0); + check_seq(&tree, 16, false); + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + check_seq(&tree, 1000, true); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_rev_seq(&tree, 1000, true); + mtree_destroy(&tree); + + check_lower_bound_split(&tree); + check_upper_bound_split(&tree); + check_mid_split(&tree); + + mt_init_flags(&tree, 0); + check_next_entry(&tree); + check_find(&tree); + check_find_2(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_prev_entry(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + check_erase2_sets(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_gap_combining(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_node_overwrite(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + next_prev_test(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_rcu_simulated(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_rcu_threaded(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_spanning_relatives(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_rev_find(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, 0); + check_fuzzer(&tree); + mtree_destroy(&tree); + + +#if defined(BENCH) +skip: +#endif + rcu_barrier(); + pr_info("maple_tree: %u of %u tests passed\n", + atomic_read(&maple_tree_tests_passed), + atomic_read(&maple_tree_tests_run)); + if (atomic_read(&maple_tree_tests_run) == + atomic_read(&maple_tree_tests_passed)) + return 0; + + return -EINVAL; +} + +static void maple_tree_harvest(void) +{ + +} + +module_init(maple_tree_seed); +module_exit(maple_tree_harvest); +MODULE_AUTHOR("Liam R. Howlett "); +MODULE_LICENSE("GPL"); diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index c4ea4fbb0bfcd..89d613e0505b3 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -4,9 +4,9 @@ CFLAGS += -I. 
-I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \ -fsanitize=undefined LDFLAGS += -fsanitize=address -fsanitize=undefined LDLIBS+= -lpthread -lurcu -TARGETS = main idr-test multiorder xarray +TARGETS = main idr-test multiorder xarray maple CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o \ - slab.o + slab.o maple.o OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \ iteration_check_2.o benchmark.o @@ -29,6 +29,8 @@ idr-test: idr-test.o $(CORE_OFILES) xarray: $(CORE_OFILES) +maple: $(CORE_OFILES) + multiorder: multiorder.o $(CORE_OFILES) clean: @@ -40,6 +42,7 @@ $(OFILES): Makefile *.h */*.h generated/map-shift.h \ ../../include/linux/*.h \ ../../include/asm/*.h \ ../../../include/linux/xarray.h \ + ../../../include/linux/maple_tree.h \ ../../../include/linux/radix-tree.h \ ../../../include/linux/idr.h @@ -51,6 +54,8 @@ idr.c: ../../../lib/idr.c xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c +maple.o: ../../../lib/maple_tree.c ../../../lib/test_maple_tree.c + generated/map-shift.h: @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \ From b736acdb31085999d6400303924a65189668bf87 Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Sat, 25 Jun 2022 00:39:07 +0000 Subject: [PATCH 016/321] test_maple_tree: add test for spanning store of entire range Entire range store over an existing tree would cause an underflow in mas_spanning_store() when the tree is multiple levels. Add a testcase to ensure it doesn't happen again. Link: https://lkml.kernel.org/r/20220625003854.1230114-3-Liam.Howlett@oracle.com Fixes: 51282228cdd4 (lib/test_maple_tree: add testing for maple tree) Signed-off-by: Liam R. Howlett Signed-off-by: Andrew Morton --- lib/test_maple_tree.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 9fc0618f4ae73..8de5705b7b9b8 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -35676,6 +35676,19 @@ static noinline void check_spanning_write(struct maple_tree *mt) mtree_unlock(mt); mtree_destroy(mt); + for (i = 1; i <= max; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mtree_lock(mt); + mas_set_range(&mas, 9, 50006); /* Will expand to 0 - ULONG_MAX */ + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mas_set(&mas, 1205); + MT_BUG_ON(mt, mas_walk(&mas) != NULL); + mtree_unlock(mt); + mt_dump(mt); + mt_validate(mt); + mtree_destroy(mt); + /* Test spanning store that requires a right cousin rebalance */ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); for (i = 0; i <= max; i++) From ded00a5066c87f1953eb4fe5f258758c939177bd Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Wed, 29 Jun 2022 15:23:51 +0000 Subject: [PATCH 017/321] test_maple_tree: add test for spanning store to most of the tree Test spanning almost the entire tree to detect if the root is dead and the node is placed in the wrong location. Link: https://lkml.kernel.org/r/20220629152340.3451959-3-Liam.Howlett@oracle.com Fixes: ("test_maple_tree: Add test for spanning store to most of the tree") Signed-off-by: Liam R. 
Howlett Signed-off-by: Andrew Morton --- lib/test_maple_tree.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 8de5705b7b9b8..b028f56488570 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -35685,7 +35685,6 @@ static noinline void check_spanning_write(struct maple_tree *mt) mas_set(&mas, 1205); MT_BUG_ON(mt, mas_walk(&mas) != NULL); mtree_unlock(mt); - mt_dump(mt); mt_validate(mt); mtree_destroy(mt); @@ -35817,6 +35816,15 @@ static noinline void check_spanning_write(struct maple_tree *mt) MT_BUG_ON(mt, mas_walk(&mas) != NULL); mtree_unlock(mt); mtree_destroy(mt); + + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i <= 100; i++) + mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); + + mtree_lock(mt); + mas_set_range(&mas, 76, 875); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + mtree_unlock(mt); } static noinline void check_null_expand(struct maple_tree *mt) From 59ed780ec7ca60d43d55faa489112ce41a84343d Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:53 +0000 Subject: [PATCH 018/321] mm: start tracking VMAs with maple tree Start tracking the VMAs with the new maple tree structure in parallel with the rb_tree. Add debug and trace events for maple tree operations and duplicate the rb_tree that is created on forks into the maple tree. The maple tree is added to the mm_struct including the mm_init struct, added support in required mm/mmap functions, added tracking in kernel/fork for process forking, and used to find the unmapped_area and checked against what the rbtree finds. This also moves the mmap_lock() in exit_mmap() since the oom reaper call does walk the VMAs. Otherwise lockdep will be unhappy if oom happens. Link: https://lkml.kernel.org/r/20220504010716.661115-10-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-9-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Signed-off-by: Matthew Wilcox (Oracle) Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/x86/kernel/tboot.c | 1 + drivers/firmware/efi/efi.c | 1 + include/linux/mm.h | 5 + include/linux/mm_types.h | 3 + include/trace/events/mmap.h | 73 ++++++++ kernel/fork.c | 20 ++- mm/init-mm.c | 2 + mm/mmap.c | 320 +++++++++++++++++++++++++++++++++--- mm/nommu.c | 13 ++ 9 files changed, 412 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 0c1154a1c4032..71c54ad3868a0 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -97,6 +97,7 @@ void __init tboot_probe(void) static pgd_t *tboot_pg_dir; static struct mm_struct tboot_mm = { .mm_rb = RB_ROOT, + .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock), .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 860534bcfdac2..1eddef189d689 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -58,6 +58,7 @@ static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR; struct mm_struct efi_mm = { .mm_rb = RB_ROOT, + .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock), .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq), diff --git a/include/linux/mm.h b/include/linux/mm.h index 125a6a3c9dbde..3e25287cb8148 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2604,6 +2604,8 @@ extern bool arch_has_descending_max_zone_pfns(void); /* nommu.c */ extern atomic_long_t mmap_pages_allocated; extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); +/* mmap.c */ +void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas); /* interval_tree.c */ void vma_interval_tree_insert(struct vm_area_struct *node, @@ -2667,6 +2669,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas); +void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas); + static inline int check_data_rlimit(unsigned long rlim, unsigned long new, unsigned long start, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6b961a29bf26f..e810aaca6c04b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -481,6 +482,7 @@ struct kioctx_table; struct mm_struct { struct { struct vm_area_struct *mmap; /* list of VMAs */ + struct maple_tree mm_mt; struct rb_root mm_rb; u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU @@ -676,6 +678,7 @@ struct mm_struct { unsigned long cpu_bitmap[]; }; +#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN) extern struct mm_struct init_mm; /* Pointer magic because the dynamic array size confuses some compilers. 
*/ diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h index 4661f7ba07c05..216de5f036210 100644 --- a/include/trace/events/mmap.h +++ b/include/trace/events/mmap.h @@ -42,6 +42,79 @@ TRACE_EVENT(vm_unmapped_area, __entry->low_limit, __entry->high_limit, __entry->align_mask, __entry->align_offset) ); + +TRACE_EVENT(vma_mas_szero, + TP_PROTO(struct maple_tree *mt, unsigned long start, + unsigned long end), + + TP_ARGS(mt, start, end), + + TP_STRUCT__entry( + __field(struct maple_tree *, mt) + __field(unsigned long, start) + __field(unsigned long, end) + ), + + TP_fast_assign( + __entry->mt = mt; + __entry->start = start; + __entry->end = end; + ), + + TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,", + __entry->mt, + (unsigned long) __entry->start, + (unsigned long) __entry->end + ) +); + +TRACE_EVENT(vma_store, + TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma), + + TP_ARGS(mt, vma), + + TP_STRUCT__entry( + __field(struct maple_tree *, mt) + __field(struct vm_area_struct *, vma) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + ), + + TP_fast_assign( + __entry->mt = mt; + __entry->vma = vma; + __entry->vm_start = vma->vm_start; + __entry->vm_end = vma->vm_end - 1; + ), + + TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,", + __entry->mt, __entry->vma, + (unsigned long) __entry->vm_start, + (unsigned long) __entry->vm_end + ) +); + + +TRACE_EVENT(exit_mmap, + TP_PROTO(struct mm_struct *mm), + + TP_ARGS(mm), + + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(struct maple_tree *, mt) + ), + + TP_fast_assign( + __entry->mm = mm; + __entry->mt = &mm->mm_mt; + ), + + TP_printk("mt_mod %p, DESTROY\n", + __entry->mt + ) +); + #endif /* This part must be outside protection */ diff --git a/kernel/fork.c b/kernel/fork.c index 9d44f2d46c696..1840da0732f60 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -585,6 +585,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, int retval; unsigned long charge; LIST_HEAD(uf); + MA_STATE(mas, &mm->mm_mt, 0, 0); uprobe_start_dup_mmap(); if (mmap_write_lock_killable(oldmm)) { @@ -614,6 +615,10 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; khugepaged_fork(mm, oldmm); + retval = mas_expected_entries(&mas, oldmm->map_count); + if (retval) + goto out; + prev = NULL; for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { struct file *file; @@ -629,7 +634,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, */ if (fatal_signal_pending(current)) { retval = -EINTR; - goto out; + goto loop_out; } if (mpnt->vm_flags & VM_ACCOUNT) { unsigned long len = vma_pages(mpnt); @@ -694,6 +699,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; + /* Link the vma into the MT */ + mas.index = tmp->vm_start; + mas.last = tmp->vm_end - 1; + mas_store(&mas, tmp); + mm->map_count++; if (!(tmp->vm_flags & VM_WIPEONFORK)) retval = copy_page_range(tmp, mpnt); @@ -702,10 +712,12 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, tmp->vm_ops->open(tmp); if (retval) - goto out; + goto loop_out; } /* a new mm has just been created */ retval = arch_dup_mmap(oldmm, mm); +loop_out: + mas_destroy(&mas); out: mmap_write_unlock(mm); flush_tlb_mm(oldmm); @@ -721,7 +733,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, fail_nomem: retval = -ENOMEM; vm_unacct_memory(charge); - goto out; + goto loop_out; } static inline int mm_alloc_pgd(struct mm_struct *mm) @@ -1111,6 +1123,8 @@ static struct mm_struct 
*mm_init(struct mm_struct *mm, struct task_struct *p, { mm->mmap = NULL; mm->mm_rb = RB_ROOT; + mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); + mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); mm->vmacache_seqnum = 0; atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); diff --git a/mm/init-mm.c b/mm/init-mm.c index fbe7844d0912f..b912b0f2eceda 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include @@ -29,6 +30,7 @@ */ struct mm_struct init_mm = { .mm_rb = RB_ROOT, + .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock), .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), diff --git a/mm/mmap.c b/mm/mmap.c index c14d7286a3798..79654a627baf2 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -381,7 +381,70 @@ static int browse_rb(struct mm_struct *mm) } return bug ? -1 : i; } +#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) +extern void mt_validate(struct maple_tree *mt); +extern void mt_dump(const struct maple_tree *mt); +/* Validate the maple tree */ +static void validate_mm_mt(struct mm_struct *mm) +{ + struct maple_tree *mt = &mm->mm_mt; + struct vm_area_struct *vma_mt, *vma = mm->mmap; + + MA_STATE(mas, mt, 0, 0); + mas_for_each(&mas, vma_mt, ULONG_MAX) { + if (xa_is_zero(vma_mt)) + continue; + + if (!vma) + break; + + if ((vma != vma_mt) || + (vma->vm_start != vma_mt->vm_start) || + (vma->vm_end != vma_mt->vm_end) || + (vma->vm_start != mas.index) || + (vma->vm_end - 1 != mas.last)) { + pr_emerg("issue in %s\n", current->comm); + dump_stack(); +#ifdef CONFIG_DEBUG_VM + dump_vma(vma_mt); + pr_emerg("and next in rb\n"); + dump_vma(vma->vm_next); +#endif + pr_emerg("mt piv: %px %lu - %lu\n", vma_mt, + mas.index, mas.last); + pr_emerg("mt vma: %px %lu - %lu\n", vma_mt, + vma_mt->vm_start, vma_mt->vm_end); + pr_emerg("rb vma: %px %lu - %lu\n", vma, + vma->vm_start, vma->vm_end); + pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next, + vma->vm_next->vm_start, vma->vm_next->vm_end); + + mt_dump(mas.tree); + if (vma_mt->vm_end != mas.last + 1) { + pr_err("vma: %px vma_mt %lu-%lu\tmt %lu-%lu\n", + mm, vma_mt->vm_start, vma_mt->vm_end, + mas.index, mas.last); + mt_dump(mas.tree); + } + VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm); + if (vma_mt->vm_start != mas.index) { + pr_err("vma: %px vma_mt %px %lu - %lu doesn't match\n", + mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end); + mt_dump(mas.tree); + } + VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm); + } + VM_BUG_ON(vma != vma_mt); + vma = vma->vm_next; + + } + VM_BUG_ON(vma); + mt_validate(&mm->mm_mt); +} +#else +#define validate_mm_mt(root) do { } while (0) +#endif static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) { struct rb_node *nd; @@ -436,6 +499,7 @@ static void validate_mm(struct mm_struct *mm) } #else #define validate_mm_rb(root, ignore) do { } while (0) +#define validate_mm_mt(root) do { } while (0) #define validate_mm(mm) do { } while (0) #endif @@ -680,6 +744,56 @@ static void __vma_link_file(struct vm_area_struct *vma) } } +/* + * vma_mas_store() - Store a VMA in the maple tree. + * @vma: The vm_area_struct + * @mas: The maple state + * + * Efficient way to store a VMA in the maple tree when the @mas has already + * walked to the correct location. + * + * Note: the end address is inclusive in the maple tree. 
+ */ +void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas) +{ + trace_vma_store(mas->tree, vma); + mas_set_range(mas, vma->vm_start, vma->vm_end - 1); + mas_store_prealloc(mas, vma); +} + +/* + * vma_mas_remove() - Remove a VMA from the maple tree. + * @vma: The vm_area_struct + * @mas: The maple state + * + * Efficient way to remove a VMA from the maple tree when the @mas has already + * been established and points to the correct location. + * Note: the end address is inclusive in the maple tree. + */ +void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas) +{ + trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1); + mas->index = vma->vm_start; + mas->last = vma->vm_end - 1; + mas_store_prealloc(mas, NULL); +} + +/* + * vma_mas_szero() - Set a given range to zero. Used when modifying a + * vm_area_struct start or end. + * + * @mm: The struct_mm + * @start: The start address to zero + * @end: The end address to zero. + */ +static inline void vma_mas_szero(struct ma_state *mas, unsigned long start, + unsigned long end) +{ + trace_vma_mas_szero(mas->tree, start, end - 1); + mas_set_range(mas, start, end - 1); + mas_store_prealloc(mas, NULL); +} + static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, @@ -689,17 +803,22 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, __vma_link_rb(mm, vma, rb_link, rb_parent); } -static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, +static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { + MA_STATE(mas, &mm->mm_mt, 0, 0); struct address_space *mapping = NULL; + if (mas_preallocate(&mas, vma, GFP_KERNEL)) + return -ENOMEM; + if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); } + vma_mas_store(vma, &mas); __vma_link(mm, vma, prev, rb_link, rb_parent); __vma_link_file(vma); @@ -708,13 +827,15 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, mm->map_count++; validate_mm(mm); + return 0; } /* * Helper for vma_adjust() in the split_vma insert case: insert a vma into the * mm's list and rbtree. It has already been inserted into the interval tree. 
*/ -static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) +static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas, + struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; @@ -722,7 +843,10 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) BUG(); - __vma_link(mm, vma, prev, rb_link, rb_parent); + + vma_mas_store(vma, mas); + __vma_link_list(mm, vma, prev); + __vma_link_rb(mm, vma, rb_link, rb_parent); mm->map_count++; } @@ -756,10 +880,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, bool start_changed = false, end_changed = false; long adjust_next = 0; int remove_next = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); + struct vm_area_struct *exporter = NULL, *importer = NULL; - if (next && !insert) { - struct vm_area_struct *exporter = NULL, *importer = NULL; + validate_mm(mm); + validate_mm_mt(mm); + if (next && !insert) { if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and @@ -842,6 +969,12 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, again: vma_adjust_trans_huge(orig_vma, start, end, adjust_next); + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { + if (exporter && exporter->anon_vma) + unlink_anon_vmas(importer); + return -ENOMEM; + } + if (file) { mapping = file->f_mapping; root = &mapping->i_mmap; @@ -882,17 +1015,28 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (start != vma->vm_start) { + unsigned long old_start = vma->vm_start; vma->vm_start = start; + if (old_start < start) + vma_mas_szero(&mas, old_start, start); start_changed = true; } if (end != vma->vm_end) { + unsigned long old_end = vma->vm_end; vma->vm_end = end; + if (old_end > end) + vma_mas_szero(&mas, end, old_end); end_changed = true; } + + if (end_changed || start_changed) + vma_mas_store(vma, &mas); + vma->vm_pgoff = pgoff; if (adjust_next) { next->vm_start += adjust_next; next->vm_pgoff += adjust_next >> PAGE_SHIFT; + vma_mas_store(next, &mas); } if (file) { @@ -906,6 +1050,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, /* * vma_merge has merged next into vma, and needs * us to remove next before dropping the locks. + * Since we have expanded over this vma, the maple tree will + * have overwritten by storing the value */ if (remove_next != 3) __vma_unlink(mm, next, next); @@ -928,7 +1074,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * us to insert it before dropping the locks * (it may either follow vma or precede it). */ - __insert_vm_struct(mm, insert); + __insert_vm_struct(mm, &mas, insert); } else { if (start_changed) vma_gap_update(vma); @@ -1025,6 +1171,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, uprobe_mmap(insert); validate_mm(mm); + validate_mm_mt(mm); return 0; } @@ -1178,6 +1325,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *area, *next; int err; + validate_mm_mt(mm); /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. 
@@ -1253,6 +1401,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, khugepaged_enter_vma(area, vm_flags); return area; } + validate_mm_mt(mm); return NULL; } @@ -1732,6 +1881,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; + validate_mm_mt(mm); /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { unsigned long nr_pages; @@ -1846,7 +1996,13 @@ unsigned long mmap_region(struct file *file, unsigned long addr, goto free_vma; } - vma_link(mm, vma, prev, rb_link, rb_parent); + if (vma_link(mm, vma, prev, rb_link, rb_parent)) { + error = -ENOMEM; + if (file) + goto unmap_and_free_vma; + else + goto free_vma; + } /* * vma_merge() calls khugepaged_enter_vma() either, the below @@ -1886,6 +2042,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_set_page_prot(vma); + validate_mm_mt(mm); return addr; unmap_and_free_vma: @@ -1902,6 +2059,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, unacct_error: if (charged) vm_unacct_memory(charged); + validate_mm_mt(mm); return error; } @@ -1918,12 +2076,19 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; + unsigned long gap; + MA_STATE(mas, &mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; + mas_empty_area(&mas, info->low_limit, info->high_limit - 1, + length); + gap = mas.index; + gap += (info->align_offset - gap) & info->align_mask; + /* Adjust search limits by the desired length */ if (info->high_limit < length) return -ENOMEM; @@ -2005,20 +2170,31 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) VM_BUG_ON(gap_start + info->length > info->high_limit); VM_BUG_ON(gap_start + info->length > gap_end); + + VM_BUG_ON(gap != gap_start); return gap_start; } static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma = NULL; unsigned long length, low_limit, high_limit, gap_start, gap_end; + unsigned long gap; + + MA_STATE(mas, &mm->mm_mt, 0, 0); + validate_mm_mt(mm); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; + mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, + length); + gap = mas.last + 1 - info->length; + gap -= (gap - info->align_offset) & info->align_mask; + /* * Adjust search limits by the desired length. * See implementation comment at top of unmapped_area(). 
@@ -2104,6 +2280,32 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) VM_BUG_ON(gap_end < info->low_limit); VM_BUG_ON(gap_end < gap_start); + + if (gap != gap_end) { + pr_err("%s: %px Gap was found: mt %lu gap_end %lu\n", __func__, + mm, gap, gap_end); + pr_err("window was %lu - %lu size %lu\n", info->high_limit, + info->low_limit, length); + pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max, + mas.last); + pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index, + info->align_mask, info->align_offset); + pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index, + find_vma(mm, mas.index), vma); +#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) + mt_dump(&mm->mm_mt); +#endif + { + struct vm_area_struct *dv = mm->mmap; + + while (dv) { + printk("vma %px %lu-%lu\n", dv, dv->vm_start, dv->vm_end); + dv = dv->vm_next; + } + } + VM_BUG_ON(gap != gap_end); + } + return gap_end; } @@ -2326,7 +2528,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) vmacache_update(addr, vma); return vma; } - EXPORT_SYMBOL(find_vma); /* @@ -2399,7 +2600,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) struct vm_area_struct *next; unsigned long gap_addr; int error = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); + validate_mm_mt(mm); if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; @@ -2423,9 +2626,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Check that both stack segments have the same anon_vma? */ } + if (mas_preallocate(&mas, vma, GFP_KERNEL)) + return -ENOMEM; + /* We must make sure the anon_vma is allocated. */ - if (unlikely(anon_vma_prepare(vma))) + if (unlikely(anon_vma_prepare(vma))) { + mas_destroy(&mas); return -ENOMEM; + } /* * vma->vm_start/vm_end cannot change under us because the caller @@ -2462,6 +2670,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; + /* Overwrite old entry in mtree. */ + vma_mas_store(vma, &mas); anon_vma_interval_tree_post_update_vma(vma); if (vma->vm_next) vma_gap_update(vma->vm_next); @@ -2476,6 +2686,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma(vma, vma->vm_flags); validate_mm(mm); + validate_mm_mt(mm); + mas_destroy(&mas); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ @@ -2489,7 +2701,9 @@ int expand_downwards(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); + validate_mm(mm); address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; @@ -2503,9 +2717,14 @@ int expand_downwards(struct vm_area_struct *vma, return -ENOMEM; } + if (mas_preallocate(&mas, vma, GFP_KERNEL)) + return -ENOMEM; + /* We must make sure the anon_vma is allocated. */ - if (unlikely(anon_vma_prepare(vma))) + if (unlikely(anon_vma_prepare(vma))) { + mas_destroy(&mas); return -ENOMEM; + } /* * vma->vm_start/vm_end cannot change under us because the caller @@ -2543,6 +2762,8 @@ int expand_downwards(struct vm_area_struct *vma, anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; + /* Overwrite old entry in mtree. 
*/ + vma_mas_store(vma, &mas); anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); @@ -2554,6 +2775,7 @@ int expand_downwards(struct vm_area_struct *vma, anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma(vma, vma->vm_flags); validate_mm(mm); + mas_destroy(&mas); return error; } @@ -2676,14 +2898,17 @@ static void unmap_region(struct mm_struct *mm, * vma list as we go.. */ static bool -detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev, unsigned long end) +detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas, + struct vm_area_struct *vma, struct vm_area_struct *prev, + unsigned long end) { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; + mas_set_range(mas, vma->vm_start, end - 1); + mas_store_prealloc(mas, NULL); do { vma_rb_erase(vma, &mm->mm_rb); if (vma->vm_flags & VM_LOCKED) @@ -2724,6 +2949,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, { struct vm_area_struct *new; int err; + validate_mm_mt(mm); if (vma->vm_ops && vma->vm_ops->may_split) { err = vma->vm_ops->may_split(vma, addr); @@ -2776,6 +3002,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, mpol_put(vma_policy(new)); out_free_vma: vm_area_free(new); + validate_mm_mt(mm); return err; } @@ -2802,6 +3029,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, { unsigned long end; struct vm_area_struct *vma, *prev, *last; + int error = -ENOMEM; + MA_STATE(mas, &mm->mm_mt, 0, 0); if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; @@ -2822,6 +3051,9 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, vma = find_vma_intersection(mm, start, end); if (!vma) return 0; + + if (mas_preallocate(&mas, vma, GFP_KERNEL)) + return -ENOMEM; prev = vma->vm_prev; /* @@ -2832,7 +3064,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, * places tmp vma above, and higher split_vma places tmp vma below. */ if (start > vma->vm_start) { - int error; /* * Make sure that map_count on return from munmap() will @@ -2840,20 +3071,20 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, * its limit temporarily, to help free resources as expected. */ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) - return -ENOMEM; + goto map_count_exceeded; error = __split_vma(mm, vma, start, 0); if (error) - return error; + goto split_failed; prev = vma; } /* Does it split the last one? */ last = find_vma(mm, end); if (last && end > last->vm_start) { - int error = __split_vma(mm, last, end, 1); + error = __split_vma(mm, last, end, 1); if (error) - return error; + goto split_failed; } vma = vma_next(mm, prev); @@ -2867,13 +3098,13 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, * split, despite we could. This is unlikely enough * failure that it's not worth optimizing it for. 
*/ - int error = userfaultfd_unmap_prep(vma, start, end, uf); + error = userfaultfd_unmap_prep(vma, start, end, uf); if (error) - return error; + goto userfaultfd_error; } /* Detach vmas from rbtree */ - if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) + if (!detach_vmas_to_be_unmapped(mm, &mas, vma, prev, end)) downgrade = false; if (downgrade) @@ -2885,6 +3116,12 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, remove_vma_list(mm, vma); return downgrade ? 1 : 0; + +map_count_exceeded: +split_failed: +userfaultfd_error: + mas_destroy(&mas); + return error; } int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, @@ -3024,6 +3261,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla pgoff_t pgoff = addr >> PAGE_SHIFT; int error; unsigned long mapped_addr; + validate_mm_mt(mm); /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) @@ -3073,7 +3311,9 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla vma->vm_pgoff = pgoff; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); - vma_link(mm, vma, prev, rb_link, rb_parent); + if(vma_link(mm, vma, prev, rb_link, rb_parent)) + goto no_vma_link; + out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; @@ -3081,7 +3321,12 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vma->vm_flags |= VM_SOFTDIRTY; + validate_mm_mt(mm); return 0; + +no_vma_link: + vm_area_free(vma); + return -ENOMEM; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) @@ -3170,6 +3415,9 @@ void exit_mmap(struct mm_struct *mm) vma = remove_vma(vma); cond_resched(); } + + trace_exit_mmap(mm); + __mt_destroy(&mm->mm_mt); mm->mmap = NULL; mmap_write_unlock(mm); vm_unacct_memory(nr_accounted); @@ -3183,10 +3431,25 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; + unsigned long start = vma->vm_start; + struct vm_area_struct *overlap = NULL; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) return -ENOMEM; + + overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1); + if (overlap) { + + pr_err("Found vma ending at %lu\n", start - 1); + pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap, + overlap->vm_start, overlap->vm_end - 1); +#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) + mt_dump(&mm->mm_mt); +#endif + BUG(); + } + if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -3208,7 +3471,9 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } - vma_link(mm, vma, prev, rb_link, rb_parent); + if (vma_link(mm, vma, prev, rb_link, rb_parent)) + return -ENOMEM; + return 0; } @@ -3226,7 +3491,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, struct vm_area_struct *new_vma, *prev; struct rb_node **rb_link, *rb_parent; bool faulted_in_anon_vma = true; + unsigned long index = addr; + validate_mm_mt(mm); /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
@@ -3238,6 +3505,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ + if (mt_find(&mm->mm_mt, &index, addr+len - 1)) + BUG(); new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); @@ -3281,6 +3550,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } + validate_mm_mt(mm); return new_vma; out_free_mempol: @@ -3288,6 +3558,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, out_free_vma: vm_area_free(new_vma); out: + validate_mm_mt(mm); return NULL; } @@ -3424,6 +3695,7 @@ static struct vm_area_struct *__install_special_mapping( int ret; struct vm_area_struct *vma; + validate_mm_mt(mm); vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); @@ -3446,10 +3718,12 @@ static struct vm_area_struct *__install_special_mapping( perf_event_mmap(vma); + validate_mm_mt(mm); return vma; out: vm_area_free(vma); + validate_mm_mt(mm); return ERR_PTR(ret); } diff --git a/mm/nommu.c b/mm/nommu.c index 9d7afc2d959e4..5af0b050eba8d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -545,6 +545,19 @@ static void put_nommu_region(struct vm_region *region) __put_nommu_region(region); } +void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas) +{ + mas_set_range(mas, vma->vm_start, vma->vm_end - 1); + mas_store_prealloc(mas, vma); +} + +void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas) +{ + mas->index = vma->vm_start; + mas->last = vma->vm_end - 1; + mas_store_prealloc(mas, NULL); +} + /* * add a VMA into a process's mm_struct in the appropriate place in the list * and tree and add to the address space's page tree also if not an anonymous From 77b2cbec380eb3f496aa4762bcf4627538cd20e1 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 28 Jun 2022 15:08:19 -0400 Subject: [PATCH 019/321] mm/mmap: reorder validate_mm_mt() checks Allows for detecting broken trees before attempting to iterate over the tree. Link: https://lkml.kernel.org/r/20220629153127.p2k2cqkc7qy7f3vw@revolver Fixes: ("mm: start tracking VMAs with maple tree") Signed-off-by: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mmap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/mmap.c b/mm/mmap.c index 79654a627baf2..148b1c445d39f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -392,6 +392,8 @@ static void validate_mm_mt(struct mm_struct *mm) struct vm_area_struct *vma_mt, *vma = mm->mmap; MA_STATE(mas, mt, 0, 0); + + mt_validate(&mm->mm_mt); mas_for_each(&mas, vma_mt, ULONG_MAX) { if (xa_is_zero(vma_mt)) continue; @@ -440,7 +442,6 @@ static void validate_mm_mt(struct mm_struct *mm) } VM_BUG_ON(vma); - mt_validate(&mm->mm_mt); } #else #define validate_mm_mt(root) do { } while (0) From 135f68f0b80db23a16b1cd2e411d82986e66193e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:46:54 +0000 Subject: [PATCH 020/321] mm: add VMA iterator This thin layer of abstraction over the maple tree state is for iterating over VMAs. You can go forwards, go backwards or ask where the iterator is. Rename the existing vma_next() to __vma_next() -- it will be removed by the end of this series. 
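As a usage sketch (not part of this series), a hypothetical caller of the new iterator could walk every VMA in an mm roughly as follows; walk_all_vmas() is illustrative only and assumes the caller handles the mmap_lock as shown:

	/* Hypothetical example: walk every VMA using the new VMA iterator. */
	static void walk_all_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */

		mmap_read_lock(mm);		/* the iterator itself takes no locks */
		for_each_vma(vmi, vma)
			pr_info("vma %px: %lx-%lx\n", vma,
				vma->vm_start, vma->vm_end);
		mmap_read_unlock(mm);
	}

for_each_vma_range(vmi, vma, end) works the same way when the walk should stop at an exclusive end address, and vma_prev() steps the iterator backwards.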
Link: https://lkml.kernel.org/r/20220504010716.661115-11-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-10-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Reviewed-by: David Hildenbrand Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- include/linux/mm.h | 31 +++++++++++++++++++++++++++++++ include/linux/mm_types.h | 21 +++++++++++++++++++++ mm/mmap.c | 10 +++++----- 3 files changed, 57 insertions(+), 5 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 3e25287cb8148..08cf4b5867987 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -658,6 +658,37 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) return vma->vm_flags & VM_ACCESS_FLAGS; } +static inline +struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) +{ + return mas_find(&vmi->mas, max); +} + +static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) +{ + /* + * Uses vma_find() to get the first VMA when the iterator starts. + * Calling mas_next() could skip the first entry. + */ + return vma_find(vmi, ULONG_MAX); +} + +static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi) +{ + return mas_prev(&vmi->mas, 0); +} + +static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) +{ + return vmi->mas.index; +} + +#define for_each_vma(vmi, vma) while ((vma = vma_next(&(vmi))) != NULL) + +/* The MM code likes to work with exclusive end addresses */ +#define for_each_vma_range(vmi, vma, end) \ + while ((vma = vma_find(&(vmi), (end) - 1)) != NULL) + #ifdef CONFIG_SHMEM /* * The vma_is_shmem is not inline because it is used only by slow diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e810aaca6c04b..a0e4b638d6b4c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -696,6 +696,27 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) return (struct cpumask *)&mm->cpu_bitmap; } +struct vma_iterator { + struct ma_state mas; +}; + +#define VMA_ITERATOR(name, mm, addr) \ + struct vma_iterator name = { \ + .mas = { \ + .tree = &mm->mm_mt, \ + .index = addr, \ + .node = MAS_START, \ + }, \ + } + +static inline void vma_iter_init(struct vma_iterator *vmi, + struct mm_struct *mm, unsigned long addr) +{ + vmi->mas.tree = &mm->mm_mt; + vmi->mas.index = addr; + vmi->mas.node = MAS_START; +} + struct mmu_gather; extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm); extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm); diff --git a/mm/mmap.c b/mm/mmap.c index 148b1c445d39f..46fb841cf4342 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -633,7 +633,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr, } /* - * vma_next() - Get the next VMA. + * __vma_next() - Get the next VMA. * @mm: The mm_struct. * @vma: The current vma. * @@ -641,7 +641,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr, * * Returns: The next VMA after @vma. 
*/ -static inline struct vm_area_struct *vma_next(struct mm_struct *mm, +static inline struct vm_area_struct *__vma_next(struct mm_struct *mm, struct vm_area_struct *vma) { if (!vma) @@ -1334,7 +1334,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, if (vm_flags & VM_SPECIAL) return NULL; - next = vma_next(mm, prev); + next = __vma_next(mm, prev); area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; @@ -2882,7 +2882,7 @@ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) { - struct vm_area_struct *next = vma_next(mm, prev); + struct vm_area_struct *next = __vma_next(mm, prev); struct mmu_gather tlb; lru_add_drain(); @@ -3087,7 +3087,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, if (error) goto split_failed; } - vma = vma_next(mm, prev); + vma = __vma_next(mm, prev); if (unlikely(uf)) { /* From f625ff561f9f3ace4475ee4e1b2673e9e7fad63b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:46:54 +0000 Subject: [PATCH 021/321] mmap: use the VMA iterator in count_vma_pages_range() This simplifies the implementation and is faster than using the linked list. Link: https://lkml.kernel.org/r/20220504010716.661115-12-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-11-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Reviewed-by: David Hildenbrand Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 46fb841cf4342..089ab21920da3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -676,29 +676,19 @@ munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len, return 0; } + static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { - unsigned long nr_pages = 0; + VMA_ITERATOR(vmi, mm, addr); struct vm_area_struct *vma; + unsigned long nr_pages = 0; - /* Find first overlapping mapping */ - vma = find_vma_intersection(mm, addr, end); - if (!vma) - return 0; - - nr_pages = (min(end, vma->vm_end) - - max(addr, vma->vm_start)) >> PAGE_SHIFT; - - /* Iterate over the rest of the overlaps */ - for (vma = vma->vm_next; vma; vma = vma->vm_next) { - unsigned long overlap_len; - - if (vma->vm_start > end) - break; + for_each_vma_range(vmi, vma, end) { + unsigned long vm_start = max(addr, vma->vm_start); + unsigned long vm_end = min(end, vma->vm_end); - overlap_len = min(end, vma->vm_end) - vma->vm_start; - nr_pages += overlap_len >> PAGE_SHIFT; + nr_pages += (vm_end - vm_start) / PAGE_SIZE; } return nr_pages; From 7f356c7f3442192f07e96c61dc70ce0d1b536f05 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:54 +0000 Subject: [PATCH 022/321] mm/mmap: use the maple tree in find_vma() instead of the rbtree. Using the maple tree interface mt_find() will handle the RCU locking and will start searching at the address up to the limit, ULONG_MAX in this case. Add kernel documentation to this API. Link: https://lkml.kernel.org/r/20220504010716.661115-13-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-12-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Reviewed-by: David Hildenbrand Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 089ab21920da3..5eaeb907ce40c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2487,11 +2487,18 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, EXPORT_SYMBOL(get_unmapped_area); -/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ +/** + * find_vma() - Find the VMA for a given address, or the next vma. + * @mm: The mm_struct to check + * @addr: The address + * + * Returns: The VMA associated with addr, or the next vma. + * May return %NULL in the case of no vma at addr or above. + */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { - struct rb_node *rb_node; struct vm_area_struct *vma; + unsigned long index = addr; mmap_assert_locked(mm); /* Check the cache first. */ @@ -2499,22 +2506,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) if (likely(vma)) return vma; - rb_node = mm->mm_rb.rb_node; - - while (rb_node) { - struct vm_area_struct *tmp; - - tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); - - if (tmp->vm_end > addr) { - vma = tmp; - if (tmp->vm_start <= addr) - break; - rb_node = rb_node->rb_left; - } else - rb_node = rb_node->rb_right; - } - + vma = mt_find(&mm->mm_mt, &index, ULONG_MAX); if (vma) vmacache_update(addr, vma); return vma; From 6f6a479e40d2dc6e171e22c347b42a3985359759 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:55 +0000 Subject: [PATCH 023/321] mm/mmap: use the maple tree for find_vma_prev() instead of the rbtree Use the maple tree's advanced API and a maple state to walk the tree for the entry at the address of the next vma, then use the maple state to walk back one entry to find the previous entry. Add kernel documentation comments for this API. Link: https://lkml.kernel.org/r/20220504010716.661115-14-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-13-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Vlastimil Babka Reviewed-by: David Hildenbrand Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 5eaeb907ce40c..49d47ca1f63a2 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2513,23 +2513,30 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) } EXPORT_SYMBOL(find_vma); -/* - * Same as find_vma, but also return a pointer to the previous VMA in *pprev. +/** + * find_vma_prev() - Find the VMA for a given address, or the next vma and + * set %pprev to the previous VMA, if any. + * @mm: The mm_struct to check + * @addr: The address + * @pprev: The pointer to set to the previous VMA + * + * Note that RCU lock is missing here since the external mmap_lock() is used + * instead. + * + * Returns: The VMA associated with @addr, or the next vma. + * May return %NULL in the case of no vma at addr or above. 
*/ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; + MA_STATE(mas, &mm->mm_mt, addr, addr); - vma = find_vma(mm, addr); - if (vma) { - *pprev = vma->vm_prev; - } else { - struct rb_node *rb_node = rb_last(&mm->mm_rb); - - *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL; - } + vma = mas_walk(&mas); + *pprev = mas_prev(&mas, 0); + if (!vma) + vma = mas_next(&mas, ULONG_MAX); return vma; } From 4dfcd8c907a672a8a09f6fdc565621d3756e498a Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:55 +0000 Subject: [PATCH 024/321] mm/mmap: use maple tree for unmapped_area{_topdown} The maple tree code was added to find the unmapped area in a previous commit and was checked against what the rbtree returned, but the actual result was never used. Start using the maple tree implementation and remove the rbtree code. Add kernel documentation comment for these functions. Link: https://lkml.kernel.org/r/20220504010716.661115-15-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-14-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 253 +++++++----------------------------------------------- 1 file changed, 32 insertions(+), 221 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 49d47ca1f63a2..722fa59cf1260 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2054,250 +2054,61 @@ unsigned long mmap_region(struct file *file, unsigned long addr, return error; } +/* unmapped_area() Find an area between the low_limit and the high_limit with + * the correct alignment and offset, all from @info. Note: current->mm is used + * for the search. + * + * @info: The unmapped area information including the range (low_limit - + * hight_limit), the alignment offset and mask. + * + * Return: A memory address or -ENOMEM. + */ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) { - /* - * We implement the search by looking for an rbtree node that - * immediately follows a suitable gap. 
That is, - * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; - * - gap_end = vma->vm_start >= info->low_limit + length; - * - gap_end - gap_start >= length - */ + unsigned long length, gap; - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long length, low_limit, high_limit, gap_start, gap_end; - unsigned long gap; - MA_STATE(mas, &mm->mm_mt, 0, 0); + MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; - mas_empty_area(&mas, info->low_limit, info->high_limit - 1, - length); - gap = mas.index; - gap += (info->align_offset - gap) & info->align_mask; - - /* Adjust search limits by the desired length */ - if (info->high_limit < length) - return -ENOMEM; - high_limit = info->high_limit - length; - - if (info->low_limit > high_limit) - return -ENOMEM; - low_limit = info->low_limit + length; - - /* Check if rbtree root looks promising */ - if (RB_EMPTY_ROOT(&mm->mm_rb)) - goto check_highest; - vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); - if (vma->rb_subtree_gap < length) - goto check_highest; - - while (true) { - /* Visit left subtree if it looks promising */ - gap_end = vm_start_gap(vma); - if (gap_end >= low_limit && vma->vm_rb.rb_left) { - struct vm_area_struct *left = - rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb); - if (left->rb_subtree_gap >= length) { - vma = left; - continue; - } - } - - gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; -check_current: - /* Check if current node has a suitable gap */ - if (gap_start > high_limit) - return -ENOMEM; - if (gap_end >= low_limit && - gap_end > gap_start && gap_end - gap_start >= length) - goto found; - - /* Visit right subtree if it looks promising */ - if (vma->vm_rb.rb_right) { - struct vm_area_struct *right = - rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb); - if (right->rb_subtree_gap >= length) { - vma = right; - continue; - } - } - - /* Go back up the rbtree to find next candidate node */ - while (true) { - struct rb_node *prev = &vma->vm_rb; - if (!rb_parent(prev)) - goto check_highest; - vma = rb_entry(rb_parent(prev), - struct vm_area_struct, vm_rb); - if (prev == vma->vm_rb.rb_left) { - gap_start = vm_end_gap(vma->vm_prev); - gap_end = vm_start_gap(vma); - goto check_current; - } - } - } - -check_highest: - /* Check highest gap, which does not precede any rbtree node */ - gap_start = mm->highest_vm_end; - gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ - if (gap_start > high_limit) + if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1, + length)) return -ENOMEM; -found: - /* We found a suitable gap. Clip it with the original low_limit. */ - if (gap_start < info->low_limit) - gap_start = info->low_limit; - - /* Adjust gap address to the desired alignment */ - gap_start += (info->align_offset - gap_start) & info->align_mask; - - VM_BUG_ON(gap_start + info->length > info->high_limit); - VM_BUG_ON(gap_start + info->length > gap_end); - - VM_BUG_ON(gap != gap_start); - return gap_start; + gap = mas.index; + gap += (info->align_offset - gap) & info->align_mask; + return gap; } +/* unmapped_area_topdown() Find an area between the low_limit and the + * high_limit with * the correct alignment and offset at the highest available + * address, all from * @info. Note: current->mm is used for the search. 
+ * + * @info: The unmapped area information including the range (low_limit - + * hight_limit), the alignment offset and mask. + * + * Return: A memory address or -ENOMEM. + */ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma = NULL; - unsigned long length, low_limit, high_limit, gap_start, gap_end; - unsigned long gap; - - MA_STATE(mas, &mm->mm_mt, 0, 0); - validate_mm_mt(mm); + unsigned long length, gap; + MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; - mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, - length); - gap = mas.last + 1 - info->length; - gap -= (gap - info->align_offset) & info->align_mask; - - /* - * Adjust search limits by the desired length. - * See implementation comment at top of unmapped_area(). - */ - gap_end = info->high_limit; - if (gap_end < length) - return -ENOMEM; - high_limit = gap_end - length; - - if (info->low_limit > high_limit) + if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, + length)) return -ENOMEM; - low_limit = info->low_limit + length; - - /* Check highest gap, which does not precede any rbtree node */ - gap_start = mm->highest_vm_end; - if (gap_start <= high_limit) - goto found_highest; - - /* Check if rbtree root looks promising */ - if (RB_EMPTY_ROOT(&mm->mm_rb)) - return -ENOMEM; - vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); - if (vma->rb_subtree_gap < length) - return -ENOMEM; - - while (true) { - /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; - if (gap_start <= high_limit && vma->vm_rb.rb_right) { - struct vm_area_struct *right = - rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb); - if (right->rb_subtree_gap >= length) { - vma = right; - continue; - } - } - -check_current: - /* Check if current node has a suitable gap */ - gap_end = vm_start_gap(vma); - if (gap_end < low_limit) - return -ENOMEM; - if (gap_start <= high_limit && - gap_end > gap_start && gap_end - gap_start >= length) - goto found; - - /* Visit left subtree if it looks promising */ - if (vma->vm_rb.rb_left) { - struct vm_area_struct *left = - rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb); - if (left->rb_subtree_gap >= length) { - vma = left; - continue; - } - } - - /* Go back up the rbtree to find next candidate node */ - while (true) { - struct rb_node *prev = &vma->vm_rb; - if (!rb_parent(prev)) - return -ENOMEM; - vma = rb_entry(rb_parent(prev), - struct vm_area_struct, vm_rb); - if (prev == vma->vm_rb.rb_right) { - gap_start = vma->vm_prev ? - vm_end_gap(vma->vm_prev) : 0; - goto check_current; - } - } - } - -found: - /* We found a suitable gap. Clip it with the original high_limit. 
*/ - if (gap_end > info->high_limit) - gap_end = info->high_limit; - -found_highest: - /* Compute highest gap address at the desired alignment */ - gap_end -= info->length; - gap_end -= (gap_end - info->align_offset) & info->align_mask; - - VM_BUG_ON(gap_end < info->low_limit); - VM_BUG_ON(gap_end < gap_start); - - if (gap != gap_end) { - pr_err("%s: %px Gap was found: mt %lu gap_end %lu\n", __func__, - mm, gap, gap_end); - pr_err("window was %lu - %lu size %lu\n", info->high_limit, - info->low_limit, length); - pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max, - mas.last); - pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index, - info->align_mask, info->align_offset); - pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index, - find_vma(mm, mas.index), vma); -#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) - mt_dump(&mm->mm_mt); -#endif - { - struct vm_area_struct *dv = mm->mmap; - while (dv) { - printk("vma %px %lu-%lu\n", dv, dv->vm_start, dv->vm_end); - dv = dv->vm_next; - } - } - VM_BUG_ON(gap != gap_end); - } - - return gap_end; + gap = mas.last + 1 - info->length; + gap -= (gap - info->align_offset) & info->align_mask; + return gap; } /* From 7d338d544c28ab8299c7aeeee2f8227f35105cb2 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:55 +0000 Subject: [PATCH 025/321] kernel/fork: use maple tree for dup_mmap() during forking The maple tree was already tracking VMAs in this function by an earlier commit, but the rbtree iterator was being used to iterate the list. Change the iterator to use a maple tree native iterator and switch to the maple tree advanced API to avoid multiple walks of the tree during insert operations. Unexport the now-unused vma_store() function. For performance reasons we bulk allocate the maple tree nodes. The node calculations are done internally to the tree and use the VMA count and assume the worst-case node requirements. The VM_DONT_COPY flag does not allow for the most efficient copy method of the tree and so a bulk loading algorithm is used. Link: https://lkml.kernel.org/r/20220504010716.661115-16-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-15-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 -- kernel/fork.c | 12 +++++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 08cf4b5867987..ee1203d017620 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2635,8 +2635,6 @@ extern bool arch_has_descending_max_zone_pfns(void); /* nommu.c */ extern atomic_long_t mmap_pages_allocated; extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); -/* mmap.c */ -void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas); /* interval_tree.c */ void vma_interval_tree_insert(struct vm_area_struct *node, diff --git a/kernel/fork.c b/kernel/fork.c index 1840da0732f60..4ecca79556ebf 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -583,9 +583,10 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, struct vm_area_struct *mpnt, *tmp, *prev, **pprev; struct rb_node **rb_link, *rb_parent; int retval; - unsigned long charge; - LIST_HEAD(uf); + unsigned long charge = 0; + MA_STATE(old_mas, &oldmm->mm_mt, 0, 0); MA_STATE(mas, &mm->mm_mt, 0, 0); + LIST_HEAD(uf); uprobe_start_dup_mmap(); if (mmap_write_lock_killable(oldmm)) { @@ -620,7 +621,12 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; prev = NULL; - for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { + + retval = mas_expected_entries(&mas, oldmm->map_count); + if (retval) + goto out; + + mas_for_each(&old_mas, mpnt, ULONG_MAX) { struct file *file; if (mpnt->vm_flags & VM_DONTCOPY) { From a2ead6a28f3300348b88e52e7643b05b97175720 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:56 +0000 Subject: [PATCH 026/321] damon: convert __damon_va_three_regions to use the VMA iterator This rather specialised walk can use the VMA iterator. If this proves to be too slow, we can write a custom routine to find the two largest gaps, but it will be somewhat complicated, so let's see if we need it first. Update the kunit test case to use the maple tree. This also fixes an issue with the kunit testcase not adding the last VMA to the list. Link: https://lkml.kernel.org/r/20220504011215.661968-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-16-Liam.Howlett@oracle.com Fixes: 17ccae8bb5c9 (mm/damon: add kunit tests) Signed-off-by: Liam R. 
Howlett Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: SeongJae Park Reviewed-by: David Hildenbrand Cc: Catalin Marinas Cc: David Howells Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/damon/vaddr-test.h | 36 ++++++++++------------------- mm/damon/vaddr.c | 53 ++++++++++++++++++++++--------------------- 2 files changed, 39 insertions(+), 50 deletions(-) diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h index d4f55f3491007..bce37c4875402 100644 --- a/mm/damon/vaddr-test.h +++ b/mm/damon/vaddr-test.h @@ -14,33 +14,19 @@ #include -static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas) +static void __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas, + ssize_t nr_vmas) { - int i, j; - unsigned long largest_gap, gap; + int i; + MA_STATE(mas, mt, 0, 0); if (!nr_vmas) return; - for (i = 0; i < nr_vmas - 1; i++) { - vmas[i].vm_next = &vmas[i + 1]; - - vmas[i].vm_rb.rb_left = NULL; - vmas[i].vm_rb.rb_right = &vmas[i + 1].vm_rb; - - largest_gap = 0; - for (j = i; j < nr_vmas; j++) { - if (j == 0) - continue; - gap = vmas[j].vm_start - vmas[j - 1].vm_end; - if (gap > largest_gap) - largest_gap = gap; - } - vmas[i].rb_subtree_gap = largest_gap; - } - vmas[i].vm_next = NULL; - vmas[i].vm_rb.rb_right = NULL; - vmas[i].rb_subtree_gap = 0; + mas_lock(&mas); + for (i = 0; i < nr_vmas; i++) + vma_mas_store(&vmas[i], &mas); + mas_unlock(&mas); } /* @@ -72,6 +58,7 @@ static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas) */ static void damon_test_three_regions_in_vmas(struct kunit *test) { + static struct mm_struct mm; struct damon_addr_range regions[3] = {0,}; /* 10-20-25, 200-210-220, 300-305, 307-330 */ struct vm_area_struct vmas[] = { @@ -83,9 +70,10 @@ static void damon_test_three_regions_in_vmas(struct kunit *test) (struct vm_area_struct) {.vm_start = 307, .vm_end = 330}, }; - __link_vmas(vmas, 6); + mt_init_flags(&mm.mm_mt, MM_MT_FLAGS); + __link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)); - __damon_va_three_regions(&vmas[0], regions); + __damon_va_three_regions(&mm, regions); KUNIT_EXPECT_EQ(test, 10ul, regions[0].start); KUNIT_EXPECT_EQ(test, 25ul, regions[0].end); diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 3c7b9d6dca95d..d24148a8149fa 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -113,37 +113,38 @@ static unsigned long sz_range(struct damon_addr_range *r) * * Returns 0 if success, or negative error code otherwise. */ -static int __damon_va_three_regions(struct vm_area_struct *vma, +static int __damon_va_three_regions(struct mm_struct *mm, struct damon_addr_range regions[3]) { - struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0}; - struct vm_area_struct *last_vma = NULL; - unsigned long start = 0; - struct rb_root rbroot; - - /* Find two biggest gaps so that first_gap > second_gap > others */ - for (; vma; vma = vma->vm_next) { - if (!last_vma) { - start = vma->vm_start; - goto next; - } + struct damon_addr_range first_gap = {0}, second_gap = {0}; + VMA_ITERATOR(vmi, mm, 0); + struct vm_area_struct *vma, *prev = NULL; + unsigned long start; - if (vma->rb_subtree_gap <= sz_range(&second_gap)) { - rbroot.rb_node = &vma->vm_rb; - vma = rb_entry(rb_last(&rbroot), - struct vm_area_struct, vm_rb); + /* + * Find the two biggest gaps so that first_gap > second_gap > others. + * If this is too slow, it can be optimised to examine the maple + * tree gaps. 
+ */ + for_each_vma(vmi, vma) { + unsigned long gap; + + if (!prev) { + start = vma->vm_start; goto next; } - - gap.start = last_vma->vm_end; - gap.end = vma->vm_start; - if (sz_range(&gap) > sz_range(&second_gap)) { - swap(gap, second_gap); - if (sz_range(&second_gap) > sz_range(&first_gap)) - swap(second_gap, first_gap); + gap = vma->vm_start - prev->vm_end; + + if (gap > sz_range(&first_gap)) { + second_gap = first_gap; + first_gap.start = prev->vm_end; + first_gap.end = vma->vm_start; + } else if (gap > sz_range(&second_gap)) { + second_gap.start = prev->vm_end; + second_gap.end = vma->vm_start; } next: - last_vma = vma; + prev = vma; } if (!sz_range(&second_gap) || !sz_range(&first_gap)) @@ -159,7 +160,7 @@ static int __damon_va_three_regions(struct vm_area_struct *vma, regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION); regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION); regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION); - regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION); + regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION); return 0; } @@ -180,7 +181,7 @@ static int damon_va_three_regions(struct damon_target *t, return -EINVAL; mmap_read_lock(mm); - rc = __damon_va_three_regions(mm->mmap, regions); + rc = __damon_va_three_regions(mm, regions); mmap_read_unlock(mm); mmput(mm); From 1bd85ff196287148a56d1a0f4d5e46a6b58b1f8d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:46:56 +0000 Subject: [PATCH 027/321] proc: remove VMA rbtree use from nommu These users of the rbtree should probably have been walks of the linked list, but convert them to use walks of the maple tree. Link: https://lkml.kernel.org/r/20220504011345.662299-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-17-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/proc/task_nommu.c | 45 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index a6d21fc0033c6..2fd06f52b6a44 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -20,15 +20,13 @@ */ void task_mem(struct seq_file *m, struct mm_struct *mm) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; struct vm_region *region; - struct rb_node *p; unsigned long bytes = 0, sbytes = 0, slack = 0, size; - - mmap_read_lock(mm); - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { - vma = rb_entry(p, struct vm_area_struct, vm_rb); + mmap_read_lock(mm); + for_each_vma(vmi, vma) { bytes += kobjsize(vma); region = vma->vm_region; @@ -82,15 +80,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) unsigned long task_vsize(struct mm_struct *mm) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; - struct rb_node *p; unsigned long vsize = 0; mmap_read_lock(mm); - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { - vma = rb_entry(p, struct vm_area_struct, vm_rb); + for_each_vma(vmi, vma) vsize += vma->vm_end - vma->vm_start; - } mmap_read_unlock(mm); return vsize; } @@ -99,14 +95,13 @@ unsigned long task_statm(struct mm_struct *mm, unsigned long *shared, unsigned long *text, unsigned long *data, unsigned long *resident) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; struct vm_region *region; - struct rb_node *p; unsigned long size = kobjsize(mm); mmap_read_lock(mm); - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { - vma = rb_entry(p, struct vm_area_struct, vm_rb); + for_each_vma(vmi, vma) { size += kobjsize(vma); region = vma->vm_region; if (region) { @@ -190,17 +185,19 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) */ static int show_map(struct seq_file *m, void *_p) { - struct rb_node *p = _p; - - return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb)); + return nommu_vma_show(m, _p); } static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_maps_private *priv = m->private; struct mm_struct *mm; - struct rb_node *p; - loff_t n = *pos; + struct vm_area_struct *vma; + unsigned long addr = *pos; + + /* See m_next(). Zero at the start or after lseek. */ + if (addr == -1UL) + return NULL; /* pin the task and mm whilst we play with them */ priv->task = get_proc_task(priv->inode); @@ -216,10 +213,10 @@ static void *m_start(struct seq_file *m, loff_t *pos) return ERR_PTR(-EINTR); } - /* start from the Nth VMA */ - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) - if (n-- == 0) - return p; + /* start the next element from addr */ + vma = find_vma(mm, addr); + if (vma) + return vma; mmap_read_unlock(mm); mmput(mm); @@ -242,10 +239,10 @@ static void m_stop(struct seq_file *m, void *_vml) static void *m_next(struct seq_file *m, void *_p, loff_t *pos) { - struct rb_node *p = _p; + struct vm_area_struct *vma = _p; - (*pos)++; - return p ? rb_next(p) : NULL; + *pos = vma->vm_end; + return find_vma(vma->vm_mm, vma->vm_end); } static const struct seq_operations proc_pid_maps_ops = { From e76cf92b91ead50a4bbbcc3e6107e282f608359f Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:56 +0000 Subject: [PATCH 028/321] mm: remove rb tree. Remove the RB tree and start using the maple tree for vm_area_struct tracking. 
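In practice, "tracking" here means that VMA lookups and overlap checks are expressed against mm->mm_mt through a maple state instead of descending mm_rb. As an illustrative aside (not part of this patch), the core lookup pattern the conversion relies on looks roughly like the sketch below; MA_STATE(), mas_find() and mas_prev() are the maple tree primitives already used throughout this series, while the helper name itself is hypothetical and the caller is assumed to hold the mmap lock, as the existing helpers do:

	/* Illustrative sketch only, not part of the patch. */
	static bool example_range_is_free(struct mm_struct *mm,
					  unsigned long start, unsigned long end)
	{
		MA_STATE(mas, &mm->mm_mt, start, start);

		/*
		 * mas_find() returns the first VMA overlapping
		 * [start, end - 1], or NULL if the range is free.
		 */
		return mas_find(&mas, end - 1) == NULL;
	}

The range_has_overlap() helper introduced in the diff below follows the same shape and additionally records the previous VMA with mas_prev().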
Drop validate_mm() calls in expand_upwards() and expand_downwards() as the lock is not held. Link: https://lkml.kernel.org/r/20220504011345.662299-2-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-18-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/x86/kernel/tboot.c | 1 - drivers/firmware/efi/efi.c | 1 - include/linux/mm.h | 2 - include/linux/mm_types.h | 14 - kernel/fork.c | 8 - mm/init-mm.c | 2 - mm/mmap.c | 509 ++++++++----------------------------- mm/nommu.c | 87 ++----- mm/util.c | 10 +- 9 files changed, 144 insertions(+), 490 deletions(-) diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 71c54ad3868a0..3b388330a1063 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -96,7 +96,6 @@ void __init tboot_probe(void) static pgd_t *tboot_pg_dir; static struct mm_struct tboot_mm = { - .mm_rb = RB_ROOT, .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock), .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 1eddef189d689..07677fde00af5 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -57,7 +57,6 @@ static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR; struct mm_struct efi_mm = { - .mm_rb = RB_ROOT, .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock), .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), diff --git a/include/linux/mm.h b/include/linux/mm.h index ee1203d017620..560c6cc4994fe 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2690,8 +2690,6 @@ extern int __split_vma(struct mm_struct *, struct vm_area_struct *, extern int split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); -extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, - struct rb_node **, struct rb_node *); extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index a0e4b638d6b4c..0e226d390fca6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -405,19 +405,6 @@ struct vm_area_struct { /* linked list of VM areas per task, sorted by address */ struct vm_area_struct *vm_next, *vm_prev; - - struct rb_node vm_rb; - - /* - * Largest free memory gap in bytes to the left of this VMA. - * Either between this VMA and vma->vm_prev, or between one of the - * VMAs below us in the VMA rbtree and its ->vm_prev. This helps - * get_unmapped_area find a free area of the right size. - */ - unsigned long rb_subtree_gap; - - /* Second cache line starts here. */ - struct mm_struct *vm_mm; /* The address space we belong to. 
*/ /* @@ -483,7 +470,6 @@ struct mm_struct { struct { struct vm_area_struct *mmap; /* list of VMAs */ struct maple_tree mm_mt; - struct rb_root mm_rb; u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, diff --git a/kernel/fork.c b/kernel/fork.c index 4ecca79556ebf..db420a1e93c7d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -581,7 +581,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp, *prev, **pprev; - struct rb_node **rb_link, *rb_parent; int retval; unsigned long charge = 0; MA_STATE(old_mas, &oldmm->mm_mt, 0, 0); @@ -608,8 +607,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, mm->exec_vm = oldmm->exec_vm; mm->stack_vm = oldmm->stack_vm; - rb_link = &mm->mm_rb.rb_node; - rb_parent = NULL; pprev = &mm->mmap; retval = ksm_fork(mm, oldmm); if (retval) @@ -701,10 +698,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, tmp->vm_prev = prev; prev = tmp; - __vma_link_rb(mm, tmp, rb_link, rb_parent); - rb_link = &tmp->vm_rb.rb_right; - rb_parent = &tmp->vm_rb; - /* Link the vma into the MT */ mas.index = tmp->vm_start; mas.last = tmp->vm_end - 1; @@ -1128,7 +1121,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { mm->mmap = NULL; - mm->mm_rb = RB_ROOT; mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); mm->vmacache_seqnum = 0; diff --git a/mm/init-mm.c b/mm/init-mm.c index b912b0f2eceda..c9327abb771c5 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include #include #include @@ -29,7 +28,6 @@ * and size this cpu_bitmask to NR_CPUS. */ struct mm_struct init_mm = { - .mm_rb = RB_ROOT, .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock), .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), diff --git a/mm/mmap.c b/mm/mmap.c index 722fa59cf1260..fb8108bc0a643 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -294,93 +293,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) return origbrk; } -static inline unsigned long vma_compute_gap(struct vm_area_struct *vma) -{ - unsigned long gap, prev_end; - - /* - * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we - * allow two stack_guard_gaps between them here, and when choosing - * an unmapped area; whereas when expanding we only require one. - * That's a little inconsistent, but keeps the code here simpler. 
- */ - gap = vm_start_gap(vma); - if (vma->vm_prev) { - prev_end = vm_end_gap(vma->vm_prev); - if (gap > prev_end) - gap -= prev_end; - else - gap = 0; - } - return gap; -} - -#ifdef CONFIG_DEBUG_VM_RB -static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma) -{ - unsigned long max = vma_compute_gap(vma), subtree_gap; - if (vma->vm_rb.rb_left) { - subtree_gap = rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb)->rb_subtree_gap; - if (subtree_gap > max) - max = subtree_gap; - } - if (vma->vm_rb.rb_right) { - subtree_gap = rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb)->rb_subtree_gap; - if (subtree_gap > max) - max = subtree_gap; - } - return max; -} - -static int browse_rb(struct mm_struct *mm) -{ - struct rb_root *root = &mm->mm_rb; - int i = 0, j, bug = 0; - struct rb_node *nd, *pn = NULL; - unsigned long prev = 0, pend = 0; - - for (nd = rb_first(root); nd; nd = rb_next(nd)) { - struct vm_area_struct *vma; - vma = rb_entry(nd, struct vm_area_struct, vm_rb); - if (vma->vm_start < prev) { - pr_emerg("vm_start %lx < prev %lx\n", - vma->vm_start, prev); - bug = 1; - } - if (vma->vm_start < pend) { - pr_emerg("vm_start %lx < pend %lx\n", - vma->vm_start, pend); - bug = 1; - } - if (vma->vm_start > vma->vm_end) { - pr_emerg("vm_start %lx > vm_end %lx\n", - vma->vm_start, vma->vm_end); - bug = 1; - } - spin_lock(&mm->page_table_lock); - if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { - pr_emerg("free gap %lx, correct %lx\n", - vma->rb_subtree_gap, - vma_compute_subtree_gap(vma)); - bug = 1; - } - spin_unlock(&mm->page_table_lock); - i++; - pn = nd; - prev = vma->vm_start; - pend = vma->vm_end; - } - j = 0; - for (nd = pn; nd; nd = rb_prev(nd)) - j++; - if (i != j) { - pr_emerg("backwards %d, forwards %d\n", j, i); - bug = 1; - } - return bug ? 
-1 : i; -} #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) extern void mt_validate(struct maple_tree *mt); extern void mt_dump(const struct maple_tree *mt); @@ -408,19 +320,25 @@ static void validate_mm_mt(struct mm_struct *mm) (vma->vm_end - 1 != mas.last)) { pr_emerg("issue in %s\n", current->comm); dump_stack(); -#ifdef CONFIG_DEBUG_VM dump_vma(vma_mt); - pr_emerg("and next in rb\n"); + pr_emerg("and vm_next\n"); dump_vma(vma->vm_next); -#endif pr_emerg("mt piv: %px %lu - %lu\n", vma_mt, mas.index, mas.last); pr_emerg("mt vma: %px %lu - %lu\n", vma_mt, vma_mt->vm_start, vma_mt->vm_end); - pr_emerg("rb vma: %px %lu - %lu\n", vma, + if (vma->vm_prev) { + pr_emerg("ll prev: %px %lu - %lu\n", + vma->vm_prev, vma->vm_prev->vm_start, + vma->vm_prev->vm_end); + } + pr_emerg("ll vma: %px %lu - %lu\n", vma, vma->vm_start, vma->vm_end); - pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next, - vma->vm_next->vm_start, vma->vm_next->vm_end); + if (vma->vm_next) { + pr_emerg("ll next: %px %lu - %lu\n", + vma->vm_next, vma->vm_next->vm_start, + vma->vm_next->vm_end); + } mt_dump(mas.tree); if (vma_mt->vm_end != mas.last + 1) { @@ -443,21 +361,6 @@ static void validate_mm_mt(struct mm_struct *mm) } VM_BUG_ON(vma); } -#else -#define validate_mm_mt(root) do { } while (0) -#endif -static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) -{ - struct rb_node *nd; - - for (nd = rb_first(root); nd; nd = rb_next(nd)) { - struct vm_area_struct *vma; - vma = rb_entry(nd, struct vm_area_struct, vm_rb); - VM_BUG_ON_VMA(vma != ignore && - vma->rb_subtree_gap != vma_compute_subtree_gap(vma), - vma); - } -} static void validate_mm(struct mm_struct *mm) { @@ -466,7 +369,10 @@ static void validate_mm(struct mm_struct *mm) unsigned long highest_address = 0; struct vm_area_struct *vma = mm->mmap; + validate_mm_mt(mm); + while (vma) { +#ifdef CONFIG_DEBUG_VM_RB struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; @@ -476,6 +382,7 @@ static void validate_mm(struct mm_struct *mm) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } +#endif highest_address = vm_end_gap(vma); vma = vma->vm_next; @@ -490,80 +397,13 @@ static void validate_mm(struct mm_struct *mm) mm->highest_vm_end, highest_address); bug = 1; } - i = browse_rb(mm); - if (i != mm->map_count) { - if (i != -1) - pr_emerg("map_count %d rb %d\n", mm->map_count, i); - bug = 1; - } VM_BUG_ON_MM(bug, mm); } -#else -#define validate_mm_rb(root, ignore) do { } while (0) + +#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */ #define validate_mm_mt(root) do { } while (0) #define validate_mm(mm) do { } while (0) -#endif - -RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks, - struct vm_area_struct, vm_rb, - unsigned long, rb_subtree_gap, vma_compute_gap) - -/* - * Update augmented rbtree rb_subtree_gap values after vma->vm_start or - * vma->vm_prev->vm_end values changed, without modifying the vma's position - * in the rbtree. - */ -static void vma_gap_update(struct vm_area_struct *vma) -{ - /* - * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created - * a callback function that does exactly what we want. 
- */ - vma_gap_callbacks_propagate(&vma->vm_rb, NULL); -} - -static inline void vma_rb_insert(struct vm_area_struct *vma, - struct rb_root *root) -{ - /* All rb_subtree_gap values must be consistent prior to insertion */ - validate_mm_rb(root, NULL); - - rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); -} - -static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) -{ - /* - * Note rb_erase_augmented is a fairly large inline function, - * so make sure we instantiate it only once with our desired - * augmented rbtree callbacks. - */ - rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); -} - -static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, - struct rb_root *root, - struct vm_area_struct *ignore) -{ - /* - * All rb_subtree_gap values must be consistent prior to erase, - * with the possible exception of - * - * a. the "next" vma being erased if next->vm_start was reduced in - * __vma_adjust() -> __vma_unlink() - * b. the vma being erased in detach_vmas_to_be_unmapped() -> - * vma_rb_erase() - */ - validate_mm_rb(root, ignore); - - __vma_rb_erase(vma, root); -} - -static __always_inline void vma_rb_erase(struct vm_area_struct *vma, - struct rb_root *root) -{ - vma_rb_erase_ignore(vma, root, vma); -} +#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ /* * vma has some anon_vma assigned, and is already inserted on that @@ -597,39 +437,26 @@ anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } -static int find_vma_links(struct mm_struct *mm, unsigned long addr, - unsigned long end, struct vm_area_struct **pprev, - struct rb_node ***rb_link, struct rb_node **rb_parent) +/* + * range_has_overlap() - Check the @start - @end range for overlapping VMAs and + * sets up a pointer to the previous VMA + * @mm: the mm struct + * @start: the start address of the range + * @end: the end address of the range + * @pprev: the pointer to the pointer of the previous VMA + * + * Returns: True if there is an overlapping VMA, false otherwise + */ +static inline +bool range_has_overlap(struct mm_struct *mm, unsigned long start, + unsigned long end, struct vm_area_struct **pprev) { - struct rb_node **__rb_link, *__rb_parent, *rb_prev; - - mmap_assert_locked(mm); - __rb_link = &mm->mm_rb.rb_node; - rb_prev = __rb_parent = NULL; - - while (*__rb_link) { - struct vm_area_struct *vma_tmp; + struct vm_area_struct *existing; - __rb_parent = *__rb_link; - vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); - - if (vma_tmp->vm_end > addr) { - /* Fail if an existing vma overlaps the area */ - if (vma_tmp->vm_start < end) - return -ENOMEM; - __rb_link = &__rb_parent->rb_left; - } else { - rb_prev = __rb_parent; - __rb_link = &__rb_parent->rb_right; - } - } - - *pprev = NULL; - if (rb_prev) - *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); - *rb_link = __rb_link; - *rb_parent = __rb_parent; - return 0; + MA_STATE(mas, &mm->mm_mt, start, start); + existing = mas_find(&mas, end - 1); + *pprev = mas_prev(&mas, 0); + return existing ? true : false; } /* @@ -656,8 +483,6 @@ static inline struct vm_area_struct *__vma_next(struct mm_struct *mm, * @start: The start of the range. * @len: The length of the range. * @pprev: pointer to the pointer that will be set to previous vm_area_struct - * @rb_link: the rb_node - * @rb_parent: the parent rb_node * * Find all the vm_area_struct that overlap from @start to * @end and munmap them. Set @pprev to the previous vm_area_struct. 
@@ -666,14 +491,11 @@ static inline struct vm_area_struct *__vma_next(struct mm_struct *mm, */ static inline int munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len, - struct vm_area_struct **pprev, struct rb_node ***link, - struct rb_node **parent, struct list_head *uf) + struct vm_area_struct **pprev, struct list_head *uf) { - - while (find_vma_links(mm, start, start + len, pprev, link, parent)) + while (range_has_overlap(mm, start, start + len, pprev)) if (do_munmap(mm, start, len, uf)) return -ENOMEM; - return 0; } @@ -694,30 +516,6 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, return nr_pages; } -void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, - struct rb_node **rb_link, struct rb_node *rb_parent) -{ - /* Update tracking information for the gap following the new vma. */ - if (vma->vm_next) - vma_gap_update(vma->vm_next); - else - mm->highest_vm_end = vm_end_gap(vma); - - /* - * vma->vm_prev wasn't known when we followed the rbtree to find the - * correct insertion point for that vma. As a result, we could not - * update the vma vm_rb parents rb_subtree_gap values on the way down. - * So, we first insert the vma with a zero rb_subtree_gap value - * (to be consistent with what we did on the way down), and then - * immediately update the gap to the correct value. Finally we - * rebalance the rbtree after all augmented values have been set. - */ - rb_link_node(&vma->vm_rb, rb_parent, rb_link); - vma->rb_subtree_gap = 0; - vma_gap_update(vma); - vma_rb_insert(vma, &mm->mm_rb); -} - static void __vma_link_file(struct vm_area_struct *vma) { struct file *file; @@ -785,18 +583,8 @@ static inline void vma_mas_szero(struct ma_state *mas, unsigned long start, mas_store_prealloc(mas, NULL); } -static void -__vma_link(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev, struct rb_node **rb_link, - struct rb_node *rb_parent) -{ - __vma_link_list(mm, vma, prev); - __vma_link_rb(mm, vma, rb_link, rb_parent); -} - static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev, struct rb_node **rb_link, - struct rb_node *rb_parent) + struct vm_area_struct *prev) { MA_STATE(mas, &mm->mm_mt, 0, 0); struct address_space *mapping = NULL; @@ -810,7 +598,7 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, } vma_mas_store(vma, &mas); - __vma_link(mm, vma, prev, rb_link, rb_parent); + __vma_link_list(mm, vma, prev); __vma_link_file(vma); if (mapping) @@ -823,34 +611,20 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, /* * Helper for vma_adjust() in the split_vma insert case: insert a vma into the - * mm's list and rbtree. It has already been inserted into the interval tree. + * mm's list and the mm tree. It has already been inserted into the interval tree. 
*/ static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas, struct vm_area_struct *vma) { struct vm_area_struct *prev; - struct rb_node **rb_link, *rb_parent; - - if (find_vma_links(mm, vma->vm_start, vma->vm_end, - &prev, &rb_link, &rb_parent)) - BUG(); + mas_set(mas, vma->vm_start); + prev = mas_prev(mas, 0); vma_mas_store(vma, mas); __vma_link_list(mm, vma, prev); - __vma_link_rb(mm, vma, rb_link, rb_parent); mm->map_count++; } -static __always_inline void __vma_unlink(struct mm_struct *mm, - struct vm_area_struct *vma, - struct vm_area_struct *ignore) -{ - vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); - __vma_unlink_list(mm, vma); - /* Kill the cache */ - vmacache_invalidate(mm); -} - /* * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that * is already present in an i_mmap tree without adjusting the tree. @@ -863,20 +637,18 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; + struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end); + struct vm_area_struct *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; struct anon_vma *anon_vma = NULL; struct file *file = vma->vm_file; - bool start_changed = false, end_changed = false; + bool vma_changed = false; long adjust_next = 0; int remove_next = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); struct vm_area_struct *exporter = NULL, *importer = NULL; - validate_mm(mm); - validate_mm_mt(mm); - if (next && !insert) { if (end >= next->vm_end) { /* @@ -906,8 +678,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * remove_next == 1 is case 1 or 7. */ remove_next = 1 + (end > next->vm_end); + next_next = find_vma(mm, next->vm_end); VM_WARN_ON(remove_next == 2 && - end != next->vm_next->vm_end); + end != next_next->vm_end); /* trim end to next, for case 6 first pass */ end = next->vm_end; } @@ -1006,21 +779,21 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (start != vma->vm_start) { - unsigned long old_start = vma->vm_start; + if (vma->vm_start < start) + vma_mas_szero(&mas, vma->vm_start, start); + vma_changed = true; vma->vm_start = start; - if (old_start < start) - vma_mas_szero(&mas, old_start, start); - start_changed = true; } if (end != vma->vm_end) { - unsigned long old_end = vma->vm_end; + if (vma->vm_end > end) + vma_mas_szero(&mas, end, vma->vm_end); + vma_changed = true; vma->vm_end = end; - if (old_end > end) - vma_mas_szero(&mas, end, old_end); - end_changed = true; + if (!next) + mm->highest_vm_end = vm_end_gap(vma); } - if (end_changed || start_changed) + if (vma_changed) vma_mas_store(vma, &mas); vma->vm_pgoff = pgoff; @@ -1038,25 +811,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (remove_next) { - /* - * vma_merge has merged next into vma, and needs - * us to remove next before dropping the locks. - * Since we have expanded over this vma, the maple tree will - * have overwritten by storing the value - */ - if (remove_next != 3) - __vma_unlink(mm, next, next); - else - /* - * vma is not before next if they've been - * swapped. - * - * pre-swap() next->vm_start was reduced so - * tell validate_mm_rb to ignore pre-swap() - * "next" (which is stored in post-swap() - * "vma"). 
- */ - __vma_unlink(mm, next, vma); + __vma_unlink_list(mm, next); + /* Kill the cache */ + vmacache_invalidate(mm); if (file) __remove_shared_vm_struct(next, file, mapping); } else if (insert) { @@ -1066,15 +823,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * (it may either follow vma or precede it). */ __insert_vm_struct(mm, &mas, insert); - } else { - if (start_changed) - vma_gap_update(vma); - if (end_changed) { - if (!next) - mm->highest_vm_end = vm_end_gap(vma); - else if (!adjust_next) - vma_gap_update(next); - } } if (anon_vma) { @@ -1101,7 +849,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); + BUG_ON(vma->vm_end < next->vm_end); vm_area_free(next); + /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter @@ -1114,7 +864,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * "next->vm_prev->vm_end" changed and the * "vma->vm_next" gap must be updated. */ - next = vma->vm_next; + next = next_next; } else { /* * For the scope of the comment "next" and @@ -1129,13 +879,11 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, next = vma; } if (remove_next == 2) { + mas_reset(&mas); remove_next = 1; end = next->vm_end; goto again; - } - else if (next) - vma_gap_update(next); - else { + } else if (!next) { /* * If remove_next == 2 we obviously can't * reach this path. @@ -1162,8 +910,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, uprobe_mmap(insert); validate_mm(mm); - validate_mm_mt(mm); - return 0; } @@ -1316,7 +1062,6 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *area, *next; int err; - validate_mm_mt(mm); /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. @@ -1392,7 +1137,6 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, khugepaged_enter_vma(area, vm_flags); return area; } - validate_mm_mt(mm); return NULL; } @@ -1562,6 +1306,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags_t vm_flags; int pkey = 0; + validate_mm(mm); *populate = 0; if (!len) @@ -1869,10 +1614,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev, *merge; int error; - struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; - validate_mm_mt(mm); /* Check against address space limit. 
*/ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { unsigned long nr_pages; @@ -1888,8 +1631,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, return -ENOMEM; } - /* Clear old maps, set up prev, rb_link, rb_parent, and uf */ - if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) + /* Clear old maps, set up prev and uf */ + if (munmap_vma_range(mm, addr, len, &prev, uf)) return -ENOMEM; /* * Private writable mapping: check memory availability @@ -1987,7 +1730,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, goto free_vma; } - if (vma_link(mm, vma, prev, rb_link, rb_parent)) { + if (vma_link(mm, vma, prev)) { error = -ENOMEM; if (file) goto unmap_and_free_vma; @@ -2033,7 +1776,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_set_page_prot(vma); - validate_mm_mt(mm); return addr; unmap_and_free_vma: @@ -2050,7 +1792,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, unacct_error: if (charged) vm_unacct_memory(charged); - validate_mm_mt(mm); return error; } @@ -2403,7 +2144,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) int error = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); - validate_mm_mt(mm); if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; @@ -2455,15 +2195,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) error = acct_stack_growth(vma, size, grow); if (!error) { /* - * vma_gap_update() doesn't support concurrent - * updates, but we only hold a shared mmap_lock - * lock here, so we need to protect against - * concurrent vma expansions. - * anon_vma_lock_write() doesn't help here, as - * we don't guarantee that all growable vmas - * in a mm share the same root anon vma. - * So, we reuse mm->page_table_lock to guard - * against concurrent vma expansions. + * We only hold a shared mmap_lock lock here, so + * we need to protect against concurrent vma + * expansions. anon_vma_lock_write() doesn't + * help here, as we don't guarantee that all + * growable vmas in a mm share the same root + * anon vma. So, we reuse mm->page_table_lock + * to guard against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) @@ -2474,9 +2212,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Overwrite old entry in mtree. */ vma_mas_store(vma, &mas); anon_vma_interval_tree_post_update_vma(vma); - if (vma->vm_next) - vma_gap_update(vma->vm_next); - else + if (!vma->vm_next) mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); @@ -2486,8 +2222,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma(vma, vma->vm_flags); - validate_mm(mm); - validate_mm_mt(mm); mas_destroy(&mas); return error; } @@ -2496,15 +2230,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* * vma is the first one with address < vma->vm_start. Have to extend vma. 
*/ -int expand_downwards(struct vm_area_struct *vma, - unsigned long address) +int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); - validate_mm(mm); address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; @@ -2546,15 +2278,13 @@ int expand_downwards(struct vm_area_struct *vma, error = acct_stack_growth(vma, size, grow); if (!error) { /* - * vma_gap_update() doesn't support concurrent - * updates, but we only hold a shared mmap_lock - * lock here, so we need to protect against - * concurrent vma expansions. - * anon_vma_lock_write() doesn't help here, as - * we don't guarantee that all growable vmas - * in a mm share the same root anon vma. - * So, we reuse mm->page_table_lock to guard - * against concurrent vma expansions. + * We only hold a shared mmap_lock lock here, so + * we need to protect against concurrent vma + * expansions. anon_vma_lock_write() doesn't + * help here, as we don't guarantee that all + * growable vmas in a mm share the same root + * anon vma. So, we reuse mm->page_table_lock + * to guard against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) @@ -2566,7 +2296,6 @@ int expand_downwards(struct vm_area_struct *vma, /* Overwrite old entry in mtree. */ vma_mas_store(vma, &mas); anon_vma_interval_tree_post_update_vma(vma); - vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); @@ -2575,7 +2304,6 @@ int expand_downwards(struct vm_area_struct *vma, } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma(vma, vma->vm_flags); - validate_mm(mm); mas_destroy(&mas); return error; } @@ -2708,10 +2436,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas, insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; - mas_set_range(mas, vma->vm_start, end - 1); - mas_store_prealloc(mas, NULL); + vma_mas_szero(mas, vma->vm_start, end); do { - vma_rb_erase(vma, &mm->mm_rb); if (vma->vm_flags & VM_LOCKED) mm->locked_vm -= vma_pages(vma); mm->map_count--; @@ -2719,10 +2445,9 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas, vma = vma->vm_next; } while (vma && vma->vm_start < end); *insertion_point = vma; - if (vma) { + if (vma) vma->vm_prev = prev; - vma_gap_update(vma); - } else + else mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; @@ -2841,11 +2566,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, if (len == 0) return -EINVAL; - /* - * arch_unmap() might do unmaps itself. It must be called - * and finish any rbtree manipulation before this code - * runs and also starts to manipulate the rbtree. - */ + /* arch_unmap() might do unmaps itself. */ arch_unmap(mm, start, end); /* Find the first overlapping VMA where start < vma->vm_end */ @@ -2856,6 +2577,11 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, if (mas_preallocate(&mas, vma, GFP_KERNEL)) return -ENOMEM; prev = vma->vm_prev; + /* we have start < vma->vm_end */ + + /* if it doesn't overlap, we have nothing.. */ + if (vma->vm_start >= end) + return 0; /* * If we need to split any vma, do it now to save pain later. @@ -2916,6 +2642,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, /* Fix up all other VM information */ remove_vma_list(mm, vma); + + validate_mm(mm); return downgrade ? 
1 : 0; map_count_exceeded: @@ -3054,11 +2782,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ -static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) +static int do_brk_flags(unsigned long addr, unsigned long len, + unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; - struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; unsigned long mapped_addr; @@ -3077,8 +2805,8 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla if (error) return error; - /* Clear old maps, set up prev, rb_link, rb_parent, and uf */ - if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) + /* Clear old maps, set up prev and uf */ + if (munmap_vma_range(mm, addr, len, &prev, uf)) return -ENOMEM; /* Check against address space limits *after* clearing old maps... */ @@ -3112,7 +2840,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla vma->vm_pgoff = pgoff; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); - if(vma_link(mm, vma, prev, rb_link, rb_parent)) + if(vma_link(mm, vma, prev)) goto no_vma_link; out: @@ -3231,26 +2959,10 @@ void exit_mmap(struct mm_struct *mm) int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; - struct rb_node **rb_link, *rb_parent; - unsigned long start = vma->vm_start; - struct vm_area_struct *overlap = NULL; - if (find_vma_links(mm, vma->vm_start, vma->vm_end, - &prev, &rb_link, &rb_parent)) + if (range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev)) return -ENOMEM; - overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1); - if (overlap) { - - pr_err("Found vma ending at %lu\n", start - 1); - pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap, - overlap->vm_start, overlap->vm_end - 1); -#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) - mt_dump(&mm->mm_mt); -#endif - BUG(); - } - if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -3272,7 +2984,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } - if (vma_link(mm, vma, prev, rb_link, rb_parent)) + if (vma_link(mm, vma, prev)) return -ENOMEM; return 0; @@ -3290,9 +3002,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma, *prev; - struct rb_node **rb_link, *rb_parent; bool faulted_in_anon_vma = true; - unsigned long index = addr; validate_mm_mt(mm); /* @@ -3304,10 +3014,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, faulted_in_anon_vma = false; } - if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) + if (range_has_overlap(mm, addr, addr + len, &prev)) return NULL; /* should never get here */ - if (mt_find(&mm->mm_mt, &index, addr+len - 1)) - BUG(); + new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); @@ -3348,12 +3057,16 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); - vma_link(mm, new_vma, prev, rb_link, rb_parent); + if 
(vma_link(mm, new_vma, prev)) + goto out_vma_link; *need_rmap_locks = false; } validate_mm_mt(mm); return new_vma; +out_vma_link: + if (new_vma->vm_ops && new_vma->vm_ops->close) + new_vma->vm_ops->close(new_vma); out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: diff --git a/mm/nommu.c b/mm/nommu.c index 5af0b050eba8d..f2031f865dbb7 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -566,9 +566,9 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas) */ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct *pvma, *prev; struct address_space *mapping; - struct rb_node **p, *parent, *rb_prev; + struct vm_area_struct *prev; + MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end); BUG_ON(!vma->vm_region); @@ -586,42 +586,10 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) i_mmap_unlock_write(mapping); } + prev = mas_prev(&mas, 0); + mas_reset(&mas); /* add the VMA to the tree */ - parent = rb_prev = NULL; - p = &mm->mm_rb.rb_node; - while (*p) { - parent = *p; - pvma = rb_entry(parent, struct vm_area_struct, vm_rb); - - /* sort by: start addr, end addr, VMA struct addr in that order - * (the latter is necessary as we may get identical VMAs) */ - if (vma->vm_start < pvma->vm_start) - p = &(*p)->rb_left; - else if (vma->vm_start > pvma->vm_start) { - rb_prev = parent; - p = &(*p)->rb_right; - } else if (vma->vm_end < pvma->vm_end) - p = &(*p)->rb_left; - else if (vma->vm_end > pvma->vm_end) { - rb_prev = parent; - p = &(*p)->rb_right; - } else if (vma < pvma) - p = &(*p)->rb_left; - else if (vma > pvma) { - rb_prev = parent; - p = &(*p)->rb_right; - } else - BUG(); - } - - rb_link_node(&vma->vm_rb, parent, p); - rb_insert_color(&vma->vm_rb, &mm->mm_rb); - - /* add VMA to the VMA list also */ - prev = NULL; - if (rb_prev) - prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); - + vma_mas_store(vma, &mas); __vma_link_list(mm, vma, prev); } @@ -634,6 +602,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) struct address_space *mapping; struct mm_struct *mm = vma->vm_mm; struct task_struct *curr = current; + MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0); mm->map_count--; for (i = 0; i < VMACACHE_SIZE; i++) { @@ -656,8 +625,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) } /* remove from the MM's tree and list */ - rb_erase(&vma->vm_rb, &mm->mm_rb); - + vma_mas_remove(vma, &mas); __vma_unlink_list(mm, vma); } @@ -681,24 +649,19 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; + MA_STATE(mas, &mm->mm_mt, addr, addr); /* check the cache first */ vma = vmacache_find(mm, addr); if (likely(vma)) return vma; - /* trawl the list (there may be multiple mappings in which addr - * resides) */ - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (vma->vm_start > addr) - return NULL; - if (vma->vm_end > addr) { - vmacache_update(addr, vma); - return vma; - } - } + vma = mas_walk(&mas); - return NULL; + if (vma) + vmacache_update(addr, vma); + + return vma; } EXPORT_SYMBOL(find_vma); @@ -730,26 +693,23 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, { struct vm_area_struct *vma; unsigned long end = addr + len; + MA_STATE(mas, &mm->mm_mt, addr, addr); /* check the cache first */ vma = vmacache_find_exact(mm, addr, end); if (vma) return vma; - /* trawl the list (there may be multiple mappings in which addr - * resides) */ - for (vma = 
mm->mmap; vma; vma = vma->vm_next) { - if (vma->vm_start < addr) - continue; - if (vma->vm_start > addr) - return NULL; - if (vma->vm_end == end) { - vmacache_update(addr, vma); - return vma; - } - } + vma = mas_walk(&mas); + if (!vma) + return NULL; + if (vma->vm_start != addr) + return NULL; + if (vma->vm_end != end) + return NULL; - return NULL; + vmacache_update(addr, vma); + return vma; } /* @@ -1546,6 +1506,7 @@ void exit_mmap(struct mm_struct *mm) delete_vma(mm, vma); cond_resched(); } + __mt_destroy(&mm->mm_mt); } int vm_brk(unsigned long addr, unsigned long len) diff --git a/mm/util.c b/mm/util.c index 5df8f2db7ca95..812365cbdd197 100644 --- a/mm/util.c +++ b/mm/util.c @@ -288,6 +288,8 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, vma->vm_next = next; if (next) next->vm_prev = vma; + else + mm->highest_vm_end = vm_end_gap(vma); } void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma) @@ -300,8 +302,14 @@ void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma) prev->vm_next = next; else mm->mmap = next; - if (next) + if (next) { next->vm_prev = prev; + } else { + if (prev) + mm->highest_vm_end = vm_end_gap(prev); + else + mm->highest_vm_end = 0; + } } /* Check if the vma is being used as a stack by this task */ From 237a479193c039bdd09ee4fa5b4620ae5cb364b1 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:57 +0000 Subject: [PATCH 029/321] mmap: change zeroing of maple tree in __vma_adjust() Only write to the maple tree if we are not inserting or the insert isn't going to overwrite the area to clear. This avoids spanning writes and node coealescing when unnecessary. The change requires a custom search for the linked list addition to find the correct VMA for the prev link. Link: https://lkml.kernel.org/r/20220504011345.662299-3-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-19-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index fb8108bc0a643..64d3091e496ee 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -614,11 +614,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, * mm's list and the mm tree. It has already been inserted into the interval tree. */ static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas, - struct vm_area_struct *vma) + struct vm_area_struct *vma, unsigned long location) { struct vm_area_struct *prev; - mas_set(mas, vma->vm_start); + mas_set(mas, location); prev = mas_prev(mas, 0); vma_mas_store(vma, mas); __vma_link_list(mm, vma, prev); @@ -648,6 +648,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, int remove_next = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); struct vm_area_struct *exporter = NULL, *importer = NULL; + unsigned long ll_prev = vma->vm_start; /* linked list prev. 
*/ if (next && !insert) { if (end >= next->vm_end) { @@ -779,15 +780,27 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (start != vma->vm_start) { - if (vma->vm_start < start) + if ((vma->vm_start < start) && + (!insert || (insert->vm_end != start))) { vma_mas_szero(&mas, vma->vm_start, start); - vma_changed = true; + VM_WARN_ON(insert && insert->vm_start > vma->vm_start); + } else { + vma_changed = true; + } vma->vm_start = start; } if (end != vma->vm_end) { - if (vma->vm_end > end) - vma_mas_szero(&mas, end, vma->vm_end); - vma_changed = true; + if (vma->vm_end > end) { + if (!insert || (insert->vm_start != end)) { + vma_mas_szero(&mas, end, vma->vm_end); + VM_WARN_ON(insert && + insert->vm_end < vma->vm_end); + } else if (insert->vm_start == end) { + ll_prev = vma->vm_end; + } + } else { + vma_changed = true; + } vma->vm_end = end; if (!next) mm->highest_vm_end = vm_end_gap(vma); @@ -822,7 +835,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * us to insert it before dropping the locks * (it may either follow vma or precede it). */ - __insert_vm_struct(mm, &mas, insert); + __insert_vm_struct(mm, &mas, insert, ll_prev); } if (anon_vma) { @@ -909,6 +922,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (insert && file) uprobe_mmap(insert); + mas_destroy(&mas); validate_mm(mm); return 0; } From 1286a00f125c8713349273bbee232aeacfcb785e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:57 +0000 Subject: [PATCH 030/321] xen: use vma_lookup() in privcmd_ioctl_mmap() vma_lookup() walks the VMA tree for a specific value, find_vma() will search the tree after walking to a specific value. It is more efficient to only walk to the requested value since privcmd_ioctl_mmap() will exit the loop if vm_start != msg->va. Link: https://lkml.kernel.org/r/20220504011345.662299-4-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-20-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- drivers/xen/privcmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 3369734108af2..ad17166b0ef6b 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -282,7 +282,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata) struct page, lru); struct privcmd_mmap_entry *msg = page_address(page); - vma = find_vma(mm, msg->va); + vma = vma_lookup(mm, msg->va); rc = -EINVAL; if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) From 2eb83d03e396226160fe58f54cfceb546787d7de Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:57 +0000 Subject: [PATCH 031/321] mm: optimize find_exact_vma() to use vma_lookup() Use vma_lookup() to walk the tree to the start value requested. If the vma at the start does not match, then the answer is NULL and there is no need to look at the next vma the way that find_vma() would. Link: https://lkml.kernel.org/r/20220504011345.662299-5-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-21-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Reviewed-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 560c6cc4994fe..9b562a7cf6185 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2886,7 +2886,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma) static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, unsigned long vm_start, unsigned long vm_end) { - struct vm_area_struct *vma = find_vma(mm, vm_start); + struct vm_area_struct *vma = vma_lookup(mm, vm_start); if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) vma = NULL; From 54beeb0c5f18755f10c05ac6ee1f4bccee45d77e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:58 +0000 Subject: [PATCH 032/321] mm/khugepaged: optimize collapse_pte_mapped_thp() by using vma_lookup() vma_lookup() will walk the vma tree once and not continue to look for the next vma. Since the exact vma is checked below, this is a more optimal way of searching. Link: https://lkml.kernel.org/r/20220504011345.662299-6-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-22-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Reviewed-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/khugepaged.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 01e0d6336754e..aa5888cc9ef22 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1419,7 +1419,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { unsigned long haddr = addr & HPAGE_PMD_MASK; - struct vm_area_struct *vma = find_vma(mm, haddr); + struct vm_area_struct *vma = vma_lookup(mm, haddr); struct page *hpage; pte_t *start_pte, *pte; pmd_t *pmd; From b1738275edbcbe53330ca2055f9a083e8603e15a Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:58 +0000 Subject: [PATCH 033/321] mm/mmap: change do_brk_flags() to expand existing VMA and add do_brk_munmap() Avoid allocating a new VMA when it a vma modification can occur. When a brk() can expand or contract a VMA, then the single store operation will only modify one index of the maple tree instead of causing a node to split or coalesce. This avoids unnecessary allocations/frees of maple tree nodes and VMAs. Move some limit & flag verifications out of the do_brk_flags() function to use only relevant checks in the code path of bkr() and vm_brk_flags(). Set the vma to check if it can expand in vm_brk_flags() if extra criteria are met. Drop userfaultfd from do_brk_flags() path and only use it in vm_brk_flags() path since that is the only place a munmap will happen. Link: https://lkml.kernel.org/r/20220504011345.662299-7-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-23-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
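A condensed sketch of the expansion fast path this log describes, boiled down from the do_brk_flags() hunk further below; anon_vma locking, transparent-hugepage and khugepaged handling, and the VM_SOFTDIRTY update are omitted, so treat it as illustration rather than the actual change. The point is that growing brk into a matching anonymous VMA becomes a single maple-tree store instead of a VMA allocation:

	if (vma && !vma->anon_vma &&
	    (vma->vm_flags & ~VM_SOFTDIRTY) == flags) {
		mas->index = vma->vm_start;
		mas->last = addr + len - 1;
		vma->vm_end = addr + len;		 /* grow in place */
		if (mas_store_gfp(mas, vma, GFP_KERNEL)) /* one tree write */
			return -ENOMEM;
		goto out;
	}
	/* otherwise fall back to allocating a new anonymous VMA */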
Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 286 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 228 insertions(+), 58 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 64d3091e496ee..5033010ee50d4 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -194,17 +194,40 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) return next; } -static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, - struct list_head *uf); +/* + * check_brk_limits() - Use platform specific check of range & verify mlock + * limits. + * @addr: The address to check + * @len: The size of increase. + * + * Return: 0 on success. + */ +static int check_brk_limits(unsigned long addr, unsigned long len) +{ + unsigned long mapped_addr; + + mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (IS_ERR_VALUE(mapped_addr)) + return mapped_addr; + + return mlock_future_check(current->mm, current->mm->def_flags, len); +} +static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, + unsigned long newbrk, unsigned long oldbrk, + struct list_head *uf); +static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *brkvma, + unsigned long addr, unsigned long request, + unsigned long flags); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; - struct vm_area_struct *next; + struct vm_area_struct *brkvma, *next = NULL; unsigned long min_brk; bool populate; bool downgraded = false; LIST_HEAD(uf); + MA_STATE(mas, &mm->mm_mt, 0, 0); if (mmap_write_lock_killable(mm)) return -EINTR; @@ -246,35 +269,52 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) /* * Always allow shrinking brk. - * __do_munmap() may downgrade mmap_lock to read. + * do_brk_munmap() may downgrade mmap_lock to read. */ if (brk <= mm->brk) { int ret; + /* Search one past newbrk */ + mas_set(&mas, newbrk); + brkvma = mas_find(&mas, oldbrk); + BUG_ON(brkvma == NULL); + if (brkvma->vm_start >= oldbrk) + goto out; /* mapping intersects with an existing non-brk vma. */ /* - * mm->brk must to be protected by write mmap_lock so update it - * before downgrading mmap_lock. When __do_munmap() fails, - * mm->brk will be restored from origbrk. + * mm->brk must be protected by write mmap_lock. + * do_brk_munmap() may downgrade the lock, so update it + * before calling do_brk_munmap(). */ mm->brk = brk; - ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); - if (ret < 0) { - mm->brk = origbrk; - goto out; - } else if (ret == 1) { + mas.last = oldbrk - 1; + ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf); + if (ret == 1) { downgraded = true; - } - goto success; + goto success; + } else if (!ret) + goto success; + + mm->brk = origbrk; + goto out; } - /* Check against existing mmap mappings. */ - next = find_vma(mm, oldbrk); + if (check_brk_limits(oldbrk, newbrk - oldbrk)) + goto out; + + /* + * Only check if the next VMA is within the stack_guard_gap of the + * expansion area + */ + mas_set(&mas, oldbrk); + next = mas_find(&mas, newbrk - 1 + PAGE_SIZE + stack_guard_gap); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; + brkvma = mas_prev(&mas, mm->start_brk); /* Ok, looks good - let it rip. 
*/ - if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) + if (do_brk_flags(&mas, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) goto out; + mm->brk = brk; success: @@ -2792,38 +2832,113 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, } /* - * this is really a simplified "do_mmap". it only handles - * anonymous maps. eventually we may be able to do some - * brk-specific accounting here. + * brk_munmap() - Unmap a parital vma. + * @mas: The maple tree state. + * @vma: The vma to be modified + * @newbrk: the start of the address to unmap + * @oldbrk: The end of the address to unmap + * @uf: The userfaultfd list_head + * + * Returns: 1 on success. + * unmaps a partial VMA mapping. Does not handle alignment, downgrades lock if + * possible. */ -static int do_brk_flags(unsigned long addr, unsigned long len, - unsigned long flags, struct list_head *uf) +static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, + unsigned long newbrk, unsigned long oldbrk, + struct list_head *uf) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev; - pgoff_t pgoff = addr >> PAGE_SHIFT; - int error; - unsigned long mapped_addr; - validate_mm_mt(mm); + struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct unmap; + unsigned long unmap_pages; + int ret = 1; + + arch_unmap(mm, newbrk, oldbrk); + + if (likely((vma->vm_end < oldbrk) || + ((vma->vm_start == newbrk) && (vma->vm_end == oldbrk)))) { + /* remove entire mapping(s) */ + mas_set(mas, newbrk); + if (vma->vm_start != newbrk) + mas_reset(mas); /* cause a re-walk for the first overlap. */ + ret = __do_munmap(mm, newbrk, oldbrk - newbrk, uf, true); + goto munmap_full_vma; + } + + vma_init(&unmap, mm); + unmap.vm_start = newbrk; + unmap.vm_end = oldbrk; + ret = userfaultfd_unmap_prep(&unmap, newbrk, oldbrk, uf); + if (ret) + return ret; + ret = 1; - /* Until we need other flags, refuse anything except VM_EXEC. */ - if ((flags & (~VM_EXEC)) != 0) - return -EINVAL; - flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + /* Change the oldbrk of vma to the newbrk of the munmap area */ + vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0); + if (mas_preallocate(mas, vma, GFP_KERNEL)) + return -ENOMEM; - mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); - if (IS_ERR_VALUE(mapped_addr)) - return mapped_addr; + if (vma->anon_vma) { + anon_vma_lock_write(vma->anon_vma); + anon_vma_interval_tree_pre_update_vma(vma); + } - error = mlock_future_check(mm, mm->def_flags, len); - if (error) - return error; + vma->vm_end = newbrk; + vma_init(&unmap, mm); + unmap.vm_start = newbrk; + unmap.vm_end = oldbrk; + if (vma->anon_vma) + vma_set_anonymous(&unmap); - /* Clear old maps, set up prev and uf */ - if (munmap_vma_range(mm, addr, len, &prev, uf)) - return -ENOMEM; + vma_mas_remove(&unmap, mas); + + vmacache_invalidate(vma->vm_mm); + if (vma->anon_vma) { + anon_vma_interval_tree_post_update_vma(vma); + anon_vma_unlock_write(vma->anon_vma); + } + + unmap_pages = vma_pages(&unmap); + if (vma->vm_flags & VM_LOCKED) + mm->locked_vm -= unmap_pages; + + mmap_write_downgrade(mm); + unmap_region(mm, &unmap, vma, newbrk, oldbrk); + /* Statistics */ + vm_stat_account(mm, vma->vm_flags, -unmap_pages); + if (vma->vm_flags & VM_ACCOUNT) + vm_unacct_memory(unmap_pages); + +munmap_full_vma: + validate_mm_mt(mm); + return ret; +} + +/* + * do_brk_flags() - Increase the brk vma if the flags match. + * @mas: The maple tree state. 
+ * @addr: The start address + * @len: The length of the increase + * @vma: The vma, + * @flags: The VMA Flags + * + * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags + * do not match then create a new anonymous VMA. Eventually we may be able to + * do some brk-specific accounting here. + */ +static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, + unsigned long addr, unsigned long len, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *prev = NULL; + validate_mm_mt(mm); - /* Check against address space limits *after* clearing old maps... */ + + /* + * Check against address space limits by the changed size + * Note: This happens *after* clearing old mappings in some code paths. + */ + flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; @@ -2833,30 +2948,56 @@ static int do_brk_flags(unsigned long addr, unsigned long len, if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; - /* Can we just expand an old private anonymous mapping? */ - vma = vma_merge(mm, prev, addr, addr + len, flags, - NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL); - if (vma) - goto out; - /* - * create a vma struct for an anonymous mapping + * Expand the existing vma if possible; Note that singular lists do not + * occur after forking, so the expand will only happen on new VMAs. */ - vma = vm_area_alloc(mm); - if (!vma) { - vm_unacct_memory(len >> PAGE_SHIFT); - return -ENOMEM; + if (vma && + (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) && + ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) { + mas->index = vma->vm_start; + mas->last = addr + len - 1; + vma_adjust_trans_huge(vma, addr, addr + len, 0); + if (vma->anon_vma) { + anon_vma_lock_write(vma->anon_vma); + anon_vma_interval_tree_pre_update_vma(vma); + } + vma->vm_end = addr + len; + vma->vm_flags |= VM_SOFTDIRTY; + if (mas_store_gfp(mas, vma, GFP_KERNEL)) + return -ENOMEM; + + if (vma->anon_vma) { + anon_vma_interval_tree_post_update_vma(vma); + anon_vma_unlock_write(vma->anon_vma); + } + khugepaged_enter_vma(vma, flags); + goto out; } + prev = vma; + + /* create a vma struct for an anonymous mapping */ + vma = vm_area_alloc(mm); + if (!vma) + goto vma_alloc_fail; vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; - vma->vm_pgoff = pgoff; + vma->vm_pgoff = addr >> PAGE_SHIFT; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); - if(vma_link(mm, vma, prev)) - goto no_vma_link; + mas_set_range(mas, vma->vm_start, addr + len - 1); + if ( mas_store_gfp(mas, vma, GFP_KERNEL)) + goto mas_store_fail; + mm->map_count++; + + if (!prev) + prev = mas_prev(mas, 0); + + __vma_link_list(mm, vma, prev); + mm->map_count++; out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; @@ -2867,18 +3008,22 @@ static int do_brk_flags(unsigned long addr, unsigned long len, validate_mm_mt(mm); return 0; -no_vma_link: +mas_store_fail: vm_area_free(vma); +vma_alloc_fail: + vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = NULL; unsigned long len; int ret; bool populate; LIST_HEAD(uf); + MA_STATE(mas, &mm->mm_mt, addr, addr); len = PAGE_ALIGN(request); if (len < request) @@ -2889,13 +3034,38 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) if (mmap_write_lock_killable(mm)) 
return -EINTR; - ret = do_brk_flags(addr, len, flags, &uf); + /* Until we need other flags, refuse anything except VM_EXEC. */ + if ((flags & (~VM_EXEC)) != 0) + return -EINVAL; + + ret = check_brk_limits(addr, len); + if (ret) + goto limits_failed; + + if (find_vma_intersection(mm, addr, addr + len)) + ret = do_munmap(mm, addr, len, &uf); + + if (ret) + goto munmap_failed; + + vma = mas_prev(&mas, 0); + if (!vma || vma->vm_end != addr || vma_policy(vma) || + !can_vma_merge_after(vma, flags, NULL, NULL, + addr >> PAGE_SHIFT,NULL_VM_UFFD_CTX, NULL)) + vma = NULL; + + ret = do_brk_flags(&mas, vma, addr, len, flags); populate = ((mm->def_flags & VM_LOCKED) != 0); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; + +munmap_failed: +limits_failed: + mmap_write_unlock(mm); + return ret; } EXPORT_SYMBOL(vm_brk_flags); From 2931288b5a9a3e930483760aa54454cabc211c47 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:58 +0000 Subject: [PATCH 034/321] mm: use maple tree operations for find_vma_intersection() Move find_vma_intersection() to mmap.c and change implementation to maple tree. When searching for a vma within a range, it is easier to use the maple tree interface. Exported find_vma_intersection() for kvm module. Link: https://lkml.kernel.org/r/20220504011345.662299-8-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-24-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- include/linux/mm.h | 22 ++++------------------ mm/mmap.c | 29 +++++++++++++++++++++++++++++ mm/nommu.c | 11 +++++++++++ 3 files changed, 44 insertions(+), 18 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 9b562a7cf6185..f78c1406a7bdb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2814,26 +2814,12 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); -/** - * find_vma_intersection() - Look up the first VMA which intersects the interval - * @mm: The process address space. - * @start_addr: The inclusive start user address. - * @end_addr: The exclusive end user address. - * - * Returns: The first VMA within the provided range, %NULL otherwise. Assumes - * start_addr < end_addr. +/* + * Look up the first VMA which intersects the interval [start_addr, end_addr) + * NULL if none. Assume start_addr < end_addr. */ -static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, - unsigned long start_addr, - unsigned long end_addr) -{ - struct vm_area_struct *vma = find_vma(mm, start_addr); - - if (vma && end_addr <= vma->vm_start) - vma = NULL; - return vma; -} + unsigned long start_addr, unsigned long end_addr); /** * vma_lookup() - Find a VMA at a specific address diff --git a/mm/mmap.c b/mm/mmap.c index 5033010ee50d4..ae9ef62c6f49e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2093,6 +2093,35 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, EXPORT_SYMBOL(get_unmapped_area); +/** + * find_vma_intersection() - Look up the first VMA which intersects the interval + * @mm: The process address space. + * @start_addr: The inclusive start user address. + * @end_addr: The exclusive end user address. 
+ * + * Returns: The first VMA within the provided range, %NULL otherwise. Assumes + * start_addr < end_addr. + */ +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) +{ + struct vm_area_struct *vma; + unsigned long index = start_addr; + + mmap_assert_locked(mm); + /* Check the cache first. */ + vma = vmacache_find(mm, start_addr); + if (likely(vma)) + return vma; + + vma = mt_find(&mm->mm_mt, &index, end_addr - 1); + if (vma) + vmacache_update(start_addr, vma); + return vma; +} +EXPORT_SYMBOL(find_vma_intersection); + /** * find_vma() - Find the VMA for a given address, or the next vma. * @mm: The mm_struct to check diff --git a/mm/nommu.c b/mm/nommu.c index f2031f865dbb7..b098c02511376 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -642,6 +642,17 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) vm_area_free(vma); } +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) +{ + unsigned long index = start_addr; + + mmap_assert_locked(mm); + return mt_find(&mm->mm_mt, &index, end_addr - 1); +} +EXPORT_SYMBOL(find_vma_intersection); + /* * look up the first VMA in which addr resides, NULL if none * - should be called with mm->mmap_lock at least held readlocked From f56c70a82228b8346a6922f83b36afefc1017340 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:59 +0000 Subject: [PATCH 035/321] mm/mmap: use advanced maple tree API for mmap_region() Changing mmap_region() to use the maple tree state and the advanced maple tree interface allows for a lot less tree walking. This change removes the last caller of munmap_vma_range(), so drop this unused function. Add vma_expand() to expand a VMA if possible by doing the necessary hugepage check, uprobe_munmap of files, dcache flush, modifications then undoing the detaches, etc. Link: https://lkml.kernel.org/r/20220504011345.662299-9-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220519020341.rr3s6b4dr7o36cqb@revolver Link: https://lkml.kernel.org/r/20220621204632.3370049-25-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 252 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 204 insertions(+), 48 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index ae9ef62c6f49e..4363945deea34 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -517,28 +517,6 @@ static inline struct vm_area_struct *__vma_next(struct mm_struct *mm, return vma->vm_next; } -/* - * munmap_vma_range() - munmap VMAs that overlap a range. - * @mm: The mm struct - * @start: The start of the range. - * @len: The length of the range. - * @pprev: pointer to the pointer that will be set to previous vm_area_struct - * - * Find all the vm_area_struct that overlap from @start to - * @end and munmap them. Set @pprev to the previous vm_area_struct. - * - * Returns: -ENOMEM on munmap failure or 0 on success. 
- */ -static inline int -munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len, - struct vm_area_struct **pprev, struct list_head *uf) -{ - while (range_has_overlap(mm, start, start + len, pprev)) - if (do_munmap(mm, start, len, uf)) - return -ENOMEM; - return 0; -} - static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { @@ -665,6 +643,130 @@ static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas, mm->map_count++; } +/* + * vma_expand - Expand an existing VMA + * + * @mas: The maple state + * @vma: The vma to expand + * @start: The start of the vma + * @end: The exclusive end of the vma + * @pgoff: The page offset of vma + * @next: The current of next vma. + * + * Expand @vma to @start and @end. Can expand off the start and end. Will + * expand over @next if it's different from @vma and @end == @next->vm_end. + * Checking if the @vma can expand and merge with @next needs to be handled by + * the caller. + * + * Returns: 0 on success + */ +inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, + unsigned long start, unsigned long end, pgoff_t pgoff, + struct vm_area_struct *next) +{ + struct mm_struct *mm = vma->vm_mm; + struct address_space *mapping = NULL; + struct rb_root_cached *root = NULL; + struct anon_vma *anon_vma = vma->anon_vma; + struct file *file = vma->vm_file; + bool remove_next = false; + bool anon_cloned = false; + + if (next && (vma != next) && (end == next->vm_end)) { + remove_next = true; + if (next->anon_vma && !vma->anon_vma) { + int error; + + vma->anon_vma = next->anon_vma; + error = anon_vma_clone(vma, next); + if (error) + return error; + anon_cloned = true; + } + } + + /* Not merging but overwriting any part of next is not handled. 
*/ + VM_BUG_ON(next && !remove_next && next != vma && end > next->vm_start); + /* Only handles expanding */ + VM_BUG_ON(vma->vm_start < start || vma->vm_end > end); + + if (mas_preallocate(mas, vma, GFP_KERNEL)) + goto nomem; + + vma_adjust_trans_huge(vma, start, end, 0); + + if (anon_vma) { + anon_vma_lock_write(anon_vma); + anon_vma_interval_tree_pre_update_vma(vma); + } + + if (file) { + mapping = file->f_mapping; + root = &mapping->i_mmap; + uprobe_munmap(vma, vma->vm_start, vma->vm_end); + i_mmap_lock_write(mapping); + flush_dcache_mmap_lock(mapping); + vma_interval_tree_remove(vma, root); + } + + vma->vm_start = start; + vma->vm_end = end; + vma->vm_pgoff = pgoff; + /* Note: mas must be pointing to the expanding VMA */ + vma_mas_store(vma, mas); + + if (file) { + vma_interval_tree_insert(vma, root); + flush_dcache_mmap_unlock(mapping); + } + + /* Expanding over the next vma */ + if (remove_next) { + /* Remove from mm linked list - also updates highest_vm_end */ + __vma_unlink_list(mm, next); + + /* Kill the cache */ + vmacache_invalidate(mm); + + if (file) + __remove_shared_vm_struct(next, file, mapping); + + } else if (!next) { + mm->highest_vm_end = vm_end_gap(vma); + } + + if (file) { + i_mmap_unlock_write(mapping); + uprobe_mmap(vma); + } + + if (anon_vma) { + anon_vma_interval_tree_post_update_vma(vma); + anon_vma_unlock_write(anon_vma); + } + + + if (remove_next) { + if (file) { + uprobe_munmap(next, next->vm_start, next->vm_end); + fput(file); + } + if (next->anon_vma) + anon_vma_merge(vma, next); + mm->map_count--; + mpol_put(vma_policy(next)); + vm_area_free(next); + } + + validate_mm(mm); + return 0; + +nomem: + if (anon_cloned) + unlink_anon_vmas(vma); + return -ENOMEM; +} + /* * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that * is already present in an i_mmap tree without adjusting the tree. @@ -1666,9 +1768,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr, struct list_head *uf) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev, *merge; - int error; + struct vm_area_struct *vma = NULL; + struct vm_area_struct *next, *prev, *merge; + pgoff_t pglen = len >> PAGE_SHIFT; unsigned long charged = 0; + unsigned long end = addr + len; + unsigned long merge_start = addr, merge_end = end; + pgoff_t vm_pgoff; + int error; + MA_STATE(mas, &mm->mm_mt, addr, end - 1); /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { @@ -1678,16 +1786,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr, * MAP_FIXED may remove pages of mappings that intersects with * requested mapping. Account for the pages it would unmap. */ - nr_pages = count_vma_pages_range(mm, addr, addr + len); + nr_pages = count_vma_pages_range(mm, addr, end); if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } - /* Clear old maps, set up prev and uf */ - if (munmap_vma_range(mm, addr, len, &prev, uf)) + /* Unmap any existing mapping in the area */ + if (do_munmap(mm, addr, len, uf)) return -ENOMEM; + /* * Private writable mapping: check memory availability */ @@ -1698,14 +1807,43 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vm_flags |= VM_ACCOUNT; } - /* - * Can we just expand an old mapping? 
- */ - vma = vma_merge(mm, prev, addr, addr + len, vm_flags, - NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL); - if (vma) - goto out; + next = mas_next(&mas, ULONG_MAX); + prev = mas_prev(&mas, 0); + if (vm_flags & VM_SPECIAL) + goto cannot_expand; + + /* Attempt to expand an old mapping */ + /* Check next */ + if (next && next->vm_start == end && !vma_policy(next) && + can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, + NULL_VM_UFFD_CTX, NULL)) { + merge_end = next->vm_end; + vma = next; + vm_pgoff = next->vm_pgoff - pglen; + } + + /* Check prev */ + if (prev && prev->vm_end == addr && !vma_policy(prev) && + (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, + pgoff, vma->vm_userfaultfd_ctx, NULL) : + can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, + NULL_VM_UFFD_CTX , NULL))) { + merge_start = prev->vm_start; + vma = prev; + vm_pgoff = prev->vm_pgoff; + } + + + /* Actually expand, if possible */ + if (vma && + !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) { + khugepaged_enter_vma(vma, vm_flags); + goto expanded; + } + mas.index = addr; + mas.last = end - 1; +cannot_expand: /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but @@ -1718,7 +1856,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, } vma->vm_start = addr; - vma->vm_end = addr + len; + vma->vm_end = end; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; @@ -1739,28 +1877,32 @@ unsigned long mmap_region(struct file *file, unsigned long addr, * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. -DaveM - * Bug: If addr is changed, prev, rb_link, rb_parent should - * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; + mas_reset(&mas); - /* If vm_flags changed after call_mmap(), we should try merge vma again - * as we may succeed this time. + /* + * If vm_flags changed after call_mmap(), we should try merge + * vma again as we may succeed this time. */ if (unlikely(vm_flags != vma->vm_flags && prev)) { merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL); if (merge) { - /* ->mmap() can change vma->vm_file and fput the original file. So - * fput the vma->vm_file here or we would add an extra fput for file - * and cause general protection fault ultimately. + /* + * ->mmap() can change vma->vm_file and fput + * the original file. So fput the vma->vm_file + * here or we would add an extra fput for file + * and cause general protection fault + * ultimately. */ fput(vma->vm_file); vm_area_free(vma); vma = merge; /* Update vm_flags to pick up the change. 
*/ + addr = vma->vm_start; vm_flags = vma->vm_flags; goto unmap_writable; } @@ -1784,7 +1926,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, goto free_vma; } - if (vma_link(mm, vma, prev)) { + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { error = -ENOMEM; if (file) goto unmap_and_free_vma; @@ -1792,6 +1934,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr, goto free_vma; } + if (vma->vm_file) + i_mmap_lock_write(vma->vm_file->f_mapping); + + vma_mas_store(vma, &mas); + __vma_link_list(mm, vma, prev); + mm->map_count++; + if (vma->vm_file) { + if (vma->vm_flags & VM_SHARED) + mapping_allow_writable(vma->vm_file->f_mapping); + + flush_dcache_mmap_lock(vma->vm_file->f_mapping); + vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); + flush_dcache_mmap_unlock(vma->vm_file->f_mapping); + i_mmap_unlock_write(vma->vm_file->f_mapping); + } + /* * vma_merge() calls khugepaged_enter_vma() either, the below * call covers the non-merge case. @@ -1803,7 +1961,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if (file && vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); file = vma->vm_file; -out: +expanded: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); @@ -1830,6 +1988,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_set_page_prot(vma); + validate_mm(mm); return addr; unmap_and_free_vma: @@ -1846,6 +2005,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, unacct_error: if (charged) vm_unacct_memory(charged); + validate_mm(mm); return error; } @@ -2662,10 +2822,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, prev = vma->vm_prev; /* we have start < vma->vm_end */ - /* if it doesn't overlap, we have nothing.. */ - if (vma->vm_start >= end) - return 0; - /* * If we need to split any vma, do it now to save pain later. * From d06b39f7be08fbb5582648b5a1a881073a2dfad5 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:59 +0000 Subject: [PATCH 036/321] mm: remove vmacache By using the maple tree and the maple tree state, the vmacache is no longer beneficial and is complicating the VMA code. Remove the vmacache to reduce the work in keeping it up to date and code complexity. Link: https://lkml.kernel.org/r/20220504011345.662299-10-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-26-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
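To summarize what the removal below buys (an illustrative before/after, not a hunk from this patch): a lookup no longer consults the four-entry per-task cache or the per-mm sequence number, and unmap paths no longer need to invalidate anything, because the maple tree is cheap enough to read directly.

	/*
	 * before: hash addr, validate tsk->vmacache.seqnum against
	 *         mm->vmacache_seqnum, scan the four cached slots, fall
	 *         back to the tree on a miss, and bump the seqnum on
	 *         every unmap;
	 * after:  find_vma() is a single read-only tree walk.
	 */
	struct vm_area_struct *vma;
	unsigned long index = addr;

	mmap_assert_locked(mm);
	vma = mt_find(&mm->mm_mt, &index, ULONG_MAX);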
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/exec.c | 3 - fs/proc/task_mmu.c | 1 - include/linux/mm_types.h | 1 - include/linux/mm_types_task.h | 12 ---- include/linux/sched.h | 1 - include/linux/vm_event_item.h | 4 -- include/linux/vmacache.h | 28 -------- include/linux/vmstat.h | 6 -- kernel/debug/debug_core.c | 12 ---- kernel/fork.c | 5 -- lib/Kconfig.debug | 8 --- mm/Makefile | 2 +- mm/debug.c | 4 +- mm/mmap.c | 32 +--------- mm/nommu.c | 37 ++--------- mm/vmacache.c | 117 ---------------------------------- mm/vmstat.c | 4 -- 17 files changed, 9 insertions(+), 268 deletions(-) delete mode 100644 include/linux/vmacache.h delete mode 100644 mm/vmacache.c diff --git a/fs/exec.c b/fs/exec.c index 0989fb8472a18..b97afa682ffe9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -1027,8 +1026,6 @@ static int exec_mmap(struct mm_struct *mm) activate_mm(active_mm, mm); if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) local_irq_enable(); - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); task_unlock(tsk); if (old_mm) { mmap_read_unlock(old_mm); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 751c19d5bfdd9..d2011d9242c0e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include #include #include diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0e226d390fca6..040bfc207cb1a 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -470,7 +470,6 @@ struct mm_struct { struct { struct vm_area_struct *mmap; /* list of VMAs */ struct maple_tree mm_mt; - u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index c1bc6731125cb..0bb4b6da99939 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -24,18 +24,6 @@ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) -/* - * The per task VMA cache array: - */ -#define VMACACHE_BITS 2 -#define VMACACHE_SIZE (1U << VMACACHE_BITS) -#define VMACACHE_MASK (VMACACHE_SIZE - 1) - -struct vmacache { - u64 seqnum; - struct vm_area_struct *vmas[VMACACHE_SIZE]; -}; - /* * When updating this, please also update struct resident_page_types[] in * kernel/fork.c diff --git a/include/linux/sched.h b/include/linux/sched.h index c46f3a63b758f..dc131048d46aa 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -859,7 +859,6 @@ struct task_struct { struct mm_struct *active_mm; /* Per-thread vma caching: */ - struct vmacache vmacache; #ifdef SPLIT_RSS_COUNTING struct task_rss_stat rss_stat; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 404024486fa53..1ce8fadb2b1c7 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -122,10 +122,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_TLB_LOCAL_FLUSH_ALL, NR_TLB_LOCAL_FLUSH_ONE, #endif /* CONFIG_DEBUG_TLBFLUSH */ -#ifdef CONFIG_DEBUG_VM_VMACACHE - VMACACHE_FIND_CALLS, - VMACACHE_FIND_HITS, -#endif #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h deleted file mode 100644 index 
6fce268a4588e..0000000000000 --- a/include/linux/vmacache.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_VMACACHE_H -#define __LINUX_VMACACHE_H - -#include -#include - -static inline void vmacache_flush(struct task_struct *tsk) -{ - memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); -} - -extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); -extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, - unsigned long addr); - -#ifndef CONFIG_MMU -extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, - unsigned long start, - unsigned long end); -#endif - -static inline void vmacache_invalidate(struct mm_struct *mm) -{ - mm->vmacache_seqnum++; -} - -#endif /* __LINUX_VMACACHE_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index bfe38869498d7..19cf5b6892ceb 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -125,12 +125,6 @@ static inline void vm_events_fold_cpu(int cpu) #define count_vm_tlb_events(x, y) do { (void)(y); } while (0) #endif -#ifdef CONFIG_DEBUG_VM_VMACACHE -#define count_vm_vmacache_event(x) count_vm_event(x) -#else -#define count_vm_vmacache_event(x) do {} while (0) -#endif - #define __count_zid_vm_events(item, zid, delta) \ __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 7beceb447211d..d5e9ccde3ab8e 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -283,17 +282,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) if (!CACHE_FLUSH_IS_SAFE) return; - if (current->mm) { - int i; - - for (i = 0; i < VMACACHE_SIZE; i++) { - if (!current->vmacache.vmas[i]) - continue; - flush_cache_range(current->vmacache.vmas[i], - addr, addr + BREAK_INSTR_SIZE); - } - } - /* Force flush instruction cache if it was outside the mm */ flush_icache_range(addr, addr + BREAK_INSTR_SIZE); } diff --git a/kernel/fork.c b/kernel/fork.c index db420a1e93c7d..a653b259c3212 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -1123,7 +1122,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm->mmap = NULL; mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); - mm->vmacache_seqnum = 0; atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); seqcount_init(&mm->write_protect_seq); @@ -1578,9 +1576,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) if (!oldmm) return 0; - /* initialize the new vmacache entries */ - vmacache_flush(tsk); - if (clone_flags & CLONE_VM) { mmget(oldmm); mm = oldmm; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a8a36e5897552..fb328c095d5b2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -813,14 +813,6 @@ config DEBUG_VM If unsure, say N. -config DEBUG_VM_VMACACHE - bool "Debug VMA caching" - depends on DEBUG_VM - help - Enable this to turn on VMA caching debug information. Doing so - can cause significant overhead, so only enable it in non-production - environments. 
- config DEBUG_VM_MAPLE_TREE bool "Debug VM maple trees" depends on DEBUG_VM diff --git a/mm/Makefile b/mm/Makefile index 9a564f8364035..8083fa85a3487 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -52,7 +52,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ mm_init.o percpu.o slab_common.o \ - compaction.o vmacache.o \ + compaction.o \ interval_tree.o list_lru.o workingset.o \ debug.o gup.o mmap_lock.o $(mmu-y) diff --git a/mm/debug.c b/mm/debug.c index bef329bf28f01..2d625ca0e3269 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -155,7 +155,7 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" + pr_emerg("mm %px mmap %px task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif @@ -183,7 +183,7 @@ void dump_mm(const struct mm_struct *mm) "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", - mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, + mm, mm->mmap, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif diff --git a/mm/mmap.c b/mm/mmap.c index 4363945deea34..8201c50f47bf9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -725,9 +724,6 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, /* Remove from mm linked list - also updates highest_vm_end */ __vma_unlink_list(mm, next); - /* Kill the cache */ - vmacache_invalidate(mm); - if (file) __remove_shared_vm_struct(next, file, mapping); @@ -967,8 +963,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (remove_next) { __vma_unlink_list(mm, next); - /* Kill the cache */ - vmacache_invalidate(mm); if (file) __remove_shared_vm_struct(next, file, mapping); } else if (insert) { @@ -2266,19 +2260,10 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, unsigned long start_addr, unsigned long end_addr) { - struct vm_area_struct *vma; unsigned long index = start_addr; mmap_assert_locked(mm); - /* Check the cache first. */ - vma = vmacache_find(mm, start_addr); - if (likely(vma)) - return vma; - - vma = mt_find(&mm->mm_mt, &index, end_addr - 1); - if (vma) - vmacache_update(start_addr, vma); - return vma; + return mt_find(&mm->mm_mt, &index, end_addr - 1); } EXPORT_SYMBOL(find_vma_intersection); @@ -2292,19 +2277,10 @@ EXPORT_SYMBOL(find_vma_intersection); */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { - struct vm_area_struct *vma; unsigned long index = addr; mmap_assert_locked(mm); - /* Check the cache first. */ - vma = vmacache_find(mm, addr); - if (likely(vma)) - return vma; - - vma = mt_find(&mm->mm_mt, &index, ULONG_MAX); - if (vma) - vmacache_update(addr, vma); - return vma; + return mt_find(&mm->mm_mt, &index, ULONG_MAX); } EXPORT_SYMBOL(find_vma); @@ -2694,9 +2670,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas, mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; - /* Kill the cache */ - vmacache_invalidate(mm); - /* * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or * VM_GROWSUP VMA. 
Such VMAs can change their size under @@ -3076,7 +3049,6 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, vma_mas_remove(&unmap, mas); - vmacache_invalidate(vma->vm_mm); if (vma->anon_vma) { anon_vma_interval_tree_post_update_vma(vma); anon_vma_unlock_write(vma->anon_vma); diff --git a/mm/nommu.c b/mm/nommu.c index b098c02511376..4ae252a785dcb 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -598,23 +597,12 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) */ static void delete_vma_from_mm(struct vm_area_struct *vma) { - int i; - struct address_space *mapping; - struct mm_struct *mm = vma->vm_mm; - struct task_struct *curr = current; MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0); - mm->map_count--; - for (i = 0; i < VMACACHE_SIZE; i++) { - /* if the vma is cached, invalidate the entire cache */ - if (curr->vmacache.vmas[i] == vma) { - vmacache_invalidate(mm); - break; - } - } - + vma->vm_mm->map_count--; /* remove the VMA from the mapping */ if (vma->vm_file) { + struct address_space *mapping; mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); @@ -626,7 +614,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) /* remove from the MM's tree and list */ vma_mas_remove(vma, &mas); - __vma_unlink_list(mm, vma); + __vma_unlink_list(vma->vm_mm, vma); } /* @@ -659,20 +647,9 @@ EXPORT_SYMBOL(find_vma_intersection); */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { - struct vm_area_struct *vma; MA_STATE(mas, &mm->mm_mt, addr, addr); - /* check the cache first */ - vma = vmacache_find(mm, addr); - if (likely(vma)) - return vma; - - vma = mas_walk(&mas); - - if (vma) - vmacache_update(addr, vma); - - return vma; + return mas_walk(&mas); } EXPORT_SYMBOL(find_vma); @@ -706,11 +683,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, unsigned long end = addr + len; MA_STATE(mas, &mm->mm_mt, addr, addr); - /* check the cache first */ - vma = vmacache_find_exact(mm, addr, end); - if (vma) - return vma; - vma = mas_walk(&mas); if (!vma) return NULL; @@ -719,7 +691,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, if (vma->vm_end != end) return NULL; - vmacache_update(addr, vma); return vma; } diff --git a/mm/vmacache.c b/mm/vmacache.c deleted file mode 100644 index 01a6e6688ec1f..0000000000000 --- a/mm/vmacache.c +++ /dev/null @@ -1,117 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2014 Davidlohr Bueso. - */ -#include -#include -#include -#include - -/* - * Hash based on the pmd of addr if configured with MMU, which provides a good - * hit rate for workloads with spatial locality. Otherwise, use pages. - */ -#ifdef CONFIG_MMU -#define VMACACHE_SHIFT PMD_SHIFT -#else -#define VMACACHE_SHIFT PAGE_SHIFT -#endif -#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK) - -/* - * This task may be accessing a foreign mm via (for example) - * get_user_pages()->find_vma(). The vmacache is task-local and this - * task's vmacache pertains to a different mm (ie, its own). There is - * nothing we can do here. - * - * Also handle the case where a kernel thread has adopted this mm via - * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm. 
- */ -static inline bool vmacache_valid_mm(struct mm_struct *mm) -{ - return current->mm == mm && !(current->flags & PF_KTHREAD); -} - -void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) -{ - if (vmacache_valid_mm(newvma->vm_mm)) - current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; -} - -static bool vmacache_valid(struct mm_struct *mm) -{ - struct task_struct *curr; - - if (!vmacache_valid_mm(mm)) - return false; - - curr = current; - if (mm->vmacache_seqnum != curr->vmacache.seqnum) { - /* - * First attempt will always be invalid, initialize - * the new cache for this task here. - */ - curr->vmacache.seqnum = mm->vmacache_seqnum; - vmacache_flush(curr); - return false; - } - return true; -} - -struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) -{ - int idx = VMACACHE_HASH(addr); - int i; - - count_vm_vmacache_event(VMACACHE_FIND_CALLS); - - if (!vmacache_valid(mm)) - return NULL; - - for (i = 0; i < VMACACHE_SIZE; i++) { - struct vm_area_struct *vma = current->vmacache.vmas[idx]; - - if (vma) { -#ifdef CONFIG_DEBUG_VM_VMACACHE - if (WARN_ON_ONCE(vma->vm_mm != mm)) - break; -#endif - if (vma->vm_start <= addr && vma->vm_end > addr) { - count_vm_vmacache_event(VMACACHE_FIND_HITS); - return vma; - } - } - if (++idx == VMACACHE_SIZE) - idx = 0; - } - - return NULL; -} - -#ifndef CONFIG_MMU -struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, - unsigned long start, - unsigned long end) -{ - int idx = VMACACHE_HASH(start); - int i; - - count_vm_vmacache_event(VMACACHE_FIND_CALLS); - - if (!vmacache_valid(mm)) - return NULL; - - for (i = 0; i < VMACACHE_SIZE; i++) { - struct vm_area_struct *vma = current->vmacache.vmas[idx]; - - if (vma && vma->vm_start == start && vma->vm_end == end) { - count_vm_vmacache_event(VMACACHE_FIND_HITS); - return vma; - } - if (++idx == VMACACHE_SIZE) - idx = 0; - } - - return NULL; -} -#endif diff --git a/mm/vmstat.c b/mm/vmstat.c index 373d2730fcf21..da7e389cf33c9 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1382,10 +1382,6 @@ const char * const vmstat_text[] = { "nr_tlb_local_flush_one", #endif /* CONFIG_DEBUG_TLBFLUSH */ -#ifdef CONFIG_DEBUG_VM_VMACACHE - "vmacache_find_calls", - "vmacache_find_hits", -#endif #ifdef CONFIG_SWAP "swap_ra", "swap_ra_hit", From 6a66f215e4daeb1bddd30bf32ff9b4a936d41e37 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:46:59 +0000 Subject: [PATCH 037/321] mm: convert vma_lookup() to use mtree_load() Unlike the rbtree, the Maple Tree will return a NULL if there's nothing at a particular address. Since the previous commit dropped the vmacache, it is now possible to consult the tree directly. Link: https://lkml.kernel.org/r/20220504011345.662299-11-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-27-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
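In other words (illustration only, mirroring the one-line change below): with the cache gone, an exact-address lookup is a single load from the tree, which returns NULL when nothing is mapped at that address.

	struct vm_area_struct *vma = mtree_load(&mm->mm_mt, addr);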
Howlett Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- include/linux/mm.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index f78c1406a7bdb..db004e2b84b66 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2831,12 +2831,7 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) { - struct vm_area_struct *vma = find_vma(mm, addr); - - if (vma && addr < vma->vm_start) - vma = NULL; - - return vma; + return mtree_load(&mm->mm_mt, addr); } static inline unsigned long vm_start_gap(struct vm_area_struct *vma) From 5d327531c0dbe989782dbcbd089e6ee36b00daa0 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:01 +0000 Subject: [PATCH 038/321] mm/mmap: move mmap_region() below do_munmap() Relocation of code for the next commit. There should be no changes here. Link: https://lkml.kernel.org/r/20220504011345.662299-12-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-28-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 495 +++++++++++++++++++++++++++--------------------------- 1 file changed, 248 insertions(+), 247 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 8201c50f47bf9..07d0e58f06809 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1757,253 +1757,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } -unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf) -{ - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma = NULL; - struct vm_area_struct *next, *prev, *merge; - pgoff_t pglen = len >> PAGE_SHIFT; - unsigned long charged = 0; - unsigned long end = addr + len; - unsigned long merge_start = addr, merge_end = end; - pgoff_t vm_pgoff; - int error; - MA_STATE(mas, &mm->mm_mt, addr, end - 1); - - /* Check against address space limit. */ - if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { - unsigned long nr_pages; - - /* - * MAP_FIXED may remove pages of mappings that intersects with - * requested mapping. Account for the pages it would unmap. 
- */ - nr_pages = count_vma_pages_range(mm, addr, end); - - if (!may_expand_vm(mm, vm_flags, - (len >> PAGE_SHIFT) - nr_pages)) - return -ENOMEM; - } - - /* Unmap any existing mapping in the area */ - if (do_munmap(mm, addr, len, uf)) - return -ENOMEM; - - /* - * Private writable mapping: check memory availability - */ - if (accountable_mapping(file, vm_flags)) { - charged = len >> PAGE_SHIFT; - if (security_vm_enough_memory_mm(mm, charged)) - return -ENOMEM; - vm_flags |= VM_ACCOUNT; - } - - next = mas_next(&mas, ULONG_MAX); - prev = mas_prev(&mas, 0); - if (vm_flags & VM_SPECIAL) - goto cannot_expand; - - /* Attempt to expand an old mapping */ - /* Check next */ - if (next && next->vm_start == end && !vma_policy(next) && - can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, - NULL_VM_UFFD_CTX, NULL)) { - merge_end = next->vm_end; - vma = next; - vm_pgoff = next->vm_pgoff - pglen; - } - - /* Check prev */ - if (prev && prev->vm_end == addr && !vma_policy(prev) && - (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, - pgoff, vma->vm_userfaultfd_ctx, NULL) : - can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, - NULL_VM_UFFD_CTX , NULL))) { - merge_start = prev->vm_start; - vma = prev; - vm_pgoff = prev->vm_pgoff; - } - - - /* Actually expand, if possible */ - if (vma && - !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) { - khugepaged_enter_vma(vma, vm_flags); - goto expanded; - } - - mas.index = addr; - mas.last = end - 1; -cannot_expand: - /* - * Determine the object being mapped and call the appropriate - * specific mapper. the address has already been validated, but - * not unmapped, but the maps are removed from the list. - */ - vma = vm_area_alloc(mm); - if (!vma) { - error = -ENOMEM; - goto unacct_error; - } - - vma->vm_start = addr; - vma->vm_end = end; - vma->vm_flags = vm_flags; - vma->vm_page_prot = vm_get_page_prot(vm_flags); - vma->vm_pgoff = pgoff; - - if (file) { - if (vm_flags & VM_SHARED) { - error = mapping_map_writable(file->f_mapping); - if (error) - goto free_vma; - } - - vma->vm_file = get_file(file); - error = call_mmap(file, vma); - if (error) - goto unmap_and_free_vma; - - /* Can addr have changed?? - * - * Answer: Yes, several device drivers can do it in their - * f_op->mmap method. -DaveM - */ - WARN_ON_ONCE(addr != vma->vm_start); - - addr = vma->vm_start; - mas_reset(&mas); - - /* - * If vm_flags changed after call_mmap(), we should try merge - * vma again as we may succeed this time. - */ - if (unlikely(vm_flags != vma->vm_flags && prev)) { - merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, - NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL); - if (merge) { - /* - * ->mmap() can change vma->vm_file and fput - * the original file. So fput the vma->vm_file - * here or we would add an extra fput for file - * and cause general protection fault - * ultimately. - */ - fput(vma->vm_file); - vm_area_free(vma); - vma = merge; - /* Update vm_flags to pick up the change. 
*/ - addr = vma->vm_start; - vm_flags = vma->vm_flags; - goto unmap_writable; - } - } - - vm_flags = vma->vm_flags; - } else if (vm_flags & VM_SHARED) { - error = shmem_zero_setup(vma); - if (error) - goto free_vma; - } else { - vma_set_anonymous(vma); - } - - /* Allow architectures to sanity-check the vm_flags */ - if (!arch_validate_flags(vma->vm_flags)) { - error = -EINVAL; - if (file) - goto unmap_and_free_vma; - else - goto free_vma; - } - - if (mas_preallocate(&mas, vma, GFP_KERNEL)) { - error = -ENOMEM; - if (file) - goto unmap_and_free_vma; - else - goto free_vma; - } - - if (vma->vm_file) - i_mmap_lock_write(vma->vm_file->f_mapping); - - vma_mas_store(vma, &mas); - __vma_link_list(mm, vma, prev); - mm->map_count++; - if (vma->vm_file) { - if (vma->vm_flags & VM_SHARED) - mapping_allow_writable(vma->vm_file->f_mapping); - - flush_dcache_mmap_lock(vma->vm_file->f_mapping); - vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); - flush_dcache_mmap_unlock(vma->vm_file->f_mapping); - i_mmap_unlock_write(vma->vm_file->f_mapping); - } - - /* - * vma_merge() calls khugepaged_enter_vma() either, the below - * call covers the non-merge case. - */ - khugepaged_enter_vma(vma, vma->vm_flags); - - /* Once vma denies write, undo our temporary denial count */ -unmap_writable: - if (file && vm_flags & VM_SHARED) - mapping_unmap_writable(file->f_mapping); - file = vma->vm_file; -expanded: - perf_event_mmap(vma); - - vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); - if (vm_flags & VM_LOCKED) { - if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || - is_vm_hugetlb_page(vma) || - vma == get_gate_vma(current->mm)) - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; - else - mm->locked_vm += (len >> PAGE_SHIFT); - } - - if (file) - uprobe_mmap(vma); - - /* - * New (or expanded) vma always get soft dirty status. - * Otherwise user-space soft-dirty page tracker won't - * be able to distinguish situation when vma area unmapped, - * then new mapped in-place (which must be aimed as - * a completely new data area). - */ - vma->vm_flags |= VM_SOFTDIRTY; - - vma_set_page_prot(vma); - - validate_mm(mm); - return addr; - -unmap_and_free_vma: - fput(vma->vm_file); - vma->vm_file = NULL; - - /* Undo any partial mapping done by a device driver. */ - unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); - charged = 0; - if (vm_flags & VM_SHARED) - mapping_unmap_writable(file->f_mapping); -free_vma: - vm_area_free(vma); -unacct_error: - if (charged) - vm_unacct_memory(charged); - validate_mm(mm); - return error; -} - -/* unmapped_area() Find an area between the low_limit and the high_limit with +/* + * unmapped_area() Find an area between the low_limit and the high_limit with * the correct alignment and offset, all from @info. Note: current->mm is used * for the search. * @@ -2871,6 +2626,252 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, return __do_munmap(mm, start, len, uf, false); } +unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = NULL; + struct vm_area_struct *next, *prev, *merge; + pgoff_t pglen = len >> PAGE_SHIFT; + unsigned long charged = 0; + unsigned long end = addr + len; + unsigned long merge_start = addr, merge_end = end; + pgoff_t vm_pgoff; + int error; + MA_STATE(mas, &mm->mm_mt, addr, end - 1); + + /* Check against address space limit. 
*/ + if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { + unsigned long nr_pages; + + /* + * MAP_FIXED may remove pages of mappings that intersects with + * requested mapping. Account for the pages it would unmap. + */ + nr_pages = count_vma_pages_range(mm, addr, end); + + if (!may_expand_vm(mm, vm_flags, + (len >> PAGE_SHIFT) - nr_pages)) + return -ENOMEM; + } + + /* Unmap any existing mapping in the area */ + if (do_munmap(mm, addr, len, uf)) + return -ENOMEM; + + /* + * Private writable mapping: check memory availability + */ + if (accountable_mapping(file, vm_flags)) { + charged = len >> PAGE_SHIFT; + if (security_vm_enough_memory_mm(mm, charged)) + return -ENOMEM; + vm_flags |= VM_ACCOUNT; + } + + next = mas_next(&mas, ULONG_MAX); + prev = mas_prev(&mas, 0); + if (vm_flags & VM_SPECIAL) + goto cannot_expand; + + /* Attempt to expand an old mapping */ + /* Check next */ + if (next && next->vm_start == end && !vma_policy(next) && + can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, + NULL_VM_UFFD_CTX, NULL)) { + merge_end = next->vm_end; + vma = next; + vm_pgoff = next->vm_pgoff - pglen; + } + + /* Check prev */ + if (prev && prev->vm_end == addr && !vma_policy(prev) && + (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, + pgoff, vma->vm_userfaultfd_ctx, NULL) : + can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, + NULL_VM_UFFD_CTX , NULL))) { + merge_start = prev->vm_start; + vma = prev; + vm_pgoff = prev->vm_pgoff; + } + + + /* Actually expand, if possible */ + if (vma && + !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) { + khugepaged_enter_vma(vma, vm_flags); + goto expanded; + } + + mas.index = addr; + mas.last = end - 1; +cannot_expand: + /* + * Determine the object being mapped and call the appropriate + * specific mapper. the address has already been validated, but + * not unmapped, but the maps are removed from the list. + */ + vma = vm_area_alloc(mm); + if (!vma) { + error = -ENOMEM; + goto unacct_error; + } + + vma->vm_start = addr; + vma->vm_end = end; + vma->vm_flags = vm_flags; + vma->vm_page_prot = vm_get_page_prot(vm_flags); + vma->vm_pgoff = pgoff; + + if (file) { + if (vm_flags & VM_SHARED) { + error = mapping_map_writable(file->f_mapping); + if (error) + goto free_vma; + } + + vma->vm_file = get_file(file); + error = call_mmap(file, vma); + if (error) + goto unmap_and_free_vma; + + /* Can addr have changed?? + * + * Answer: Yes, several device drivers can do it in their + * f_op->mmap method. -DaveM + */ + WARN_ON_ONCE(addr != vma->vm_start); + + addr = vma->vm_start; + mas_reset(&mas); + + /* + * If vm_flags changed after call_mmap(), we should try merge + * vma again as we may succeed this time. + */ + if (unlikely(vm_flags != vma->vm_flags && prev)) { + merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, + NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL); + if (merge) { + /* + * ->mmap() can change vma->vm_file and fput + * the original file. So fput the vma->vm_file + * here or we would add an extra fput for file + * and cause general protection fault + * ultimately. + */ + fput(vma->vm_file); + vm_area_free(vma); + vma = merge; + /* Update vm_flags to pick up the change. 
*/ + addr = vma->vm_start; + vm_flags = vma->vm_flags; + goto unmap_writable; + } + } + + vm_flags = vma->vm_flags; + } else if (vm_flags & VM_SHARED) { + error = shmem_zero_setup(vma); + if (error) + goto free_vma; + } else { + vma_set_anonymous(vma); + } + + /* Allow architectures to sanity-check the vm_flags */ + if (!arch_validate_flags(vma->vm_flags)) { + error = -EINVAL; + if (file) + goto unmap_and_free_vma; + else + goto free_vma; + } + + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { + error = -ENOMEM; + if (file) + goto unmap_and_free_vma; + else + goto free_vma; + } + + if (vma->vm_file) + i_mmap_lock_write(vma->vm_file->f_mapping); + + vma_mas_store(vma, &mas); + __vma_link_list(mm, vma, prev); + mm->map_count++; + if (vma->vm_file) { + if (vma->vm_flags & VM_SHARED) + mapping_allow_writable(vma->vm_file->f_mapping); + + flush_dcache_mmap_lock(vma->vm_file->f_mapping); + vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); + flush_dcache_mmap_unlock(vma->vm_file->f_mapping); + i_mmap_unlock_write(vma->vm_file->f_mapping); + } + + /* + * vma_merge() calls khugepaged_enter_vma() either, the below + * call covers the non-merge case. + */ + khugepaged_enter_vma(vma, vma->vm_flags); + + /* Once vma denies write, undo our temporary denial count */ +unmap_writable: + if (file && vm_flags & VM_SHARED) + mapping_unmap_writable(file->f_mapping); + file = vma->vm_file; +expanded: + perf_event_mmap(vma); + + vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); + if (vm_flags & VM_LOCKED) { + if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || + is_vm_hugetlb_page(vma) || + vma == get_gate_vma(current->mm)) + vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + else + mm->locked_vm += (len >> PAGE_SHIFT); + } + + if (file) + uprobe_mmap(vma); + + /* + * New (or expanded) vma always get soft dirty status. + * Otherwise user-space soft-dirty page tracker won't + * be able to distinguish situation when vma area unmapped, + * then new mapped in-place (which must be aimed as + * a completely new data area). + */ + vma->vm_flags |= VM_SOFTDIRTY; + + vma_set_page_prot(vma); + + validate_mm(mm); + return addr; + +unmap_and_free_vma: + fput(vma->vm_file); + vma->vm_file = NULL; + + /* Undo any partial mapping done by a device driver. */ + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + charged = 0; + if (vm_flags & VM_SHARED) + mapping_unmap_writable(file->f_mapping); +free_vma: + vm_area_free(vma); +unacct_error: + if (charged) + vm_unacct_memory(charged); + validate_mm(mm); + return error; +} + static int __vm_munmap(unsigned long start, size_t len, bool downgrade) { int ret; From f53608661b03f8dd3b75cca80a3501c965d60678 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:01 +0000 Subject: [PATCH 039/321] mm/mmap: reorganize munmap to use maple states Remove __do_munmap() in favour of do_munmap(), do_mas_munmap(), and do_mas_align_munmap(). do_munmap() is a wrapper to create a maple state for any callers that have not been converted to the maple tree. do_mas_munmap() takes a maple state to mumap a range. This is just a small function which checks for error conditions and aligns the end of the range. do_mas_align_munmap() uses the aligned range to mumap a range. do_mas_align_munmap() starts with the first VMA in the range, then finds the last VMA in the range. Both start and end are split if necessary. Then the VMAs are removed from the linked list and the mm mlock count is updated at the same time. 
Followed by a single tree operation of overwriting the area in with a NULL. Finally, the detached list is unmapped and freed. By reorganizing the munmap calls as outlined, it is now possible to avoid extra work of aligning pre-aligned callers which are known to be safe, avoid extra VMA lookups or tree walks for modifications. detach_vmas_to_be_unmapped() is no longer used, so drop this code. vm_brk_flags() can just call the do_mas_munmap() as it checks for intersecting VMAs directly. Link: https://lkml.kernel.org/r/20220504011345.662299-13-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-29-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- include/linux/mm.h | 5 +- mm/mmap.c | 231 ++++++++++++++++++++++++++++----------------- mm/mremap.c | 17 ++-- 3 files changed, 158 insertions(+), 95 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index db004e2b84b66..376ccbcac8c2a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2746,8 +2746,9 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr, extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf); -extern int __do_munmap(struct mm_struct *, unsigned long, size_t, - struct list_head *uf, bool downgrade); +extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm, + unsigned long start, size_t len, struct list_head *uf, + bool downgrade); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); diff --git a/mm/mmap.c b/mm/mmap.c index 07d0e58f06809..f3d597519e60d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2396,47 +2396,6 @@ static void unmap_region(struct mm_struct *mm, tlb_finish_mmu(&tlb); } -/* - * Create a list of vma's touched by the unmap, removing them from the mm's - * vma list as we go.. - */ -static bool -detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas, - struct vm_area_struct *vma, struct vm_area_struct *prev, - unsigned long end) -{ - struct vm_area_struct **insertion_point; - struct vm_area_struct *tail_vma = NULL; - - insertion_point = (prev ? &prev->vm_next : &mm->mmap); - vma->vm_prev = NULL; - vma_mas_szero(mas, vma->vm_start, end); - do { - if (vma->vm_flags & VM_LOCKED) - mm->locked_vm -= vma_pages(vma); - mm->map_count--; - tail_vma = vma; - vma = vma->vm_next; - } while (vma && vma->vm_start < end); - *insertion_point = vma; - if (vma) - vma->vm_prev = prev; - else - mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; - tail_vma->vm_next = NULL; - - /* - * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or - * VM_GROWSUP VMA. Such VMAs can change their size under - * down_read(mmap_lock) and collide with the VMA we are about to unmap. - */ - if (vma && (vma->vm_flags & VM_GROWSDOWN)) - return false; - if (prev && (prev->vm_flags & VM_GROWSUP)) - return false; - return true; -} - /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. 
@@ -2516,40 +2475,51 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, return __split_vma(mm, vma, addr, new_below); } -/* Munmap is split into 2 main parts -- this part which finds - * what needs doing, and the areas themselves, which do the - * work. This now handles partial unmappings. - * Jeremy Fitzhardinge - */ -int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, - struct list_head *uf, bool downgrade) +static inline int +unlock_range(struct vm_area_struct *start, struct vm_area_struct **tail, + unsigned long limit) { - unsigned long end; - struct vm_area_struct *vma, *prev, *last; - int error = -ENOMEM; - MA_STATE(mas, &mm->mm_mt, 0, 0); + struct mm_struct *mm = start->vm_mm; + struct vm_area_struct *tmp = start; + int count = 0; - if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) - return -EINVAL; + while (tmp && tmp->vm_start < limit) { + *tail = tmp; + count++; + if (tmp->vm_flags & VM_LOCKED) + mm->locked_vm -= vma_pages(tmp); - len = PAGE_ALIGN(len); - end = start + len; - if (len == 0) - return -EINVAL; + tmp = tmp->vm_next; + } - /* arch_unmap() might do unmaps itself. */ - arch_unmap(mm, start, end); + return count; +} - /* Find the first overlapping VMA where start < vma->vm_end */ - vma = find_vma_intersection(mm, start, end); - if (!vma) - return 0; +/* + * do_mas_align_munmap() - munmap the aligned region from @start to @end. + * @mas: The maple_state, ideally set up to alter the correct tree location. + * @vma: The starting vm_area_struct + * @mm: The mm_struct + * @start: The aligned start address to munmap. + * @end: The aligned end address to munmap. + * @uf: The userfaultfd list_head + * @downgrade: Set to true to attempt a write downgrade of the mmap_sem + * + * If @downgrade is true, check return code for potential release of the lock. + */ +static int +do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, + struct mm_struct *mm, unsigned long start, + unsigned long end, struct list_head *uf, bool downgrade) +{ + struct vm_area_struct *prev, *last; + int error = -ENOMEM; + /* we have start < vma->vm_end */ - if (mas_preallocate(&mas, vma, GFP_KERNEL)) + if (mas_preallocate(mas, vma, GFP_KERNEL)) return -ENOMEM; - prev = vma->vm_prev; - /* we have start < vma->vm_end */ + mas->last = end - 1; /* * If we need to split any vma, do it now to save pain later. * @@ -2570,17 +2540,31 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, error = __split_vma(mm, vma, start, 0); if (error) goto split_failed; + prev = vma; + vma = __vma_next(mm, prev); + mas->index = start; + mas_reset(mas); + } else { + prev = vma->vm_prev; } + if (vma->vm_end >= end) + last = vma; + else + last = find_vma_intersection(mm, end - 1, end); + /* Does it split the last one? */ - last = find_vma(mm, end); - if (last && end > last->vm_start) { + if (last && end < last->vm_end) { error = __split_vma(mm, last, end, 1); + if (error) goto split_failed; + + if (vma == last) + vma = __vma_next(mm, prev); + mas_reset(mas); } - vma = __vma_next(mm, prev); if (unlikely(uf)) { /* @@ -2593,16 +2577,46 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, * failure that it's not worth optimizing it for. 
*/ error = userfaultfd_unmap_prep(vma, start, end, uf); + if (error) goto userfaultfd_error; } - /* Detach vmas from rbtree */ - if (!detach_vmas_to_be_unmapped(mm, &mas, vma, prev, end)) - downgrade = false; + /* + * unlock any mlock()ed ranges before detaching vmas, count the number + * of VMAs to be dropped, and return the tail entry of the affected + * area. + */ + mm->map_count -= unlock_range(vma, &last, end); + /* Drop removed area from the tree */ + mas_store_prealloc(mas, NULL); - if (downgrade) - mmap_write_downgrade(mm); + /* Detach vmas from the MM linked list */ + vma->vm_prev = NULL; + if (prev) + prev->vm_next = last->vm_next; + else + mm->mmap = last->vm_next; + + if (last->vm_next) { + last->vm_next->vm_prev = prev; + last->vm_next = NULL; + } else + mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; + + /* + * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or + * VM_GROWSUP VMA. Such VMAs can change their size under + * down_read(mmap_lock) and collide with the VMA we are about to unmap. + */ + if (downgrade) { + if (last && (last->vm_flags & VM_GROWSDOWN)) + downgrade = false; + else if (prev && (prev->vm_flags & VM_GROWSUP)) + downgrade = false; + else + mmap_write_downgrade(mm); + } unmap_region(mm, vma, prev, start, end); @@ -2616,14 +2630,63 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, map_count_exceeded: split_failed: userfaultfd_error: - mas_destroy(&mas); + mas_destroy(mas); return error; } +/* + * do_mas_munmap() - munmap a given range. + * @mas: The maple state + * @mm: The mm_struct + * @start: The start address to munmap + * @len: The length of the range to munmap + * @uf: The userfaultfd list_head + * @downgrade: set to true if the user wants to attempt to write_downgrade the + * mmap_sem + * + * This function takes a @mas that is either pointing to the previous VMA or set + * to MA_START and sets it up to remove the mapping(s). The @len will be + * aligned and any arch_unmap work will be preformed. + * + * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise. + */ +int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm, + unsigned long start, size_t len, struct list_head *uf, + bool downgrade) +{ + unsigned long end; + struct vm_area_struct *vma; + + if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) + return -EINVAL; + + end = start + PAGE_ALIGN(len); + if (end == start) + return -EINVAL; + + /* arch_unmap() might do unmaps itself. */ + arch_unmap(mm, start, end); + + /* Find the first overlapping VMA */ + vma = mas_find(mas, end - 1); + if (!vma) + return 0; + + return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade); +} + +/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. + * @mm: The mm_struct + * @start: The start address to munmap + * @len: The length to be munmapped. 
+ * @uf: The userfaultfd list_head + */ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { - return __do_munmap(mm, start, len, uf, false); + MA_STATE(mas, &mm->mm_mt, start, start); + + return do_mas_munmap(&mas, mm, start, len, uf, false); } unsigned long mmap_region(struct file *file, unsigned long addr, @@ -2657,7 +2720,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, } /* Unmap any existing mapping in the area */ - if (do_munmap(mm, addr, len, uf)) + if (do_mas_munmap(&mas, mm, addr, len, uf, false)) return -ENOMEM; /* @@ -2877,11 +2940,12 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade) int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); + MA_STATE(mas, &mm->mm_mt, start, start); if (mmap_write_lock_killable(mm)) return -EINTR; - ret = __do_munmap(mm, start, len, &uf, downgrade); + ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade); /* * Returning 1 indicates mmap_lock is downgraded. * But 1 is not legal return value of vm_munmap() and munmap(), reset @@ -3016,10 +3080,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, if (likely((vma->vm_end < oldbrk) || ((vma->vm_start == newbrk) && (vma->vm_end == oldbrk)))) { /* remove entire mapping(s) */ - mas_set(mas, newbrk); - if (vma->vm_start != newbrk) - mas_reset(mas); /* cause a re-walk for the first overlap. */ - ret = __do_munmap(mm, newbrk, oldbrk - newbrk, uf, true); + ret = do_mas_munmap(mas, mm, newbrk, oldbrk-newbrk, uf, true); goto munmap_full_vma; } @@ -3200,9 +3261,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) if (ret) goto limits_failed; - if (find_vma_intersection(mm, addr, addr + len)) - ret = do_munmap(mm, addr, len, &uf); - + ret = do_mas_munmap(&mas, mm, addr, len, &uf, 0); if (ret) goto munmap_failed; diff --git a/mm/mremap.c b/mm/mremap.c index b522cd0259a0f..e0fba90042466 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -975,20 +975,23 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. - * __do_munmap does all the needed commit accounting, and + * do_mas_munmap does all the needed commit accounting, and * downgrades mmap_lock to read if so directed. */ if (old_len >= new_len) { int retval; + MA_STATE(mas, &mm->mm_mt, addr + new_len, addr + new_len); - retval = __do_munmap(mm, addr+new_len, old_len - new_len, - &uf_unmap, true); - if (retval < 0 && old_len != new_len) { - ret = retval; - goto out; + retval = do_mas_munmap(&mas, mm, addr + new_len, + old_len - new_len, &uf_unmap, true); /* Returning 1 indicates mmap_lock is downgraded to read. */ - } else if (retval == 1) + if (retval == 1) { downgraded = true; + } else if (retval < 0 && old_len != new_len) { + ret = retval; + goto out; + } + ret = addr; goto out; } From 7ff922b13406b4f25d08fca780b947178353148e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:02 +0000 Subject: [PATCH 040/321] mm/mmap: change do_brk_munmap() to use do_mas_align_munmap() do_brk_munmap() has already aligned the address and has a maple tree state to be used. Use the new do_mas_align_munmap() to avoid unnecessary alignment and error checks. 
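For contrast, callers that do not already hold an aligned range keep going through do_mas_munmap(), which page-aligns the length, range-checks it against TASK_SIZE and performs arch_unmap() before handing the aligned range to do_mas_align_munmap(). A minimal sketch of such a caller, mirroring the __vm_munmap() conversion above (illustrative only, not part of this patch; the function name is made up):

	static int example_munmap(struct mm_struct *mm, unsigned long start,
				  size_t len)
	{
		int ret;
		LIST_HEAD(uf);
		MA_STATE(mas, &mm->mm_mt, start, start);

		if (mmap_write_lock_killable(mm))
			return -EINTR;
		/* aligns len, validates the range, then unmaps it */
		ret = do_mas_munmap(&mas, mm, start, len, &uf, false);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		return ret;
	}
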
Link: https://lkml.kernel.org/r/20220504011345.662299-14-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220519150509.1290067-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-30-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index f3d597519e60d..7424e7a5c9b0a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3073,14 +3073,14 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; struct vm_area_struct unmap; unsigned long unmap_pages; - int ret = 1; + int ret; arch_unmap(mm, newbrk, oldbrk); - if (likely((vma->vm_end < oldbrk) || - ((vma->vm_start == newbrk) && (vma->vm_end == oldbrk)))) { + if (likely((vma->vm_end < oldbrk) || (vma->vm_start >= newbrk))) { /* remove entire mapping(s) */ - ret = do_mas_munmap(mas, mm, newbrk, oldbrk-newbrk, uf, true); + ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf, + true); goto munmap_full_vma; } From 1936ed1c4930837e122e8d696ac2e6620a857732 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:02 +0000 Subject: [PATCH 041/321] arm64: remove mmap linked list from vdso Use the VMA iterator instead. Link: https://lkml.kernel.org/r/20220504011345.662299-15-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-31-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/arm64/kernel/vdso.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index a61fc4f989b37..a8388af62b99e 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -136,10 +136,11 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { unsigned long size = vma->vm_end - vma->vm_start; if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm)) From 8e8a2ae20b243421d17795e3b07e7e85f257f23f Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:02 +0000 Subject: [PATCH 042/321] arm64: Change elfcore for_each_mte_vma() to use VMA iterator Rework for_each_mte_vma() to use a VMA iterator instead of an explicit linked-list. Link: https://lkml.kernel.org/r/20220504011345.662299-16-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-32-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220218023650.672072-1-Liam.Howlett@oracle.com Signed-off-by: Will Deacon Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/arm64/kernel/elfcore.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c index 98d67444a5b61..27ef7ad3ffd2e 100644 --- a/arch/arm64/kernel/elfcore.c +++ b/arch/arm64/kernel/elfcore.c @@ -8,9 +8,9 @@ #include #include -#define for_each_mte_vma(tsk, vma) \ +#define for_each_mte_vma(vmi, vma) \ if (system_supports_mte()) \ - for (vma = tsk->mm->mmap; vma; vma = vma->vm_next) \ + for_each_vma(vmi, vma) \ if (vma->vm_flags & VM_MTE) static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma) @@ -81,8 +81,9 @@ Elf_Half elf_core_extra_phdrs(void) { struct vm_area_struct *vma; int vma_count = 0; + VMA_ITERATOR(vmi, current->mm, 0); - for_each_mte_vma(current, vma) + for_each_mte_vma(vmi, vma) vma_count++; return vma_count; @@ -91,8 +92,9 @@ Elf_Half elf_core_extra_phdrs(void) int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, current->mm, 0); - for_each_mte_vma(current, vma) { + for_each_mte_vma(vmi, vma) { struct elf_phdr phdr; phdr.p_type = PT_AARCH64_MEMTAG_MTE; @@ -116,8 +118,9 @@ size_t elf_core_extra_data_size(void) { struct vm_area_struct *vma; size_t data_size = 0; + VMA_ITERATOR(vmi, current->mm, 0); - for_each_mte_vma(current, vma) + for_each_mte_vma(vmi, vma) data_size += mte_vma_tag_dump_size(vma); return data_size; @@ -126,8 +129,9 @@ size_t elf_core_extra_data_size(void) int elf_core_write_extra_data(struct coredump_params *cprm) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, current->mm, 0); - for_each_mte_vma(current, vma) { + for_each_mte_vma(vmi, vma) { if (vma->vm_flags & VM_DONTDUMP) continue; From caa9c58d54e1779ebc9787e6dc8aba10a0beb0a0 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:03 +0000 Subject: [PATCH 043/321] parisc: remove mmap linked list from cache handling Use the VMA iterator instead. Link: https://lkml.kernel.org/r/20220504011345.662299-17-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-33-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/parisc/kernel/cache.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index a9bc578e4c52e..b54c7cf6fcdea 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -660,15 +660,20 @@ static inline unsigned long mm_total_size(struct mm_struct *mm) { struct vm_area_struct *vma; unsigned long usize = 0; + VMA_ITERATOR(vmi, mm, 0); - for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next) + for_each_vma(vmi, vma) { + if (usize >= parisc_cache_flush_threshold) + break; usize += vma->vm_end - vma->vm_start; + } return usize; } void flush_cache_mm(struct mm_struct *mm) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); /* * Flushing the whole cache on each cpu takes forever on @@ -688,7 +693,7 @@ void flush_cache_mm(struct mm_struct *mm) } /* Flush mm */ - for (vma = mm->mmap; vma; vma = vma->vm_next) + for_each_vma(vmi, vma) flush_cache_pages(vma, vma->vm_start, vma->vm_end); } From 874441040b1ec1e1f0719539dd4099fb742debbc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:03 +0000 Subject: [PATCH 044/321] powerpc: remove mmap linked list walks Use the VMA iterator instead. Link: https://lkml.kernel.org/r/20220504011345.662299-18-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-34-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Reviewed-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/powerpc/kernel/vdso.c | 6 +++--- arch/powerpc/mm/book3s32/tlb.c | 11 ++++++----- arch/powerpc/mm/book3s64/subpage_prot.c | 13 ++----------- 3 files changed, 11 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 0da287544054f..94a8fa5017c35 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -113,18 +113,18 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page) int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; mmap_read_lock(mm); - - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { unsigned long size = vma->vm_end - vma->vm_start; if (vma_is_special_mapping(vma, &vvar_spec)) zap_page_range(vma, vma->vm_start, size); } - mmap_read_unlock(mm); + return 0; } diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c index 19f0ef950d773..9ad6b56bfec96 100644 --- a/arch/powerpc/mm/book3s32/tlb.c +++ b/arch/powerpc/mm/book3s32/tlb.c @@ -81,14 +81,15 @@ EXPORT_SYMBOL(hash__flush_range); void hash__flush_tlb_mm(struct mm_struct *mm) { struct vm_area_struct *mp; + VMA_ITERATOR(vmi, mm, 0); /* - * It is safe to go down the mm's list of vmas when called - * from dup_mmap, holding mmap_lock. It would also be safe from - * unmap_region or exit_mmap, but not from vmtruncate on SMP - - * but it seems dup_mmap is the only SMP case which gets here. + * It is safe to iterate the vmas when called from dup_mmap, + * holding mmap_lock. It would also be safe from unmap_region + * or exit_mmap, but not from vmtruncate on SMP - but it seems + * dup_mmap is the only SMP case which gets here. 
*/ - for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) + for_each_vma(vmi, mp) hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); } EXPORT_SYMBOL(hash__flush_tlb_mm); diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c index 60c6ea16a972a..d73b3b4176e81 100644 --- a/arch/powerpc/mm/book3s64/subpage_prot.c +++ b/arch/powerpc/mm/book3s64/subpage_prot.c @@ -149,24 +149,15 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, unsigned long len) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, addr); /* * We don't try too hard, we just mark all the vma in that range * VM_NOHUGEPAGE and split them. */ - vma = find_vma(mm, addr); - /* - * If the range is in unmapped range, just return - */ - if (vma && ((addr + len) <= vma->vm_start)) - return; - - while (vma) { - if (vma->vm_start >= (addr + len)) - break; + for_each_vma_range(vmi, vma, addr + len) { vma->vm_flags |= VM_NOHUGEPAGE; walk_page_vma(vma, &subpage_walk_ops, NULL); - vma = vma->vm_next; } } #else From cfb90c38fb988e443da7bde7d84448de12826fbd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:03 +0000 Subject: [PATCH 045/321] s390: remove vma linked list walks Use the VMA iterator instead. Link: https://lkml.kernel.org/r/20220504011345.662299-19-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-35-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/s390/kernel/vdso.c | 3 ++- arch/s390/mm/gmap.c | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 5075cde77b292..535099f2736da 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -69,10 +69,11 @@ static struct page *find_timens_vvar_page(struct vm_area_struct *vma) int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { unsigned long size = vma->vm_end - vma->vm_start; if (!vma_is_special_mapping(vma, &vvar_mapping)) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index b8ae4a4aa2ba4..6e24f337eac0b 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2515,8 +2515,9 @@ static const struct mm_walk_ops thp_split_walk_ops = { static inline void thp_split_mm(struct mm_struct *mm) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); - for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { + for_each_vma(vmi, vma) { vma->vm_flags &= ~VM_HUGEPAGE; vma->vm_flags |= VM_NOHUGEPAGE; walk_page_vma(vma, &thp_split_walk_ops, NULL); @@ -2584,8 +2585,9 @@ int gmap_mark_unmergeable(void) struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int ret; + VMA_ITERATOR(vmi, mm, 0); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, MADV_UNMERGEABLE, &vma->vm_flags); if (ret) From 5f34769c63ca5bd011066f9338b8916471ae08d6 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:03 +0000 Subject: [PATCH 046/321] x86: remove vma linked list walks Use the VMA iterator instead. 
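The conversion is mechanical: declare the iterator with a starting address, then walk the VMAs in address order while holding mmap_lock. A minimal sketch of the pattern used throughout this series (illustrative only; the function name and the pr_debug() output are invented for the example):

	static void example_dump_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		VMA_ITERATOR(vmi, mm, 0);

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)
			pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
		mmap_read_unlock(mm);
	}
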
Link: https://lkml.kernel.org/r/20220504011345.662299-20-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-36-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/x86/entry/vdso/vma.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 1000d457c3321..6292b960037b7 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -127,17 +127,17 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); - - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { unsigned long size = vma->vm_end - vma->vm_start; if (vma_is_special_mapping(vma, &vvar_mapping)) zap_page_range(vma, vma->vm_start, size); } - mmap_read_unlock(mm); + return 0; } #else @@ -354,6 +354,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); mmap_write_lock(mm); /* @@ -363,7 +364,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr) * We could search vma near context.vdso, but it's a slowpath, * so let's explicitly check all VMAs to be completely sure. */ - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (vma_is_special_mapping(vma, &vdso_mapping) || vma_is_special_mapping(vma, &vvar_mapping)) { mmap_write_unlock(mm); From 7d5889ec0b56fa65895665843d2e2b723aabf048 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:05 +0000 Subject: [PATCH 047/321] xtensa: remove vma linked list walks Use the VMA iterator instead. Since VMA can no longer be NULL in the loop, then deal with out-of-memory outside the loop. This means a slightly longer run time in the failure case (-ENOMEM) - it will run to the end of the VMAs before erroring instead of in the middle of the loop. Link: https://lkml.kernel.org/r/20220504011345.662299-21-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-37-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/xtensa/kernel/syscall.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 201356faa7e6e..b3c2450d6f239 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -58,6 +58,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_area_struct *vmm; + struct vma_iterator vmi; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate @@ -79,15 +80,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, else addr = PAGE_ALIGN(addr); - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { - /* At this point: (!vmm || addr < vmm->vm_end). 
*/ - if (TASK_SIZE - len < addr) - return -ENOMEM; - if (!vmm || addr + len <= vm_start_gap(vmm)) - return addr; + vma_iter_init(&vmi, current->mm, addr); + for_each_vma(vmi, vmm) { + /* At this point: (addr < vmm->vm_end). */ + if (addr + len <= vm_start_gap(vmm)) + break; + addr = vmm->vm_end; if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr, pgoff); } + + if (TASK_SIZE - len < addr) + return -ENOMEM; + + return addr; } #endif From 3fee6f5608df7462a06faeeaf97e029e939513ba Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:05 +0000 Subject: [PATCH 048/321] cxl: remove vma linked list walk Use the VMA iterator instead. This requires a little restructuring of the surrounding code to hoist the mm to the caller. That turns cxl_prefault_one() into a trivial function, so call cxl_fault_segment() directly. Link: https://lkml.kernel.org/r/20220504011345.662299-22-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-38-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- drivers/misc/cxl/fault.c | 45 ++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 30 deletions(-) diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 60c829113299b..2c64f55cf01f8 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -280,22 +280,6 @@ void cxl_handle_fault(struct work_struct *fault_work) mmput(mm); } -static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) -{ - struct mm_struct *mm; - - mm = get_mem_context(ctx); - if (mm == NULL) { - pr_devel("cxl_prefault_one unable to get mm %i\n", - pid_nr(ctx->pid)); - return; - } - - cxl_fault_segment(ctx, mm, ea); - - mmput(mm); -} - static u64 next_segment(u64 ea, u64 vsid) { if (vsid & SLB_VSID_B_1T) @@ -306,23 +290,16 @@ static u64 next_segment(u64 ea, u64 vsid) return ea + 1; } -static void cxl_prefault_vma(struct cxl_context *ctx) +static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm) { u64 ea, last_esid = 0; struct copro_slb slb; + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; int rc; - struct mm_struct *mm; - - mm = get_mem_context(ctx); - if (mm == NULL) { - pr_devel("cxl_prefault_vm unable to get mm %i\n", - pid_nr(ctx->pid)); - return; - } mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { for (ea = vma->vm_start; ea < vma->vm_end; ea = next_segment(ea, slb.vsid)) { rc = copro_calculate_slb(mm, ea, &slb); @@ -337,20 +314,28 @@ static void cxl_prefault_vma(struct cxl_context *ctx) } } mmap_read_unlock(mm); - - mmput(mm); } void cxl_prefault(struct cxl_context *ctx, u64 wed) { + struct mm_struct *mm = get_mem_context(ctx); + + if (mm == NULL) { + pr_devel("cxl_prefault unable to get mm %i\n", + pid_nr(ctx->pid)); + return; + } + switch (ctx->afu->prefault_mode) { case CXL_PREFAULT_WED: - cxl_prefault_one(ctx, wed); + cxl_fault_segment(ctx, mm, wed); break; case CXL_PREFAULT_ALL: - cxl_prefault_vma(ctx); + cxl_prefault_vma(ctx, mm); break; default: break; } + + mmput(mm); } From 520aea3534c949d34af620e1bce984af2c387a76 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:05 +0000 Subject: [PATCH 049/321] optee: remove vma linked list walk Use the VMA iterator instead. 
Change the calling convention of __check_mem_type() to pass in the mm instead of the first vma in the range. Link: https://lkml.kernel.org/r/20220504011345.662299-23-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-39-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- drivers/tee/optee/call.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index 28f87cd8b3ede..290b1bb0e9cd7 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -492,15 +492,18 @@ static bool is_normal_memory(pgprot_t p) #endif } -static int __check_mem_type(struct vm_area_struct *vma, unsigned long end) +static int __check_mem_type(struct mm_struct *mm, unsigned long start, + unsigned long end) { - while (vma && is_normal_memory(vma->vm_page_prot)) { - if (vma->vm_end >= end) - return 0; - vma = vma->vm_next; + struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, start); + + for_each_vma_range(vmi, vma, end) { + if (!is_normal_memory(vma->vm_page_prot)) + return -EINVAL; } - return -EINVAL; + return 0; } int optee_check_mem_type(unsigned long start, size_t num_pages) @@ -516,8 +519,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages) return 0; mmap_read_lock(mm); - rc = __check_mem_type(find_vma(mm, start), - start + num_pages * PAGE_SIZE); + rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE); mmap_read_unlock(mm); return rc; From 2b638f1cd49b4186b37904468944cdfb31f2a3e4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:05 +0000 Subject: [PATCH 050/321] um: remove vma linked list walk Use the VMA iterator instead. Link: https://lkml.kernel.org/r/20220504011345.662299-24-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-40-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/um/kernel/tlb.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index bc38f79ca3a38..ad449173a1a1c 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -584,21 +584,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, void flush_tlb_mm(struct mm_struct *mm) { - struct vm_area_struct *vma = mm->mmap; + struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); - while (vma != NULL) { + for_each_vma(vmi, vma) fix_range(mm, vma->vm_start, vma->vm_end, 0); - vma = vma->vm_next; - } } void force_flush_all(void) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma = mm->mmap; + struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); - while (vma != NULL) { + for_each_vma(vmi, vma) fix_range(mm, vma->vm_start, vma->vm_end, 1); - vma = vma->vm_next; - } } From 855068f568a9790bc0dd016e5f455a28d534fe1a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:06 +0000 Subject: [PATCH 051/321] coredump: remove vma linked list walk Use the Maple Tree iterator instead. This is too complicated for the VMA iterator to handle, so let's open-code it for now. 
If this turns out to be a common pattern, we can migrate it to common code. Link: https://lkml.kernel.org/r/20220504011345.662299-25-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-41-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/coredump.c | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/fs/coredump.c b/fs/coredump.c index ebc43f960b645..3a0022c1ca36e 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -1072,30 +1072,20 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, return vma->vm_end - vma->vm_start; } -static struct vm_area_struct *first_vma(struct task_struct *tsk, - struct vm_area_struct *gate_vma) -{ - struct vm_area_struct *ret = tsk->mm->mmap; - - if (ret) - return ret; - return gate_vma; -} - /* * Helper function for iterating across a vma list. It ensures that the caller * will visit `gate_vma' prior to terminating the search. */ -static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, +static struct vm_area_struct *coredump_next_vma(struct ma_state *mas, + struct vm_area_struct *vma, struct vm_area_struct *gate_vma) { - struct vm_area_struct *ret; - - ret = this_vma->vm_next; - if (ret) - return ret; - if (this_vma == gate_vma) + if (gate_vma && (vma == gate_vma)) return NULL; + + vma = mas_next(mas, ULONG_MAX); + if (vma) + return vma; return gate_vma; } @@ -1119,9 +1109,10 @@ static void free_vma_snapshot(struct coredump_params *cprm) */ static bool dump_vma_snapshot(struct coredump_params *cprm) { - struct vm_area_struct *vma, *gate_vma; + struct vm_area_struct *gate_vma, *vma = NULL; struct mm_struct *mm = current->mm; - int i; + MA_STATE(mas, &mm->mm_mt, 0, 0); + int i = 0; /* * Once the stack expansion code is fixed to not change VMA bounds @@ -1141,8 +1132,7 @@ static bool dump_vma_snapshot(struct coredump_params *cprm) return false; } - for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; - vma = next_vma(vma, gate_vma), i++) { + while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) { struct core_vma_metadata *m = cprm->vma_meta + i; m->start = vma->vm_start; @@ -1150,10 +1140,10 @@ static bool dump_vma_snapshot(struct coredump_params *cprm) m->flags = vma->vm_flags; m->dump_size = vma_dump_size(vma, cprm->mm_flags); m->pgoff = vma->vm_pgoff; - m->file = vma->vm_file; if (m->file) get_file(m->file); + i++; } mmap_write_unlock(mm); From 4d73d90d7fab5a4a182d3c712bbeadc1d68f7954 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:06 +0000 Subject: [PATCH 052/321] exec: use VMA iterator instead of linked list Remove a use of the vm_next list by doing the initial lookup with the VMA iterator and then using it to find the next entry. Link: https://lkml.kernel.org/r/20220504011345.662299-26-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-42-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/exec.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index b97afa682ffe9..9843cecd031a7 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -686,6 +686,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) unsigned long length = old_end - old_start; unsigned long new_start = old_start - shift; unsigned long new_end = old_end - shift; + VMA_ITERATOR(vmi, mm, new_start); + struct vm_area_struct *next; struct mmu_gather tlb; BUG_ON(new_start > new_end); @@ -694,7 +696,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) * ensure there are no vmas between where we want to go * and where we are */ - if (vma != find_vma(mm, new_start)) + if (vma != vma_next(&vmi)) return -EFAULT; /* @@ -713,12 +715,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) lru_add_drain(); tlb_gather_mmu(&tlb, mm); + next = vma_next(&vmi); if (new_end > old_start) { /* * when the old and new regions overlap clear from new_end. */ free_pgd_range(&tlb, new_end, old_end, new_end, - vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); + next ? next->vm_start : USER_PGTABLES_CEILING); } else { /* * otherwise, clean from old_start; this is done to not touch @@ -727,7 +730,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) * for the others its just a little faster. */ free_pgd_range(&tlb, old_start, old_end, new_end, - vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); + next ? next->vm_start : USER_PGTABLES_CEILING); } tlb_finish_mmu(&tlb); From 1bf31c1166426e8ff0dd18eab0adab2163aff726 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:07 +0000 Subject: [PATCH 053/321] fs/proc/base: use maple tree iterators in place of linked list Link: https://lkml.kernel.org/r/20220504011345.662299-27-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-43-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/proc/base.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/proc/base.c b/fs/proc/base.c index 8dfa36a99c742..6178161687480 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2322,6 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) GENRADIX(struct map_files_info) fa; struct map_files_info *p; int ret; + MA_STATE(mas, NULL, 0, 0); genradix_init(&fa); @@ -2349,6 +2350,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) } nr_files = 0; + mas.tree = &mm->mm_mt; /* * We need two passes here: @@ -2360,7 +2362,8 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) * routine might require mmap_lock taken in might_fault(). 
*/ - for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { + pos = 2; + mas_for_each(&mas, vma, ULONG_MAX) { if (!vma->vm_file) continue; if (++pos <= ctx->pos) From 459dffa1d3cbe6e70fd59d583442b83c19ac093d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:07 +0000 Subject: [PATCH 054/321] fs/proc/task_mmu: stop using linked list and highest_vm_end Remove references to mm_struct linked list and highest_vm_end for when they are removed Link: https://lkml.kernel.org/r/20220504011345.662299-28-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-44-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/proc/internal.h | 2 +- fs/proc/task_mmu.c | 73 ++++++++++++++++++++++++++-------------------- 2 files changed, 42 insertions(+), 33 deletions(-) diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 06a80f78433d8..f03000764ce52 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -285,7 +285,7 @@ struct proc_maps_private { struct task_struct *task; struct mm_struct *mm; #ifdef CONFIG_MMU - struct vm_area_struct *tail_vma; + struct vma_iterator iter; #endif #ifdef CONFIG_NUMA struct mempolicy *task_mempolicy; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index d2011d9242c0e..21cb006cc97f4 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -123,12 +123,26 @@ static void release_task_mempolicy(struct proc_maps_private *priv) } #endif +static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv, + loff_t *ppos) +{ + struct vm_area_struct *vma = vma_next(&priv->iter); + + if (vma) { + *ppos = vma->vm_start; + } else { + *ppos = -2UL; + vma = get_gate_vma(priv->mm); + } + + return vma; +} + static void *m_start(struct seq_file *m, loff_t *ppos) { struct proc_maps_private *priv = m->private; unsigned long last_addr = *ppos; struct mm_struct *mm; - struct vm_area_struct *vma; /* See m_next(). Zero at the start or after lseek. */ if (last_addr == -1UL) @@ -152,31 +166,21 @@ static void *m_start(struct seq_file *m, loff_t *ppos) return ERR_PTR(-EINTR); } + vma_iter_init(&priv->iter, mm, last_addr); hold_task_mempolicy(priv); - priv->tail_vma = get_gate_vma(mm); - - vma = find_vma(mm, last_addr); - if (vma) - return vma; + if (last_addr == -2UL) + return get_gate_vma(mm); - return priv->tail_vma; + return proc_get_vma(priv, ppos); } static void *m_next(struct seq_file *m, void *v, loff_t *ppos) { - struct proc_maps_private *priv = m->private; - struct vm_area_struct *next, *vma = v; - - if (vma == priv->tail_vma) - next = NULL; - else if (vma->vm_next) - next = vma->vm_next; - else - next = priv->tail_vma; - - *ppos = next ? 
next->vm_start : -1UL; - - return next; + if (*ppos == -2UL) { + *ppos = -1UL; + return NULL; + } + return proc_get_vma(m->private, ppos); } static void m_stop(struct seq_file *m, void *v) @@ -875,16 +879,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; struct mem_size_stats mss; - struct mm_struct *mm; + struct mm_struct *mm = priv->mm; struct vm_area_struct *vma; - unsigned long last_vma_end = 0; + unsigned long vma_start = 0, last_vma_end = 0; int ret = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); priv->task = get_proc_task(priv->inode); if (!priv->task) return -ESRCH; - mm = priv->mm; if (!mm || !mmget_not_zero(mm)) { ret = -ESRCH; goto out_put_task; @@ -897,8 +901,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v) goto out_put_mm; hold_task_mempolicy(priv); + vma = mas_find(&mas, 0); + + if (unlikely(!vma)) + goto empty_set; - for (vma = priv->mm->mmap; vma;) { + vma_start = vma->vm_start; + do { smap_gather_stats(vma, &mss, 0); last_vma_end = vma->vm_end; @@ -907,6 +916,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v) * access it for write request. */ if (mmap_lock_is_contended(mm)) { + mas_pause(&mas); mmap_read_unlock(mm); ret = mmap_read_lock_killable(mm); if (ret) { @@ -950,7 +960,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v) * contains last_vma_end. * Iterate VMA' from last_vma_end. */ - vma = find_vma(mm, last_vma_end - 1); + vma = mas_find(&mas, ULONG_MAX); /* Case 3 above */ if (!vma) break; @@ -964,11 +974,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v) smap_gather_stats(vma, &mss, last_vma_end); } /* Case 2 above */ - vma = vma->vm_next; - } + } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); - show_vma_header_prefix(m, priv->mm->mmap->vm_start, - last_vma_end, 0, 0, 0, 0); +empty_set: + show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0); seq_pad(m, ' '); seq_puts(m, "[rollup]\n"); @@ -1261,6 +1270,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, return -ESRCH; mm = get_task_mm(task); if (mm) { + MA_STATE(mas, &mm->mm_mt, 0, 0); struct mmu_notifier_range range; struct clear_refs_private cp = { .type = type, @@ -1280,7 +1290,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, } if (type == CLEAR_REFS_SOFT_DIRTY) { - for (vma = mm->mmap; vma; vma = vma->vm_next) { + mas_for_each(&mas, vma, ULONG_MAX) { if (!(vma->vm_flags & VM_SOFTDIRTY)) continue; vma->vm_flags &= ~VM_SOFTDIRTY; @@ -1292,8 +1302,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, 0, NULL, mm, 0, -1UL); mmu_notifier_invalidate_range_start(&range); } - walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops, - &cp); + walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); if (type == CLEAR_REFS_SOFT_DIRTY) { mmu_notifier_invalidate_range_end(&range); flush_tlb_mm(mm); From 8968fa921b760ebb7e10f3060cf97eaeb8683747 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:07 +0000 Subject: [PATCH 055/321] userfaultfd: use maple tree iterator to iterate VMAs Don't use the mm_struct linked list or the vma->vm_next in prep for removal. Link: https://lkml.kernel.org/r/20220504011345.662299-29-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220615164150.652376-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-45-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 62 ++++++++++++++++++++++++----------- include/linux/userfaultfd_k.h | 7 ++-- mm/mmap.c | 7 ++-- 3 files changed, 47 insertions(+), 29 deletions(-) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index e943370107d06..fe6f283d26d55 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -613,14 +613,16 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, if (release_new_ctx) { struct vm_area_struct *vma; struct mm_struct *mm = release_new_ctx->mm; + VMA_ITERATOR(vmi, mm, 0); /* the various vma->vm_userfaultfd_ctx still points to it */ mmap_write_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) + for_each_vma(vmi, vma) { if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; vma->vm_flags &= ~__VM_UFFD_FLAGS; } + } mmap_write_unlock(mm); userfaultfd_ctx_put(release_new_ctx); @@ -801,11 +803,13 @@ static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, return false; } -int userfaultfd_unmap_prep(struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct list_head *unmaps) +int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, + unsigned long end, struct list_head *unmaps) { - for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { + VMA_ITERATOR(vmi, mm, start); + struct vm_area_struct *vma; + + for_each_vma_range(vmi, vma, end) { struct userfaultfd_unmap_ctx *unmap_ctx; struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; @@ -855,6 +859,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) /* len == 0 means wake all */ struct userfaultfd_wake_range range = { .len = 0, }; unsigned long new_flags; + MA_STATE(mas, &mm->mm_mt, 0, 0); WRITE_ONCE(ctx->released, true); @@ -871,7 +876,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) */ mmap_write_lock(mm); prev = NULL; - for (vma = mm->mmap; vma; vma = vma->vm_next) { + mas_for_each(&mas, vma, ULONG_MAX) { cond_resched(); BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ !!(vma->vm_flags & __VM_UFFD_FLAGS)); @@ -885,10 +890,13 @@ static int userfaultfd_release(struct inode *inode, struct file *file) vma->vm_file, vma->vm_pgoff, vma_policy(vma), NULL_VM_UFFD_CTX, anon_vma_name(vma)); - if (prev) + if (prev) { + mas_pause(&mas); vma = prev; - else + } else { prev = vma; + } + vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; } @@ -1270,6 +1278,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, bool found; bool basic_ioctls; unsigned long start, end, vma_end; + MA_STATE(mas, &mm->mm_mt, 0, 0); user_uffdio_register = (struct uffdio_register __user *) arg; @@ -1312,7 +1321,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, goto out; mmap_write_lock(mm); - vma = find_vma_prev(mm, start, &prev); + mas_set(&mas, start); + vma = mas_find(&mas, ULONG_MAX); if (!vma) goto out_unlock; @@ -1337,7 +1347,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, */ found = false; basic_ioctls = false; - for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { + for (cur = vma; cur; cur = mas_next(&mas, end - 1)) { cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ @@ -1397,8 +1407,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, } BUG_ON(!found); - if (vma->vm_start < start) - prev = vma; + 
mas_set(&mas, start); + prev = mas_prev(&mas, 0); + if (prev != vma) + mas_next(&mas, ULONG_MAX); ret = 0; do { @@ -1428,6 +1440,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, ((struct vm_userfaultfd_ctx){ ctx }), anon_vma_name(vma)); if (prev) { + /* vma_merge() invalidated the mas */ + mas_pause(&mas); vma = prev; goto next; } @@ -1435,11 +1449,15 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, ret = split_vma(mm, vma, start, 1); if (ret) break; + /* split_vma() invalidated the mas */ + mas_pause(&mas); } if (vma->vm_end > end) { ret = split_vma(mm, vma, end, 0); if (ret) break; + /* split_vma() invalidated the mas */ + mas_pause(&mas); } next: /* @@ -1456,8 +1474,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, skip: prev = vma; start = vma->vm_end; - vma = vma->vm_next; - } while (vma && vma->vm_start < end); + vma = mas_next(&mas, end - 1); + } while (vma); out_unlock: mmap_write_unlock(mm); mmput(mm); @@ -1501,6 +1519,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, bool found; unsigned long start, end, vma_end; const void __user *buf = (void __user *)arg; + MA_STATE(mas, &mm->mm_mt, 0, 0); ret = -EFAULT; if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) @@ -1519,7 +1538,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, goto out; mmap_write_lock(mm); - vma = find_vma_prev(mm, start, &prev); + mas_set(&mas, start); + vma = mas_find(&mas, ULONG_MAX); if (!vma) goto out_unlock; @@ -1544,7 +1564,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, */ found = false; ret = -EINVAL; - for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { + for (cur = vma; cur; cur = mas_next(&mas, end - 1)) { cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ @@ -1564,8 +1584,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, } BUG_ON(!found); - if (vma->vm_start < start) - prev = vma; + mas_set(&mas, start); + prev = mas_prev(&mas, 0); + if (prev != vma) + mas_next(&mas, ULONG_MAX); ret = 0; do { @@ -1630,8 +1652,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, skip: prev = vma; start = vma->vm_end; - vma = vma->vm_next; - } while (vma && vma->vm_start < end); + vma = mas_next(&mas, end - 1); + } while (vma); out_unlock: mmap_write_unlock(mm); mmput(mm); diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 732b522bacb7e..eee374c29c859 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -173,9 +173,8 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct list_head *uf); +extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, + unsigned long end, struct list_head *uf); extern void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); @@ -256,7 +255,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma, return true; } -static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, +static inline int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, unsigned long end, struct list_head *uf) { diff --git a/mm/mmap.c b/mm/mmap.c index 7424e7a5c9b0a..a17739cd5435e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2576,7 +2576,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, * split, despite we could. 
This is unlikely enough * failure that it's not worth optimizing it for. */ - error = userfaultfd_unmap_prep(vma, start, end, uf); + error = userfaultfd_unmap_prep(mm, start, end, uf); if (error) goto userfaultfd_error; @@ -3084,10 +3084,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, goto munmap_full_vma; } - vma_init(&unmap, mm); - unmap.vm_start = newbrk; - unmap.vm_end = oldbrk; - ret = userfaultfd_unmap_prep(&unmap, newbrk, oldbrk, uf); + ret = userfaultfd_unmap_prep(mm, newbrk, oldbrk, uf); if (ret) return ret; ret = 1; From 3ec56d8f4ce008944492dee5cc24a29fb57a51c8 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:07 +0000 Subject: [PATCH 056/321] ipc/shm: use VMA iterator instead of linked list The VMA iterator is faster than the linked llist, and it can be walked even when VMAs are being removed from the address space, so there's no need to keep track of 'next'. Link: https://lkml.kernel.org/r/20220504011345.662299-30-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-46-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- ipc/shm.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index b3048ebd5c315..7d86f058fb861 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1721,7 +1721,7 @@ long ksys_shmdt(char __user *shmaddr) #ifdef CONFIG_MMU loff_t size = 0; struct file *file; - struct vm_area_struct *next; + VMA_ITERATOR(vmi, mm, addr); #endif if (addr & ~PAGE_MASK) @@ -1751,12 +1751,9 @@ long ksys_shmdt(char __user *shmaddr) * match the usual checks anyway. So assume all vma's are * above the starting address given. */ - vma = find_vma(mm, addr); #ifdef CONFIG_MMU - while (vma) { - next = vma->vm_next; - + for_each_vma(vmi, vma) { /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it @@ -1774,6 +1771,7 @@ long ksys_shmdt(char __user *shmaddr) file = vma->vm_file; size = i_size_read(file_inode(vma->vm_file)); do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); + mas_pause(&vmi.mas); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next @@ -1781,10 +1779,9 @@ long ksys_shmdt(char __user *shmaddr) * searching for matching vma's. */ retval = 0; - vma = next; + vma = vma_next(&vmi); break; } - vma = next; } /* @@ -1794,17 +1791,19 @@ long ksys_shmdt(char __user *shmaddr) */ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { - next = vma->vm_next; - /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && - (vma->vm_file == file)) + (vma->vm_file == file)) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); - vma = next; + mas_pause(&vmi.mas); + } + + vma = vma_next(&vmi); } #else /* CONFIG_MMU */ + vma = vma_lookup(mm, addr); /* under NOMMU conditions, the exact address to be destroyed must be * given */ From 4c72b74956cb60a957547b96eca079b863be8695 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:08 +0000 Subject: [PATCH 057/321] acct: use VMA iterator instead of linked list The VMA iterator is faster than the linked list. 
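For readers new to the API, the conversion pattern used throughout this series looks roughly like the sketch below. It mirrors the acct_collect() hunk that follows; total_mapped() is a hypothetical name used only for illustration and is not a helper added by the series, while VMA_ITERATOR() and for_each_vma() are the interfaces the patches actually switch callers to.

	/* Sum the size of every mapping in an mm using the VMA iterator.
	 * Illustrative sketch only: total_mapped() is a made-up helper, but
	 * the VMA_ITERATOR()/for_each_vma() usage matches the hunks in this
	 * series. */
	static unsigned long total_mapped(struct mm_struct *mm)
	{
		VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */
		struct vm_area_struct *vma;
		unsigned long vsize = 0;

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)		/* replaces vma = vma->vm_next walks */
			vsize += vma->vm_end - vma->vm_start;
		mmap_read_unlock(mm);

		return vsize;
	}
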
Link: https://lkml.kernel.org/r/20220504011345.662299-31-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-47-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- kernel/acct.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/kernel/acct.c b/kernel/acct.c index 13706356ec54d..62200d799b9b0 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -555,15 +555,14 @@ void acct_collect(long exitcode, int group_dead) unsigned long vsize = 0; if (group_dead && current->mm) { + struct mm_struct *mm = current->mm; + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; - mmap_read_lock(current->mm); - vma = current->mm->mmap; - while (vma) { + mmap_read_lock(mm); + for_each_vma(vmi, vma) vsize += vma->vm_end - vma->vm_start; - vma = vma->vm_next; - } - mmap_read_unlock(current->mm); + mmap_read_unlock(mm); } spin_lock_irq(¤t->sighand->siglock); From cc82bbcb1e994e8fa852a6757651928835d319ba Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:08 +0000 Subject: [PATCH 058/321] perf: use VMA iterator The VMA iterator is faster than the linked list and removing the linked list will shrink the vm_area_struct. Link: https://lkml.kernel.org/r/20220504011345.662299-32-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-48-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- kernel/events/core.c | 3 ++- kernel/events/uprobes.c | 9 ++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 80782cddb1dab..61ad10862c213 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10210,8 +10210,9 @@ static void perf_addr_filter_apply(struct perf_addr_filter *filter, struct perf_addr_filter_range *fr) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (!vma->vm_file) continue; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2eaa327f8158d..401bc2d24ce06 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -349,9 +349,10 @@ static bool valid_ref_ctr_vma(struct uprobe *uprobe, static struct vm_area_struct * find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *tmp; - for (tmp = mm->mmap; tmp; tmp = tmp->vm_next) + for_each_vma(vmi, tmp) if (valid_ref_ctr_vma(uprobe, tmp)) return tmp; @@ -1231,11 +1232,12 @@ int uprobe_apply(struct inode *inode, loff_t offset, static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; int err = 0; mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { unsigned long vaddr; loff_t offset; @@ -1983,9 +1985,10 @@ bool uprobe_deny_signal(void) static void mmf_recalc_uprobes(struct mm_struct *mm) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (!valid_vma(vma, false)) continue; /* From e02c4bca42a7612f23279ab3323f15b620799073 Mon Sep 17 00:00:00 2001 
From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:08 +0000 Subject: [PATCH 059/321] sched: use maple tree iterator to walk VMAs The linked list is slower than walking the VMAs using the maple tree. We can't use the VMA iterator here because it doesn't support moving to an earlier position. Link: https://lkml.kernel.org/r/20220504011345.662299-33-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-49-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- kernel/sched/fair.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 77b2048a93262..e8202b5cd3d5b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2734,6 +2734,7 @@ static void task_numa_work(struct callback_head *work) struct task_struct *p = current; struct mm_struct *mm = p->mm; u64 runtime = p->se.sum_exec_runtime; + MA_STATE(mas, &mm->mm_mt, 0, 0); struct vm_area_struct *vma; unsigned long start, end; unsigned long nr_pte_updates = 0; @@ -2790,13 +2791,16 @@ static void task_numa_work(struct callback_head *work) if (!mmap_read_trylock(mm)) return; - vma = find_vma(mm, start); + mas_set(&mas, start); + vma = mas_find(&mas, ULONG_MAX); if (!vma) { reset_ptenuma_scan(p); start = 0; - vma = mm->mmap; + mas_set(&mas, start); + vma = mas_find(&mas, ULONG_MAX); } - for (; vma; vma = vma->vm_next) { + + for (; vma; vma = mas_find(&mas, ULONG_MAX)) { if (!vma_migratable(vma) || !vma_policy_mof(vma) || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { continue; From ae5d8f8fe91a4f7ac84ecc668dfb0e1aad691d72 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:08 +0000 Subject: [PATCH 060/321] fork: use VMA iterator The VMA iterator is faster than the linked list and removing the linked list will shrink the vm_area_struct. Link: https://lkml.kernel.org/r/20220504011345.662299-34-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-50-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- kernel/fork.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index a653b259c3212..7fcefef4c84d2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1294,13 +1294,16 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) /* Forbid mm->exe_file change if old file still mapped. */ old_exe_file = get_mm_exe_file(mm); if (old_exe_file) { + VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); - for (vma = mm->mmap; vma && !ret; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (!vma->vm_file) continue; if (path_equal(&vma->vm_file->f_path, - &old_exe_file->f_path)) + &old_exe_file->f_path)) { ret = -EBUSY; + break; + } } mmap_read_unlock(mm); fput(old_exe_file); From 0282cf8372ef4e11bf47dfb4edc4bc6de5716602 Mon Sep 17 00:00:00 2001 From: "Liam R. 
Howlett" Date: Tue, 21 Jun 2022 20:47:09 +0000 Subject: [PATCH 061/321] bpf: remove VMA linked list Use vma_next() and remove reference to the start of the linked list Link: https://lkml.kernel.org/r/20220504011345.662299-35-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-51-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- kernel/bpf/task_iter.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 8c921799def49..1c8debd42dc9f 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -299,8 +299,8 @@ struct bpf_iter_seq_task_vma_info { }; enum bpf_task_vma_iter_find_op { - task_vma_iter_first_vma, /* use mm->mmap */ - task_vma_iter_next_vma, /* use curr_vma->vm_next */ + task_vma_iter_first_vma, /* use find_vma() with addr 0 */ + task_vma_iter_next_vma, /* use vma_next() with curr_vma */ task_vma_iter_find_vma, /* use find_vma() to find next vma */ }; @@ -400,10 +400,10 @@ task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info) switch (op) { case task_vma_iter_first_vma: - curr_vma = curr_task->mm->mmap; + curr_vma = find_vma(curr_task->mm, 0); break; case task_vma_iter_next_vma: - curr_vma = curr_vma->vm_next; + curr_vma = find_vma(curr_task->mm, curr_vma->vm_end); break; case task_vma_iter_find_vma: /* We dropped mmap_lock so it is necessary to use find_vma @@ -417,7 +417,7 @@ task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info) if (curr_vma && curr_vma->vm_start == info->prev_vm_start && curr_vma->vm_end == info->prev_vm_end) - curr_vma = curr_vma->vm_next; + curr_vma = find_vma(curr_task->mm, curr_vma->vm_end); break; } if (!curr_vma) { From 55792f236e41adc7bbfd1e4f457d9c6d6cdfe674 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:09 +0000 Subject: [PATCH 062/321] mm/gup: use maple tree navigation instead of linked list Use find_vma_intersection() to locate the VMAs in __mm_populate() instead of using find_vma() and the linked list. Link: https://lkml.kernel.org/r/20220504011345.662299-36-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-52-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/gup.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 2e663d09334e3..280b46857cb3c 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1643,10 +1643,11 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) if (!locked) { locked = 1; mmap_read_lock(mm); - vma = find_vma(mm, nstart); + vma = find_vma_intersection(mm, nstart, end); } else if (nstart >= vma->vm_end) - vma = vma->vm_next; - if (!vma || vma->vm_start >= end) + vma = find_vma_intersection(mm, vma->vm_end, end); + + if (!vma) break; /* * Set [nstart; nend) to intersection of desired address From 18d7283a5057a833d05c5ab66ba95b68f529e4c5 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:09 +0000 Subject: [PATCH 063/321] mm/khugepaged: stop using vma linked list Use vma iterator & find_vma() instead of vma linked list. 
Link: https://lkml.kernel.org/r/20220504011345.662299-37-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-53-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/huge_memory.c | 4 ++-- mm/khugepaged.c | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 60d742c33de34..79bba350f2964 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2266,11 +2266,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, split_huge_pmd_if_needed(vma, end); /* - * If we're also updating the vma->vm_next->vm_start, + * If we're also updating the next vma vm_start, * check if we need to split it. */ if (adjust_next > 0) { - struct vm_area_struct *next = vma->vm_next; + struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); unsigned long nstart = next->vm_start; nstart += adjust_next; split_huge_pmd_if_needed(next, nstart); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index aa5888cc9ef22..00531f56069ba 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2085,10 +2085,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) { + struct vma_iterator vmi; struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int progress = 0; + unsigned long address; VM_BUG_ON(!pages); lockdep_assert_held(&khugepaged_mm_lock); @@ -2112,11 +2114,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, vma = NULL; if (unlikely(!mmap_read_trylock(mm))) goto breakouterloop_mmap_lock; - if (likely(!khugepaged_test_exit(mm))) - vma = find_vma(mm, khugepaged_scan.address); progress++; - for (; vma; vma = vma->vm_next) { + if (unlikely(khugepaged_test_exit(mm))) + goto breakouterloop; + + address = khugepaged_scan.address; + vma_iter_init(&vmi, mm, address); + for_each_vma(vmi, vma) { unsigned long hstart, hend; cond_resched(); From 63c048e12085d7bbf8e9d27b9b4fd4b2dc8bf8ed Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:10 +0000 Subject: [PATCH 064/321] mm/ksm: use vma iterators instead of vma linked list Remove the use of the linked list for eventual removal. Link: https://lkml.kernel.org/r/20220504011345.662299-38-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-54-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/ksm.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 8d2dc501c92c6..73cbe2a628ed3 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -981,11 +981,13 @@ static int unmerge_and_remove_all_rmap_items(void) struct mm_slot, mm_list); spin_unlock(&ksm_mmlist_lock); - for (mm_slot = ksm_scan.mm_slot; - mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { + for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; + mm_slot = ksm_scan.mm_slot) { + VMA_ITERATOR(vmi, mm_slot->mm, 0); + mm = mm_slot->mm; mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (ksm_test_exit(mm)) break; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) @@ -2232,6 +2234,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) struct mm_slot *slot; struct vm_area_struct *vma; struct rmap_item *rmap_item; + struct vma_iterator vmi; int nid; if (list_empty(&ksm_mm_head.mm_list)) @@ -2290,13 +2293,13 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) } mm = slot->mm; + vma_iter_init(&vmi, mm, ksm_scan.address); + mmap_read_lock(mm); if (ksm_test_exit(mm)) - vma = NULL; - else - vma = find_vma(mm, ksm_scan.address); + goto no_vmas; - for (; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (!(vma->vm_flags & VM_MERGEABLE)) continue; if (ksm_scan.address < vma->vm_start) @@ -2334,6 +2337,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) } if (ksm_test_exit(mm)) { +no_vmas: ksm_scan.address = 0; ksm_scan.rmap_list = &slot->rmap_list; } From f2e5c0c397d830d374c46f9b73c6974fa796837e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:10 +0000 Subject: [PATCH 065/321] mm/madvise: use vma_find() instead of vma linked list madvise_walk_vmas() no longer uses linked list. Link: https://lkml.kernel.org/r/20220504011345.662299-39-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-55-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/madvise.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/madvise.c b/mm/madvise.c index e55108d4e4b2c..5ae5cf4d1a9b0 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1238,7 +1238,7 @@ int madvise_walk_vmas(struct mm_struct *mm, unsigned long start, if (start >= end) break; if (prev) - vma = prev->vm_next; + vma = find_vma(mm, prev->vm_end); else /* madvise_remove dropped mmap_lock */ vma = find_vma(mm, start); } From 1fd3cb689f47afc07337d834a40a0352c7e9d0a4 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:10 +0000 Subject: [PATCH 066/321] mm/memcontrol: stop using mm->highest_vm_end Pass through ULONG_MAX instead. Link: https://lkml.kernel.org/r/20220504011345.662299-40-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-56-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/memcontrol.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1497affe08c4e..d63c5856d006c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5856,7 +5856,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) unsigned long precharge; mmap_read_lock(mm); - walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); + walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL); mmap_read_unlock(mm); precharge = mc.precharge; @@ -6154,9 +6154,7 @@ static void mem_cgroup_move_charge(void) * When we have consumed all precharges and failed in doing * additional charge, the page walk just aborts. */ - walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, - NULL); - + walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL); mmap_read_unlock(mc.mm); atomic_dec(&mc.from->moving_account); } From 753b5f4164a618cf25f514688789067b0e75a87c Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:11 +0000 Subject: [PATCH 067/321] mm/mempolicy: use vma iterator & maple state instead of vma linked list Reworked the way mbind_range() finds the first VMA to reuse the maple state and limit the number of tree walks needed. Note, this drops the VM_BUG_ON(!vma) call, which would catch a start address higher than the last VMA. The code was written in a way that allowed no VMA updates to occur and still return success. There should be no functional change to this scenario with the new code. Link: https://lkml.kernel.org/r/20220504011345.662299-41-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-57-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Signed-off-by: Matthew Wilcox (Oracle) Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mempolicy.c | 56 ++++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f4cd963550c1c..53064bfba061c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -381,9 +381,10 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); mmap_write_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) + for_each_vma(vmi, vma) mpol_rebind_policy(vma->vm_policy, new); mmap_write_unlock(mm); } @@ -656,7 +657,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma, static int queue_pages_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { - struct vm_area_struct *vma = walk->vma; + struct vm_area_struct *next, *vma = walk->vma; struct queue_pages *qp = walk->private; unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; @@ -671,9 +672,10 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, /* hole at head side of range */ return -EFAULT; } + next = find_vma(vma->vm_mm, vma->vm_end); if (!(flags & MPOL_MF_DISCONTIG_OK) && ((vma->vm_end < qp->end) && - (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) + (!next || vma->vm_end < next->vm_start))) /* hole at middle or tail of range */ return -EFAULT; @@ -787,26 +789,24 @@ static int vma_replace_policy(struct vm_area_struct *vma, static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { + MA_STATE(mas, &mm->mm_mt, start - 1, start - 1); struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; - unsigned long vmstart; - unsigned long vmend; - - vma = find_vma(mm, start); - VM_BUG_ON(!vma); - prev = vma->vm_prev; - if (start > vma->vm_start) - prev = vma; + prev = mas_find_rev(&mas, 0); + if (prev && (start < prev->vm_end)) + vma = prev; + else + vma = mas_next(&mas, end - 1); - for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) { - vmstart = max(start, vma->vm_start); - vmend = min(end, vma->vm_end); + for (; vma; vma = mas_next(&mas, end - 1)) { + unsigned long vmstart = max(start, vma->vm_start); + unsigned long vmend = min(end, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) - continue; + goto next; pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); @@ -815,6 +815,8 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (prev) { + /* vma_merge() invalidated the mas */ + mas_pause(&mas); vma = prev; goto replace; } @@ -822,19 +824,25 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, err = split_vma(vma->vm_mm, vma, vmstart, 1); if (err) goto out; + /* split_vma() invalidated the mas */ + mas_pause(&mas); } if (vma->vm_end != vmend) { err = split_vma(vma->vm_mm, vma, vmend, 0); if (err) goto out; + /* split_vma() invalidated the mas */ + mas_pause(&mas); } - replace: +replace: err = vma_replace_policy(vma, new_pol); if (err) goto out; +next: + prev = vma; } - out: +out: return err; } @@ -1049,6 +1057,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) { nodemask_t nmask; + struct vm_area_struct *vma; 
LIST_HEAD(pagelist); int err = 0; struct migration_target_control mtc = { @@ -1064,8 +1073,9 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, * need migration. Between passing in the full user address * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. */ + vma = find_vma(mm, 0); VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); - queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, + queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) { @@ -1195,14 +1205,13 @@ static struct page *new_page(struct page *page, unsigned long start) struct folio *dst, *src = page_folio(page); struct vm_area_struct *vma; unsigned long address; + VMA_ITERATOR(vmi, current->mm, start); gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; - vma = find_vma(current->mm, start); - while (vma) { + for_each_vma(vmi, vma) { address = page_address_in_vma(page, vma); if (address != -EFAULT) break; - vma = vma->vm_next; } if (folio_test_hugetlb(src)) @@ -1480,6 +1489,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le unsigned long vmend; unsigned long end; int err = -ENOENT; + VMA_ITERATOR(vmi, mm, start); start = untagged_addr(start); if (start & ~PAGE_MASK) @@ -1505,9 +1515,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le if (end == start) return 0; mmap_write_lock(mm); - vma = find_vma(mm, start); - for (; vma && vma->vm_start < end; vma = vma->vm_next) { - + for_each_vma_range(vmi, vma, end) { vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); new = mpol_dup(vma_policy(vma)); From 35e7ee967399f47c98466ac23a0c694e0fa4aff4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:11 +0000 Subject: [PATCH 068/321] mm/mlock: use vma iterator and maple state instead of vma linked list Handle overflow checking in count_mm_mlocked_page_nr() differently. Link: https://lkml.kernel.org/r/20220504011345.662299-42-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-58-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. 
Howlett Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mlock.c | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/mm/mlock.c b/mm/mlock.c index 716caf8510431..c41604ba5197d 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -471,6 +471,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, unsigned long nstart, end, tmp; struct vm_area_struct *vma, *prev; int error; + MA_STATE(mas, ¤t->mm->mm_mt, start, start); VM_BUG_ON(offset_in_page(start)); VM_BUG_ON(len != PAGE_ALIGN(len)); @@ -479,13 +480,14 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, return -EINVAL; if (end == start) return 0; - vma = find_vma(current->mm, start); - if (!vma || vma->vm_start > start) + vma = mas_walk(&mas); + if (!vma) return -ENOMEM; - prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; + else + prev = mas_prev(&mas, 0); for (nstart = start ; ; ) { vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; @@ -505,7 +507,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, if (nstart >= end) break; - vma = prev->vm_next; + vma = find_vma(prev->vm_mm, prev->vm_end); if (!vma || vma->vm_start != nstart) { error = -ENOMEM; break; @@ -526,24 +528,23 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, { struct vm_area_struct *vma; unsigned long count = 0; + unsigned long end; + VMA_ITERATOR(vmi, mm, start); if (mm == NULL) mm = current->mm; - vma = find_vma(mm, start); - if (vma == NULL) - return 0; - - for (; vma ; vma = vma->vm_next) { - if (start >= vma->vm_end) - continue; - if (start + len <= vma->vm_start) - break; + /* Don't overflow past ULONG_MAX */ + if (unlikely(ULONG_MAX - len < start)) + end = ULONG_MAX; + else + end = start + len; + for_each_vma_range(vmi, vma, end) { if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) count -= (start - vma->vm_start); - if (start + len < vma->vm_end) { - count += start + len - vma->vm_start; + if (end < vma->vm_end) { + count += end - vma->vm_start; break; } count += vma->vm_end - vma->vm_start; @@ -659,6 +660,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) */ static int apply_mlockall_flags(int flags) { + MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); struct vm_area_struct *vma, *prev = NULL; vm_flags_t to_add = 0; @@ -679,7 +681,7 @@ static int apply_mlockall_flags(int flags) to_add |= VM_LOCKONFAULT; } - for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { + mas_for_each(&mas, vma, ULONG_MAX) { vm_flags_t newflags; newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; @@ -687,6 +689,7 @@ static int apply_mlockall_flags(int flags) /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); + mas_pause(&mas); cond_resched(); } out: From a4663814a85c53e274ce491715d6cf95da107fe7 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:11 +0000 Subject: [PATCH 069/321] mm/mprotect: use maple tree navigation instead of vma linked list Link: https://lkml.kernel.org/r/20220504011345.662299-43-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-59-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mprotect.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/mprotect.c b/mm/mprotect.c index 996a97e213adc..ff0d4881cb071 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -668,6 +668,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, const bool rier = (current->personality & READ_IMPLIES_EXEC) && (prot & PROT_READ); struct mmu_gather tlb; + MA_STATE(mas, ¤t->mm->mm_mt, start, start); start = untagged_addr(start); @@ -699,7 +700,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey)) goto out; - vma = find_vma(current->mm, start); + vma = mas_find(&mas, ULONG_MAX); error = -ENOMEM; if (!vma) goto out; @@ -725,7 +726,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, if (start > vma->vm_start) prev = vma; else - prev = vma->vm_prev; + prev = mas_prev(&mas, 0); tlb_gather_mmu(&tlb, current->mm); for (nstart = start ; ; ) { @@ -788,7 +789,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, if (nstart >= end) break; - vma = prev->vm_next; + vma = find_vma(current->mm, prev->vm_end); if (!vma || vma->vm_start != nstart) { error = -ENOMEM; break; From 9a4bce44d4e76c2095e9d087022a0b422abbcdac Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:11 +0000 Subject: [PATCH 070/321] mm/mremap: use vma_find_intersection() instead of vma linked list Link: https://lkml.kernel.org/r/20220504011345.662299-44-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-60-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mremap.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index e0fba90042466..8644ff278f029 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -716,7 +716,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (excess) { vma->vm_flags |= VM_ACCOUNT; if (split) - vma->vm_next->vm_flags |= VM_ACCOUNT; + find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT; } return new_addr; @@ -866,9 +866,10 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) { unsigned long end = vma->vm_end + delta; + if (end < vma->vm_end) /* overflow */ return 0; - if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ + if (find_vma_intersection(vma->vm_mm, vma->vm_end, end)) return 0; if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, 0, MAP_FIXED) & ~PAGE_MASK) From 4da89e8db08efd55dd6fe108e0cc261fdad8cde3 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:12 +0000 Subject: [PATCH 071/321] mm/msync: use vma_find() instead of vma linked list Link: https://lkml.kernel.org/r/20220504011345.662299-45-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-61-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/msync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/msync.c b/mm/msync.c index 137d1c104f3e9..ac4c9bfea2e7f 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -104,7 +104,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) error = 0; goto out_unlock; } - vma = vma->vm_next; + vma = find_vma(mm, vma->vm_end); } } out_unlock: From 50224eb18f0b20d0be5fd185987d95335f23d087 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:12 +0000 Subject: [PATCH 072/321] mm/oom_kill: use maple tree iterators instead of vma linked list Link: https://lkml.kernel.org/r/20220504011345.662299-46-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-62-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/oom_kill.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3c6cf9e3cd66e..3996301450e8d 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -513,6 +513,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm) { struct vm_area_struct *vma; bool ret = true; + VMA_ITERATOR(vmi, mm, 0); /* * Tell all users of get_user/copy_from_user etc... that the content @@ -522,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm) */ set_bit(MMF_UNSTABLE, &mm->flags); - for (vma = mm->mmap ; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP)) continue; From 1005da229c61338cf00412b249a02b944c449837 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:12 +0000 Subject: [PATCH 073/321] mm/pagewalk: use vma_find() instead of vma linked list walk_page_range() no longer uses the one vma linked list reference. Link: https://lkml.kernel.org/r/20220504011345.662299-47-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-63-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/pagewalk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 9b3db11a4d1db..53e5c145fcce5 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -456,7 +456,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, } else { /* inside vma */ walk.vma = vma; next = min(end, vma->vm_end); - vma = vma->vm_next; + vma = find_vma(mm, vma->vm_end); err = walk_page_test(start, next, &walk); if (err > 0) { From 21bc68bae40097ef004a1e7cbdab37124b0d668f Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:12 +0000 Subject: [PATCH 074/321] mm/swapfile: use vma iterator instead of vma linked list unuse_mm() no longer needs to reference the linked list. Link: https://lkml.kernel.org/r/20220504011345.662299-48-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-64-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/swapfile.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 1fdccd2f1422e..5c8681a3f1d9d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1990,14 +1990,16 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type) { struct vm_area_struct *vma; int ret = 0; + VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { if (vma->anon_vma) { ret = unuse_vma(vma, type); if (ret) break; } + cond_resched(); } mmap_read_unlock(mm); From bf1e0255d473798246923d890c06d92c947c0109 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:13 +0000 Subject: [PATCH 075/321] i915: use the VMA iterator Replace the linked list in probe_range() with the VMA iterator. Link: https://lkml.kernel.org/r/20220504011345.662299-49-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-65-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 094f06b4ce335..a509f7da104be 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -426,12 +426,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { static int probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { - const unsigned long end = addr + len; + VMA_ITERATOR(vmi, mm, addr); struct vm_area_struct *vma; - int ret = -EFAULT; mmap_read_lock(mm); - for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { + for_each_vma_range(vmi, vma, addr + len) { /* Check for holes, note that we also update the addr below */ if (vma->vm_start > addr) break; @@ -439,16 +438,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) break; - if (vma->vm_end >= end) { - ret = 0; - break; - } - addr = vma->vm_end; } mmap_read_unlock(mm); - return ret; + if (vma) + return -EFAULT; + return 0; } /* From 906e3d163689f401b937e33e2e5a8365217637da Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 21 Jun 2022 20:47:13 +0000 Subject: [PATCH 076/321] nommu: remove uses of VMA linked list Use the maple tree or VMA iterator instead. This is faster and will allow us to shrink the VMA. Link: https://lkml.kernel.org/r/20220504011345.662299-50-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-66-Liam.Howlett@oracle.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/nommu.c | 135 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 101 insertions(+), 34 deletions(-) diff --git a/mm/nommu.c b/mm/nommu.c index 4ae252a785dcb..f6b187090d958 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -557,26 +557,14 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas) mas_store_prealloc(mas, NULL); } -/* - * add a VMA into a process's mm_struct in the appropriate place in the list - * and tree and add to the address space's page tree also if not an anonymous - * page - * - should be called with mm->mmap_lock held writelocked - */ -static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) +static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) { - struct address_space *mapping; - struct vm_area_struct *prev; - MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end); - - BUG_ON(!vma->vm_region); - mm->map_count++; vma->vm_mm = mm; /* add the VMA to the mapping */ if (vma->vm_file) { - mapping = vma->vm_file->f_mapping; + struct address_space *mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); flush_dcache_mmap_lock(mapping); @@ -584,21 +572,52 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) flush_dcache_mmap_unlock(mapping); i_mmap_unlock_write(mapping); } +} - prev = mas_prev(&mas, 0); - mas_reset(&mas); +/* + * mas_add_vma_to_mm() - Maple state variant of add_mas_to_mm(). + * @mas: The maple state with preallocations. + * @mm: The mm_struct + * @vma: The vma to add + * + */ +static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm, + struct vm_area_struct *vma) +{ + struct vm_area_struct *prev; + + BUG_ON(!vma->vm_region); + + setup_vma_to_mm(vma, mm); + + prev = mas_prev(mas, 0); + mas_reset(mas); /* add the VMA to the tree */ - vma_mas_store(vma, &mas); + vma_mas_store(vma, mas); __vma_link_list(mm, vma, prev); } /* - * delete a VMA from its owning mm_struct and address space + * add a VMA into a process's mm_struct in the appropriate place in the list + * and tree and add to the address space's page tree also if not an anonymous + * page + * - should be called with mm->mmap_lock held writelocked */ -static void delete_vma_from_mm(struct vm_area_struct *vma) +static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) { - MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0); + MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end); + + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { + pr_warn("Allocation of vma tree for process %d failed\n", + current->pid); + return -ENOMEM; + } + mas_add_vma_to_mm(&mas, mm, vma); + return 0; +} +static void cleanup_vma_from_mm(struct vm_area_struct *vma) +{ vma->vm_mm->map_count--; /* remove the VMA from the mapping */ if (vma->vm_file) { @@ -611,10 +630,25 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) flush_dcache_mmap_unlock(mapping); i_mmap_unlock_write(mapping); } +} +/* + * delete a VMA from its owning mm_struct and address space + */ +static int delete_vma_from_mm(struct vm_area_struct *vma) +{ + MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0); + + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { + pr_warn("Allocation of vma tree for process %d failed\n", + current->pid); + return -ENOMEM; + } + cleanup_vma_from_mm(vma); /* remove from the MM's tree and list */ vma_mas_remove(vma, &mas); __vma_unlink_list(vma->vm_mm, vma); + return 
0; } /* @@ -1024,6 +1058,7 @@ unsigned long do_mmap(struct file *file, vm_flags_t vm_flags; unsigned long capabilities, result; int ret; + MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); *populate = 0; @@ -1042,6 +1077,7 @@ unsigned long do_mmap(struct file *file, * now know into VMA flags */ vm_flags = determine_vm_flags(file, prot, flags, capabilities); + /* we're going to need to record the mapping */ region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); if (!region) @@ -1051,6 +1087,9 @@ unsigned long do_mmap(struct file *file, if (!vma) goto error_getting_vma; + if (mas_preallocate(&mas, vma, GFP_KERNEL)) + goto error_maple_preallocate; + region->vm_usage = 1; region->vm_flags = vm_flags; region->vm_pgoff = pgoff; @@ -1191,7 +1230,7 @@ unsigned long do_mmap(struct file *file, current->mm->total_vm += len >> PAGE_SHIFT; share: - add_vma_to_mm(current->mm, vma); + mas_add_vma_to_mm(&mas, current->mm, vma); /* we flush the region from the icache only when the first executable * mapping of it is made */ @@ -1217,6 +1256,7 @@ unsigned long do_mmap(struct file *file, sharing_violation: up_write(&nommu_region_sem); + mas_destroy(&mas); pr_warn("Attempt to share mismatched mappings\n"); ret = -EINVAL; goto error; @@ -1233,6 +1273,14 @@ unsigned long do_mmap(struct file *file, len, current->pid); show_free_areas(0, NULL); return -ENOMEM; + +error_maple_preallocate: + kmem_cache_free(vm_region_jar, region); + vm_area_free(vma); + pr_warn("Allocation of vma tree for process %d failed\n", current->pid); + show_free_areas(0, NULL); + return -ENOMEM; + } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, @@ -1298,6 +1346,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *new; struct vm_region *region; unsigned long npages; + MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end); /* we're only permitted to split anonymous regions (these should have * only a single usage on the region) */ @@ -1333,7 +1382,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); - delete_vma_from_mm(vma); down_write(&nommu_region_sem); delete_nommu_region(vma->vm_region); if (new_below) { @@ -1346,8 +1394,17 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, add_nommu_region(vma->vm_region); add_nommu_region(new->vm_region); up_write(&nommu_region_sem); - add_vma_to_mm(mm, vma); - add_vma_to_mm(mm, new); + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { + pr_warn("Allocation of vma tree for process %d failed\n", + current->pid); + return -ENOMEM; + } + + setup_vma_to_mm(vma, mm); + setup_vma_to_mm(new, mm); + mas_set_range(&mas, vma->vm_start, vma->vm_end - 1); + mas_store(&mas, vma); + vma_mas_store(new, &mas); return 0; } @@ -1363,12 +1420,14 @@ static int shrink_vma(struct mm_struct *mm, /* adjust the VMA's pointers, which may reposition it in the MM's tree * and list */ - delete_vma_from_mm(vma); + if (delete_vma_from_mm(vma)) + return -ENOMEM; if (from > vma->vm_start) vma->vm_end = from; else vma->vm_start = to; - add_vma_to_mm(mm, vma); + if (add_vma_to_mm(mm, vma)) + return -ENOMEM; /* cut the backing region down to size */ region = vma->vm_region; @@ -1396,9 +1455,10 @@ static int shrink_vma(struct mm_struct *mm, */ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { + MA_STATE(mas, &mm->mm_mt, start, start); struct vm_area_struct *vma; unsigned long end; - int ret; + int ret = 0; len = PAGE_ALIGN(len); if (len == 0) @@ -1407,7 +1467,7 
@@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list end = start + len; /* find the first potentially overlapping VMA */ - vma = find_vma(mm, start); + vma = mas_find(&mas, end - 1); if (!vma) { static int limit; if (limit < 5) { @@ -1426,7 +1486,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list return -EINVAL; if (end == vma->vm_end) goto erase_whole_vma; - vma = vma->vm_next; + vma = mas_next(&mas, end - 1); } while (vma); return -EINVAL; } else { @@ -1448,9 +1508,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list } erase_whole_vma: - delete_vma_from_mm(vma); + if (delete_vma_from_mm(vma)) + ret = -ENOMEM; delete_vma(mm, vma); - return 0; + return ret; } int vm_munmap(unsigned long addr, size_t len) @@ -1475,6 +1536,7 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) */ void exit_mmap(struct mm_struct *mm) { + VMA_ITERATOR(vmi, mm, 0); struct vm_area_struct *vma; if (!mm) @@ -1482,13 +1544,18 @@ void exit_mmap(struct mm_struct *mm) mm->total_vm = 0; - while ((vma = mm->mmap)) { - mm->mmap = vma->vm_next; - delete_vma_from_mm(vma); + /* + * Lock the mm to avoid assert complaining even though this is the only + * user of the mm + */ + mmap_write_lock(mm); + for_each_vma(vmi, vma) { + cleanup_vma_from_mm(vma); delete_vma(mm, vma); cond_resched(); } __mt_destroy(&mm->mm_mt); + mmap_write_unlock(mm); } int vm_brk(unsigned long addr, unsigned long len) From 635a2c142ece801f6ae035db05e85c3dbf50802b Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:13 +0000 Subject: [PATCH 077/321] riscv: use vma iterator for vdso Remove the linked list use in favour of the vma iterator. Link: https://lkml.kernel.org/r/20220504011345.662299-51-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-67-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- arch/riscv/kernel/vdso.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 69b05b6c181b6..692e7ae3dcb80 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -114,11 +114,12 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma; + VMA_ITERATOR(vmi, mm, 0); struct __vdso_info *vdso_info = mm->context.vdso_info; mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + for_each_vma(vmi, vma) { unsigned long size = vma->vm_end - vma->vm_start; if (vma_is_special_mapping(vma, vdso_info->dm)) From 3968bf068b0757cc6ef651eeee2497d690527b17 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:14 +0000 Subject: [PATCH 078/321] mm: remove the vma linked list Replace any vm_next use with vma_find(). Update free_pgtables(), unmap_vmas(), and zap_page_range() to use the maple tree. Use the new free_pgtables() and unmap_vmas() in do_mas_align_munmap(). At the same time, alter the loop to be more compact. Now that free_pgtables() and unmap_vmas() take a maple tree as an argument, rearrange do_mas_align_munmap() to use the new tree to hold the vmas to remove. Remove __vma_link_list() and __vma_unlink_list() as they are exclusively used to update the linked list Drop linked list update from __insert_vm_struct(). 
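With the list gone, every remaining walk goes straight through the maple tree, as in the minimal sketch below; visit_all() is a hypothetical name, while MA_STATE() and mas_for_each() are the interfaces the reworked walkers in this patch rely on.

	/* Walk every VMA straight off the maple tree, no vm_next needed.
	 * Illustrative sketch: visit_all() is a made-up helper; the caller
	 * is assumed to hold the mmap lock. */
	static void visit_all(struct mm_struct *mm)
	{
		MA_STATE(mas, &mm->mm_mt, 0, 0);
		struct vm_area_struct *vma;

		mas_for_each(&mas, vma, ULONG_MAX) {
			/* ... operate on vma ... */
		}
	}
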
Rework validation of tree as it was depending on the linked list. Link: https://lkml.kernel.org/r/20220504011345.662299-52-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220513141548.2019143-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-68-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Cc: Hulk Robot Cc: Yang Yingliang Signed-off-by: Andrew Morton --- include/linux/mm.h | 5 +- include/linux/mm_types.h | 4 - kernel/fork.c | 19 +- mm/debug.c | 14 +- mm/internal.h | 8 +- mm/memory.c | 33 ++- mm/mmap.c | 447 +++++++++++++++++---------------------- mm/nommu.c | 6 - mm/util.c | 40 ---- 9 files changed, 229 insertions(+), 347 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 376ccbcac8c2a..1e0fc5f4fc3db 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1892,8 +1892,9 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size); -void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, - unsigned long start, unsigned long end); +void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt, + struct vm_area_struct *start_vma, unsigned long start, + unsigned long end); struct mmu_notifier_range; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 040bfc207cb1a..5cc2cb396ca7f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -403,8 +403,6 @@ struct vm_area_struct { unsigned long vm_end; /* The first byte after our end address within vm_mm. */ - /* linked list of VM areas per task, sorted by address */ - struct vm_area_struct *vm_next, *vm_prev; struct mm_struct *vm_mm; /* The address space we belong to. 
*/ /* @@ -468,7 +466,6 @@ struct vm_area_struct { struct kioctx_table; struct mm_struct { struct { - struct vm_area_struct *mmap; /* list of VMAs */ struct maple_tree mm_mt; #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, @@ -483,7 +480,6 @@ struct mm_struct { unsigned long mmap_compat_legacy_base; #endif unsigned long task_size; /* size of task vm space */ - unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; #ifdef CONFIG_MEMBARRIER diff --git a/kernel/fork.c b/kernel/fork.c index 7fcefef4c84d2..889f318db3c11 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -474,7 +474,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) */ *new = data_race(*orig); INIT_LIST_HEAD(&new->anon_vma_chain); - new->vm_next = new->vm_prev = NULL; dup_anon_vma_name(orig, new); } return new; @@ -579,7 +578,7 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { - struct vm_area_struct *mpnt, *tmp, *prev, **pprev; + struct vm_area_struct *mpnt, *tmp; int retval; unsigned long charge = 0; MA_STATE(old_mas, &oldmm->mm_mt, 0, 0); @@ -606,18 +605,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, mm->exec_vm = oldmm->exec_vm; mm->stack_vm = oldmm->stack_vm; - pprev = &mm->mmap; retval = ksm_fork(mm, oldmm); if (retval) goto out; khugepaged_fork(mm, oldmm); - retval = mas_expected_entries(&mas, oldmm->map_count); - if (retval) - goto out; - - prev = NULL; - retval = mas_expected_entries(&mas, oldmm->map_count); if (retval) goto out; @@ -689,14 +681,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, if (is_vm_hugetlb_page(tmp)) reset_vma_resv_huge_pages(tmp); - /* - * Link in the new vma and copy the page table entries. 
- */ - *pprev = tmp; - pprev = &tmp->vm_next; - tmp->vm_prev = prev; - prev = tmp; - /* Link the vma into the MT */ mas.index = tmp->vm_start; mas.last = tmp->vm_end - 1; @@ -1119,7 +1103,6 @@ static void mm_init_uprobes_state(struct mm_struct *mm) static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { - mm->mmap = NULL; mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); atomic_set(&mm->mm_users, 1); diff --git a/mm/debug.c b/mm/debug.c index 2d625ca0e3269..0fd15ba70d163 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -139,13 +139,11 @@ EXPORT_SYMBOL(dump_page); void dump_vma(const struct vm_area_struct *vma) { - pr_emerg("vma %px start %px end %px\n" - "next %px prev %px mm %px\n" + pr_emerg("vma %px start %px end %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", - vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, - vma->vm_prev, vma->vm_mm, + vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, @@ -155,11 +153,11 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %px mmap %px task_size %lu\n" + pr_emerg("mm %px task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif - "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" + "mmap_base %lu mmap_legacy_base %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n" @@ -183,11 +181,11 @@ void dump_mm(const struct mm_struct *mm) "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", - mm, mm->mmap, mm->task_size, + mm, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif - mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, + mm->mmap_base, mm->mmap_legacy_base, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), diff --git a/mm/internal.h b/mm/internal.h index 7cf12a15475b9..eba79d1d3b06a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -84,8 +84,9 @@ void folio_rotate_reclaimable(struct folio *folio); bool __folio_end_writeback(struct folio *folio); void deactivate_file_folio(struct folio *folio); -void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, - unsigned long floor, unsigned long ceiling); +void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, + struct vm_area_struct *start_vma, unsigned long floor, + unsigned long ceiling); void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte); struct zap_details; @@ -479,9 +480,6 @@ static inline bool is_data_mapping(vm_flags_t flags) } /* mm/util.c */ -void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev); -void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma); struct anon_vma *folio_anon_vma(struct folio *folio); #ifdef CONFIG_MMU diff --git a/mm/memory.c b/mm/memory.c index 1395b42071d39..176f76323c007 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -402,12 +402,21 @@ void free_pgd_range(struct mmu_gather *tlb, } while (pgd++, addr = next, addr != end); } -void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, - unsigned long floor, unsigned long ceiling) +void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, + struct 
vm_area_struct *vma, unsigned long floor, + unsigned long ceiling) { - while (vma) { - struct vm_area_struct *next = vma->vm_next; + MA_STATE(mas, mt, vma->vm_end, vma->vm_end); + + do { unsigned long addr = vma->vm_start; + struct vm_area_struct *next; + + /* + * Note: USER_PGTABLES_CEILING may be passed as ceiling and may + * be 0. This will underflow and is okay. + */ + next = mas_find(&mas, ceiling - 1); /* * Hide vma from rmap and truncate_pagecache before freeing @@ -426,7 +435,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, while (next && next->vm_start <= vma->vm_end + PMD_SIZE && !is_vm_hugetlb_page(next)) { vma = next; - next = vma->vm_next; + next = mas_find(&mas, ceiling - 1); unlink_anon_vmas(vma); unlink_file_vma(vma); } @@ -434,7 +443,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, floor, next ? next->vm_start : ceiling); } vma = next; - } + } while (vma); } void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte) @@ -1705,7 +1714,7 @@ static void unmap_single_vma(struct mmu_gather *tlb, * ensure that any thus-far unmapped pages are flushed before unmap_vmas() * drops the lock and schedules. */ -void unmap_vmas(struct mmu_gather *tlb, +void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) { @@ -1715,12 +1724,14 @@ void unmap_vmas(struct mmu_gather *tlb, /* Careful - we need to zap private pages too! */ .even_cows = true, }; + MA_STATE(mas, mt, vma->vm_end, vma->vm_end); mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, start_addr, end_addr); mmu_notifier_invalidate_range_start(&range); - for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) + do { unmap_single_vma(tlb, vma, start_addr, end_addr, &details); + } while ((vma = mas_find(&mas, end_addr - 1)) != NULL); mmu_notifier_invalidate_range_end(&range); } @@ -1735,8 +1746,11 @@ void unmap_vmas(struct mmu_gather *tlb, void zap_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long size) { + struct maple_tree *mt = &vma->vm_mm->mm_mt; + unsigned long end = start + size; struct mmu_notifier_range range; struct mmu_gather tlb; + MA_STATE(mas, mt, vma->vm_end, vma->vm_end); lru_add_drain(); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, @@ -1744,8 +1758,9 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, tlb_gather_mmu(&tlb, vma->vm_mm); update_hiwater_rss(vma->vm_mm); mmu_notifier_invalidate_range_start(&range); - for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next) + do { unmap_single_vma(&tlb, vma, start, range.end, NULL); + } while ((vma = mas_find(&mas, end - 1)) != NULL); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); } diff --git a/mm/mmap.c b/mm/mmap.c index a17739cd5435e..5baedf4d46502 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -75,9 +75,10 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); -static void unmap_region(struct mm_struct *mm, +static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, struct vm_area_struct *vma, struct vm_area_struct *prev, - unsigned long start, unsigned long end); + struct vm_area_struct *next, unsigned long start, + unsigned long end); /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. 
The expected @@ -177,12 +178,10 @@ void unlink_file_vma(struct vm_area_struct *vma) } /* - * Close a vm structure and free it, returning the next. + * Close a vm structure and free it. */ -static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) +static void remove_vma(struct vm_area_struct *vma) { - struct vm_area_struct *next = vma->vm_next; - might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); @@ -190,7 +189,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) fput(vma->vm_file); mpol_put(vma_policy(vma)); vm_area_free(vma); - return next; } /* @@ -215,8 +213,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, unsigned long newbrk, unsigned long oldbrk, struct list_head *uf); static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *brkvma, - unsigned long addr, unsigned long request, - unsigned long flags); + unsigned long addr, unsigned long request, unsigned long flags); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long newbrk, oldbrk, origbrk; @@ -285,7 +282,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) * before calling do_brk_munmap(). */ mm->brk = brk; - mas.last = oldbrk - 1; ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf); if (ret == 1) { downgraded = true; @@ -340,44 +336,22 @@ extern void mt_dump(const struct maple_tree *mt); static void validate_mm_mt(struct mm_struct *mm) { struct maple_tree *mt = &mm->mm_mt; - struct vm_area_struct *vma_mt, *vma = mm->mmap; + struct vm_area_struct *vma_mt; MA_STATE(mas, mt, 0, 0); mt_validate(&mm->mm_mt); - mas_for_each(&mas, vma_mt, ULONG_MAX) { - if (xa_is_zero(vma_mt)) - continue; - - if (!vma) - break; - if ((vma != vma_mt) || - (vma->vm_start != vma_mt->vm_start) || - (vma->vm_end != vma_mt->vm_end) || - (vma->vm_start != mas.index) || - (vma->vm_end - 1 != mas.last)) { + mas_for_each(&mas, vma_mt, ULONG_MAX) { + if ((vma_mt->vm_start != mas.index) || + (vma_mt->vm_end - 1 != mas.last)) { pr_emerg("issue in %s\n", current->comm); dump_stack(); dump_vma(vma_mt); - pr_emerg("and vm_next\n"); - dump_vma(vma->vm_next); pr_emerg("mt piv: %px %lu - %lu\n", vma_mt, mas.index, mas.last); pr_emerg("mt vma: %px %lu - %lu\n", vma_mt, vma_mt->vm_start, vma_mt->vm_end); - if (vma->vm_prev) { - pr_emerg("ll prev: %px %lu - %lu\n", - vma->vm_prev, vma->vm_prev->vm_start, - vma->vm_prev->vm_end); - } - pr_emerg("ll vma: %px %lu - %lu\n", vma, - vma->vm_start, vma->vm_end); - if (vma->vm_next) { - pr_emerg("ll next: %px %lu - %lu\n", - vma->vm_next, vma->vm_next->vm_start, - vma->vm_next->vm_end); - } mt_dump(mas.tree); if (vma_mt->vm_end != mas.last + 1) { @@ -394,23 +368,19 @@ static void validate_mm_mt(struct mm_struct *mm) } VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm); } - VM_BUG_ON(vma != vma_mt); - vma = vma->vm_next; - } - VM_BUG_ON(vma); } static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; - unsigned long highest_address = 0; - struct vm_area_struct *vma = mm->mmap; + struct vm_area_struct *vma; + MA_STATE(mas, &mm->mm_mt, 0, 0); validate_mm_mt(mm); - while (vma) { + mas_for_each(&mas, vma, ULONG_MAX) { #ifdef CONFIG_DEBUG_VM_RB struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; @@ -422,18 +392,10 @@ static void validate_mm(struct mm_struct *mm) anon_vma_unlock_read(anon_vma); } #endif - - highest_address = vm_end_gap(vma); - vma = vma->vm_next; i++; } if (i != mm->map_count) { - pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); - bug = 1; - } - if (highest_address != 
mm->highest_vm_end) { - pr_emerg("mm->highest_vm_end %lx, found %lx\n", - mm->highest_vm_end, highest_address); + pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); @@ -493,29 +455,13 @@ bool range_has_overlap(struct mm_struct *mm, unsigned long start, struct vm_area_struct *existing; MA_STATE(mas, &mm->mm_mt, start, start); + rcu_read_lock(); existing = mas_find(&mas, end - 1); *pprev = mas_prev(&mas, 0); + rcu_read_unlock(); return existing ? true : false; } -/* - * __vma_next() - Get the next VMA. - * @mm: The mm_struct. - * @vma: The current vma. - * - * If @vma is NULL, return the first vma in the mm. - * - * Returns: The next VMA after @vma. - */ -static inline struct vm_area_struct *__vma_next(struct mm_struct *mm, - struct vm_area_struct *vma) -{ - if (!vma) - return mm->mmap; - - return vma->vm_next; -} - static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { @@ -600,8 +546,7 @@ static inline void vma_mas_szero(struct ma_state *mas, unsigned long start, mas_store_prealloc(mas, NULL); } -static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev) +static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) { MA_STATE(mas, &mm->mm_mt, 0, 0); struct address_space *mapping = NULL; @@ -615,7 +560,6 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, } vma_mas_store(vma, &mas); - __vma_link_list(mm, vma, prev); __vma_link_file(vma); if (mapping) @@ -626,22 +570,6 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, return 0; } -/* - * Helper for vma_adjust() in the split_vma insert case: insert a vma into the - * mm's list and the mm tree. It has already been inserted into the interval tree. - */ -static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas, - struct vm_area_struct *vma, unsigned long location) -{ - struct vm_area_struct *prev; - - mas_set(mas, location); - prev = mas_prev(mas, 0); - vma_mas_store(vma, mas); - __vma_link_list(mm, vma, prev); - mm->map_count++; -} - /* * vma_expand - Expand an existing VMA * @@ -720,15 +648,8 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, } /* Expanding over the next vma */ - if (remove_next) { - /* Remove from mm linked list - also updates highest_vm_end */ - __vma_unlink_list(mm, next); - - if (file) - __remove_shared_vm_struct(next, file, mapping); - - } else if (!next) { - mm->highest_vm_end = vm_end_gap(vma); + if (remove_next && file) { + __remove_shared_vm_struct(next, file, mapping); } if (file) { @@ -786,7 +707,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, int remove_next = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); struct vm_area_struct *exporter = NULL, *importer = NULL; - unsigned long ll_prev = vma->vm_start; /* linked list prev. */ if (next && !insert) { if (end >= next->vm_end) { @@ -832,7 +752,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * next, if the vma overlaps with it. 
*/ if (remove_next == 2 && !next->anon_vma) - exporter = next->vm_next; + exporter = find_vma(mm, next->vm_end); } else if (end > next->vm_start) { /* @@ -931,17 +851,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (vma->vm_end > end) { if (!insert || (insert->vm_start != end)) { vma_mas_szero(&mas, end, vma->vm_end); + mas_reset(&mas); VM_WARN_ON(insert && insert->vm_end < vma->vm_end); - } else if (insert->vm_start == end) { - ll_prev = vma->vm_end; } } else { vma_changed = true; } vma->vm_end = end; - if (!next) - mm->highest_vm_end = vm_end_gap(vma); } if (vma_changed) @@ -961,17 +878,17 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, flush_dcache_mmap_unlock(mapping); } - if (remove_next) { - __vma_unlink_list(mm, next); - if (file) - __remove_shared_vm_struct(next, file, mapping); + if (remove_next && file) { + __remove_shared_vm_struct(next, file, mapping); } else if (insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). */ - __insert_vm_struct(mm, &mas, insert, ll_prev); + mas_reset(&mas); + vma_mas_store(insert, &mas); + mm->map_count++; } if (anon_vma) { @@ -1010,8 +927,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, /* * If "next" was removed and vma->vm_end was * expanded (up) over it, in turn - * "next->vm_prev->vm_end" changed and the - * "vma->vm_next" gap must be updated. + * "next->prev->vm_end" changed and the + * "vma->next" gap must be updated. */ next = next_next; } else { @@ -1032,34 +949,15 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, remove_next = 1; end = next->vm_end; goto again; - } else if (!next) { - /* - * If remove_next == 2 we obviously can't - * reach this path. - * - * If remove_next == 3 we can't reach this - * path because pre-swap() next is always not - * NULL. pre-swap() "next" is not being - * removed and its next->vm_end is not altered - * (and furthermore "end" already matches - * next->vm_end in remove_next == 3). - * - * We reach this only in the remove_next == 1 - * case if the "next" vma that was removed was - * the highest vma of the mm. However in such - * case next->vm_end == "end" and the extended - * "vma" has vma->vm_end == next->vm_end so - * mm->highest_vm_end doesn't need any update - * in remove_next == 1 case. - */ - VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } - if (insert && file) + if (insert && file) { uprobe_mmap(insert); + } mas_destroy(&mas); validate_mm(mm); + return 0; } @@ -1219,10 +1117,10 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, if (vm_flags & VM_SPECIAL) return NULL; - next = __vma_next(mm, prev); + next = find_vma(mm, prev ? prev->vm_end : 0); area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ - next = next->vm_next; + next = find_vma(mm, next->vm_end); /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); @@ -1356,18 +1254,24 @@ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_ */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { + MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end); struct anon_vma *anon_vma = NULL; + struct vm_area_struct *prev, *next; /* Try next first. 
*/ - if (vma->vm_next) { - anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next); + next = mas_walk(&mas); + if (next) { + anon_vma = reusable_anon_vma(next, vma, next); if (anon_vma) return anon_vma; } + prev = mas_prev(&mas, 0); + VM_BUG_ON_VMA(prev != vma, vma); + prev = mas_prev(&mas, 0); /* Try prev next. */ - if (vma->vm_prev) - anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma); + if (prev) + anon_vma = reusable_anon_vma(prev, prev, vma); /* * We might reach here with anon_vma == NULL if we can't find @@ -2134,8 +2038,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; - next = vma->vm_next; - if (next && next->vm_start < gap_addr && vma_is_accessible(next)) { + next = find_vma_intersection(mm, vma->vm_end, gap_addr); + if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ @@ -2186,8 +2090,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Overwrite old entry in mtree. */ vma_mas_store(vma, &mas); anon_vma_interval_tree_post_update_vma(vma); - if (!vma->vm_next) - mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); @@ -2207,16 +2109,16 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); struct vm_area_struct *prev; int error = 0; - MA_STATE(mas, &mm->mm_mt, 0, 0); address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; /* Enforce stack_guard_gap */ - prev = vma->vm_prev; + prev = mas_prev(&mas, 0); /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && vma_is_accessible(prev)) { @@ -2352,25 +2254,26 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) EXPORT_SYMBOL_GPL(find_extend_vma); /* - * Ok - we have the memory areas we should free on the vma list, - * so release them, and do the vma updates. + * Ok - we have the memory areas we should free on a maple tree so release them, + * and do the vma updates. * * Called with the mm semaphore held. */ -static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) +static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) { unsigned long nr_accounted = 0; + struct vm_area_struct *vma; /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); - do { + mas_for_each(mas, vma, ULONG_MAX) { long nrpages = vma_pages(vma); if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); - vma = remove_vma(vma); - } while (vma); + remove_vma(vma); + } vm_unacct_memory(nr_accounted); validate_mm(mm); } @@ -2380,18 +2283,18 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) * * Called with the mm semaphore held. */ -static void unmap_region(struct mm_struct *mm, +static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, struct vm_area_struct *vma, struct vm_area_struct *prev, + struct vm_area_struct *next, unsigned long start, unsigned long end) { - struct vm_area_struct *next = __vma_next(mm, prev); struct mmu_gather tlb; lru_add_drain(); tlb_gather_mmu(&tlb, mm); update_hiwater_rss(mm); - unmap_vmas(&tlb, vma, start, end); - free_pgtables(&tlb, vma, prev ? 
prev->vm_end : FIRST_USER_ADDRESS, + unmap_vmas(&tlb, mt, vma, start, end); + free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb); } @@ -2475,24 +2378,17 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, return __split_vma(mm, vma, addr, new_below); } -static inline int -unlock_range(struct vm_area_struct *start, struct vm_area_struct **tail, - unsigned long limit) +static inline int munmap_sidetree(struct vm_area_struct *vma, + struct ma_state *mas_detach) { - struct mm_struct *mm = start->vm_mm; - struct vm_area_struct *tmp = start; - int count = 0; - - while (tmp && tmp->vm_start < limit) { - *tail = tmp; - count++; - if (tmp->vm_flags & VM_LOCKED) - mm->locked_vm -= vma_pages(tmp); + mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1); + if (mas_store_gfp(mas_detach, vma, GFP_KERNEL)) + return -ENOMEM; - tmp = tmp->vm_next; - } + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm -= vma_pages(vma); - return count; + return 0; } /* @@ -2512,9 +2408,13 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, struct mm_struct *mm, unsigned long start, unsigned long end, struct list_head *uf, bool downgrade) { - struct vm_area_struct *prev, *last; + struct vm_area_struct *prev, *next = NULL; + struct maple_tree mt_detach; + int count = 0; int error = -ENOMEM; - /* we have start < vma->vm_end */ + MA_STATE(mas_detach, &mt_detach, 0, 0); + mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&mt_detach, &mm->mmap_lock); if (mas_preallocate(mas, vma, GFP_KERNEL)) return -ENOMEM; @@ -2527,6 +2427,8 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, * unmapped vm_area_struct will remain in use: so lower split_vma * places tmp vma above, and higher split_vma places tmp vma below. */ + + /* Does it split the first one? */ if (start > vma->vm_start) { /* @@ -2537,35 +2439,58 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) goto map_count_exceeded; + /* + * mas_pause() is not needed since mas->index needs to be set + * differently than vma->vm_end anyways. + */ error = __split_vma(mm, vma, start, 0); if (error) - goto split_failed; + goto start_split_failed; - prev = vma; - vma = __vma_next(mm, prev); - mas->index = start; - mas_reset(mas); - } else { - prev = vma->vm_prev; + mas_set(mas, start); + vma = mas_walk(mas); } - if (vma->vm_end >= end) - last = vma; - else - last = find_vma_intersection(mm, end - 1, end); + prev = mas_prev(mas, 0); + if (unlikely((!prev))) + mas_set(mas, start); - /* Does it split the last one? */ - if (last && end < last->vm_end) { - error = __split_vma(mm, last, end, 1); + /* + * Detach a range of VMAs from the mm. Using next as a temp variable as + * it is always overwritten. + */ + mas_for_each(mas, next, end - 1) { + /* Does it split the end? 
*/ + if (next->vm_end > end) { + struct vm_area_struct *split; - if (error) - goto split_failed; + error = __split_vma(mm, next, end, 1); + if (error) + goto end_split_failed; + + mas_set(mas, end); + split = mas_prev(mas, 0); + if (munmap_sidetree(split, &mas_detach)) + goto munmap_sidetree_failed; - if (vma == last) - vma = __vma_next(mm, prev); - mas_reset(mas); + count++; + if (vma == next) + vma = split; + break; + } + if (munmap_sidetree(next, &mas_detach)) + goto munmap_sidetree_failed; + + count++; +#ifdef CONFIG_DEBUG_VM_MAPLE_TREE + BUG_ON(next->vm_start < start); + BUG_ON(next->vm_start > end); +#endif } + if (!next) + next = mas_next(mas, ULONG_MAX); + if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas @@ -2582,35 +2507,36 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, goto userfaultfd_error; } - /* - * unlock any mlock()ed ranges before detaching vmas, count the number - * of VMAs to be dropped, and return the tail entry of the affected - * area. - */ - mm->map_count -= unlock_range(vma, &last, end); - /* Drop removed area from the tree */ + /* Point of no return */ + mas_set_range(mas, start, end - 1); +#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) + /* Make sure no VMAs are about to be lost. */ + { + MA_STATE(test, &mt_detach, start, end - 1); + struct vm_area_struct *vma_mas, *vma_test; + int test_count = 0; + + rcu_read_lock(); + vma_test = mas_find(&test, end - 1); + mas_for_each(mas, vma_mas, end - 1) { + BUG_ON(vma_mas != vma_test); + test_count++; + vma_test = mas_next(&test, end - 1); + } + rcu_read_unlock(); + BUG_ON(count != test_count); + mas_set_range(mas, start, end - 1); + } +#endif mas_store_prealloc(mas, NULL); - - /* Detach vmas from the MM linked list */ - vma->vm_prev = NULL; - if (prev) - prev->vm_next = last->vm_next; - else - mm->mmap = last->vm_next; - - if (last->vm_next) { - last->vm_next->vm_prev = prev; - last->vm_next = NULL; - } else - mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; - + mm->map_count -= count; /* * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or * VM_GROWSUP VMA. Such VMAs can change their size under * down_read(mmap_lock) and collide with the VMA we are about to unmap. */ if (downgrade) { - if (last && (last->vm_flags & VM_GROWSDOWN)) + if (next && (next->vm_flags & VM_GROWSDOWN)) downgrade = false; else if (prev && (prev->vm_flags & VM_GROWSUP)) downgrade = false; @@ -2618,18 +2544,22 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, mmap_write_downgrade(mm); } - unmap_region(mm, vma, prev, start, end); - - /* Fix up all other VM information */ - remove_vma_list(mm, vma); + unmap_region(mm, &mt_detach, vma, prev, next, start, end); + /* Statistics and freeing VMAs */ + mas_set(&mas_detach, start); + remove_mt(mm, &mas_detach); + __mt_destroy(&mt_detach); validate_mm(mm); return downgrade ? 1 : 0; -map_count_exceeded: -split_failed: userfaultfd_error: +munmap_sidetree_failed: +end_split_failed: + __mt_destroy(&mt_detach); +start_split_failed: +map_count_exceeded: mas_destroy(mas); return error; } @@ -2864,7 +2794,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, i_mmap_lock_write(vma->vm_file->f_mapping); vma_mas_store(vma, &mas); - __vma_link_list(mm, vma, prev); mm->map_count++; if (vma->vm_file) { if (vma->vm_flags & VM_SHARED) @@ -2922,7 +2851,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma->vm_file = NULL; /* Undo any partial mapping done by a device driver. 
*/ - unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); @@ -3011,11 +2940,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, goto out; if (start + size > vma->vm_end) { - struct vm_area_struct *next; + VMA_ITERATOR(vmi, mm, vma->vm_end); + struct vm_area_struct *next, *prev = vma; - for (next = vma->vm_next; next; next = next->vm_next) { + for_each_vma_range(vmi, next, start + size) { /* hole between vmas ? */ - if (next->vm_start != next->vm_prev->vm_end) + if (next->vm_start != prev->vm_end) goto out; if (next->vm_file != vma->vm_file) @@ -3024,8 +2954,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, if (next->vm_flags != vma->vm_flags) goto out; - if (start + size <= next->vm_end) - break; + prev = next; } if (!next) @@ -3071,7 +3000,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, struct list_head *uf) { struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct unmap; + struct vm_area_struct unmap, *next; unsigned long unmap_pages; int ret; @@ -3087,6 +3016,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, ret = userfaultfd_unmap_prep(mm, newbrk, oldbrk, uf); if (ret) return ret; + ret = 1; /* Change the oldbrk of vma to the newbrk of the munmap area */ @@ -3108,6 +3038,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, vma_mas_remove(&unmap, mas); + vma->vm_end = newbrk; if (vma->anon_vma) { anon_vma_interval_tree_post_update_vma(vma); anon_vma_unlock_write(vma->anon_vma); @@ -3117,8 +3048,9 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, if (vma->vm_flags & VM_LOCKED) mm->locked_vm -= unmap_pages; + next = mas_next(mas, ULONG_MAX); mmap_write_downgrade(mm); - unmap_region(mm, &unmap, vma, newbrk, oldbrk); + unmap_region(mm, mas->tree, &unmap, vma, next, newbrk, oldbrk); /* Statistics */ vm_stat_account(mm, vma->vm_flags, -unmap_pages); if (vma->vm_flags & VM_ACCOUNT) @@ -3142,11 +3074,9 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, * do some brk-specific accounting here. 
*/ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, - unsigned long addr, unsigned long len, - unsigned long flags) + unsigned long addr, unsigned long len, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *prev = NULL; validate_mm_mt(mm); @@ -3190,7 +3120,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, khugepaged_enter_vma(vma, flags); goto out; } - prev = vma; /* create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); @@ -3208,12 +3137,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, goto mas_store_fail; mm->map_count++; - - if (!prev) - prev = mas_prev(mas, 0); - - __vma_link_list(mm, vma, prev); - mm->map_count++; out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; @@ -3221,7 +3144,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vma->vm_flags |= VM_SOFTDIRTY; - validate_mm_mt(mm); + validate_mm(mm); return 0; mas_store_fail: @@ -3295,6 +3218,8 @@ void exit_mmap(struct mm_struct *mm) struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); + int count = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); @@ -3319,7 +3244,7 @@ void exit_mmap(struct mm_struct *mm) mmap_write_lock(mm); arch_exit_mmap(mm); - vma = mm->mmap; + vma = mas_find(&mas, ULONG_MAX); if (!vma) { /* Can happen if dup_mmap() received an OOM */ mmap_write_unlock(mm); @@ -3330,22 +3255,29 @@ void exit_mmap(struct mm_struct *mm) flush_cache_mm(mm); tlb_gather_mmu_fullmm(&tlb, mm); /* update_hiwater_rss(mm) here? but nobody should be looking */ - /* Use -1 here to ensure all VMAs in the mm are unmapped */ - unmap_vmas(&tlb, vma, 0, -1); - free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); + /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ + unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX); + free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, + USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb); - /* Walk the list again, actually closing and freeing it. */ - while (vma) { + /* + * Walk the list again, actually closing and freeing it, with preemption + * enabled, without holding any MM locks besides the unreachable + * mmap_write_lock. 
+ */ + do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - vma = remove_vma(vma); + remove_vma(vma); + count++; cond_resched(); - } + } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); + + BUG_ON(count != mm->map_count); trace_exit_mmap(mm); __mt_destroy(&mm->mm_mt); - mm->mmap = NULL; mmap_write_unlock(mm); vm_unacct_memory(nr_accounted); } @@ -3382,7 +3314,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } - if (vma_link(mm, vma, prev)) + if (vma_link(mm, vma)) return -ENOMEM; return 0; @@ -3412,7 +3344,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, faulted_in_anon_vma = false; } - if (range_has_overlap(mm, addr, addr + len, &prev)) + new_vma = find_vma_prev(mm, addr, &prev); + if (new_vma->vm_start < addr + len) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, @@ -3455,7 +3388,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); - if (vma_link(mm, new_vma, prev)) + if (vma_link(mm, new_vma)) goto out_vma_link; *need_rmap_locks = false; } @@ -3760,12 +3693,13 @@ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; + MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_assert_write_locked(mm); mutex_lock(&mm_all_locks_mutex); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && @@ -3773,7 +3707,8 @@ int mm_take_all_locks(struct mm_struct *mm) vm_lock_mapping(mm, vma->vm_file->f_mapping); } - for (vma = mm->mmap; vma; vma = vma->vm_next) { + mas_set(&mas, 0); + mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && @@ -3781,7 +3716,8 @@ int mm_take_all_locks(struct mm_struct *mm) vm_lock_mapping(mm, vma->vm_file->f_mapping); } - for (vma = mm->mmap; vma; vma = vma->vm_next) { + mas_set(&mas, 0); + mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) @@ -3840,11 +3776,12 @@ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; + MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_assert_write_locked(mm); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); - for (vma = mm->mmap; vma; vma = vma->vm_next) { + mas_for_each(&mas, vma, ULONG_MAX) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); diff --git a/mm/nommu.c b/mm/nommu.c index f6b187090d958..7b3fad6118953 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -584,17 +584,12 @@ static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct *prev; - BUG_ON(!vma->vm_region); setup_vma_to_mm(vma, mm); - prev = mas_prev(mas, 0); - mas_reset(mas); /* add the VMA to the tree */ vma_mas_store(vma, mas); - __vma_link_list(mm, vma, prev); } /* @@ -647,7 +642,6 @@ static int delete_vma_from_mm(struct vm_area_struct *vma) /* remove from the MM's tree and list */ vma_mas_remove(vma, &mas); - __vma_unlink_list(vma->vm_mm, vma); return 0; } diff --git a/mm/util.c b/mm/util.c index 812365cbdd197..c831c9f8766b4 100644 --- a/mm/util.c +++ b/mm/util.c @@ -272,46 +272,6 
@@ void *memdup_user_nul(const void __user *src, size_t len) } EXPORT_SYMBOL(memdup_user_nul); -void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, - struct vm_area_struct *prev) -{ - struct vm_area_struct *next; - - vma->vm_prev = prev; - if (prev) { - next = prev->vm_next; - prev->vm_next = vma; - } else { - next = mm->mmap; - mm->mmap = vma; - } - vma->vm_next = next; - if (next) - next->vm_prev = vma; - else - mm->highest_vm_end = vm_end_gap(vma); -} - -void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma) -{ - struct vm_area_struct *prev, *next; - - next = vma->vm_next; - prev = vma->vm_prev; - if (prev) - prev->vm_next = next; - else - mm->mmap = next; - if (next) { - next->vm_prev = prev; - } else { - if (prev) - mm->highest_vm_end = vm_end_gap(prev); - else - mm->highest_vm_end = 0; - } -} - /* Check if the vma is being used as a stack by this task */ int vma_is_stack_for_current(struct vm_area_struct *vma) { From 224d6a1a2044c096b26b575e7cd09446b8ee0e33 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 23 Jun 2022 21:42:37 +0800 Subject: [PATCH 079/321] mm/mmap: fix error return code in do_mas_align_munmap() Return error code when munmap_sidetree() fails in do_mas_align_munmap(). Link: https://lkml.kernel.org/r/20220623134237.2127440-1-yangyingliang@huawei.com Fixes: 81f5504dfb36 ("mm/mmap: change do_mas_align_munmap() to avoid preallocations for sidetree") Signed-off-by: Yang Yingliang Reported-by: Hulk Robot Reviewed-by: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mmap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 5baedf4d46502..f685433d6d78b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2470,7 +2470,8 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, mas_set(mas, end); split = mas_prev(mas, 0); - if (munmap_sidetree(split, &mas_detach)) + error = munmap_sidetree(split, &mas_detach); + if (error) goto munmap_sidetree_failed; count++; @@ -2478,7 +2479,8 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, vma = split; break; } - if (munmap_sidetree(next, &mas_detach)) + error = munmap_sidetree(next, &mas_detach); + if (error) goto munmap_sidetree_failed; count++; From a7ebd0ce1b9d1df8000fa9ca34f0c417872b58e5 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 28 Jun 2022 10:46:19 +0100 Subject: [PATCH 080/321] mm: document maple tree pointer at unmap_vmas() at memory.c A maple tree pointer parameter was added at unmap_vmas() without description: mm/memory.c:1729: warning: Function parameter or member 'mt' not described in 'unmap_vmas' Document it. Link: https://lkml.kernel.org/r/d2690118414c5458c448348cc94f831d8e8444ce.1656409369.git.mchehab@kernel.org Fixes: 5966e5c968b2 ("mm: remove the vma linked list") Fixes: f8acc5e9581e ("Maple Tree: add new data structure") Signed-off-by: Mauro Carvalho Chehab Cc: "Liam R. Howlett" Signed-off-by: Andrew Morton --- mm/memory.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index 176f76323c007..64bf660f3e40c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1699,6 +1699,8 @@ static void unmap_single_vma(struct mmu_gather *tlb, /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather + * @mt: maple tree pointer. 
A maple tree is a RCU-safe range based B-tree + * designed to use modern processor cache efficiently * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping From ffefb58361d427d438eb2b610766fd3599dc4632 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 29 Jun 2022 16:45:49 -0700 Subject: [PATCH 081/321] mm-remove-the-vma-linked-list-fix-2-fix alter description, per Liam Cc: "Liam R. Howlett" Cc: Mauro Carvalho Chehab Signed-off-by: Andrew Morton --- mm/memory.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 64bf660f3e40c..2028d3b23db7e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1699,8 +1699,7 @@ static void unmap_single_vma(struct mmu_gather *tlb, /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather - * @mt: maple tree pointer. A maple tree is a RCU-safe range based B-tree - * designed to use modern processor cache efficiently + * @mt: The maple tree pointer for the VMAs * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping From 944b01159484434615f844551fb7c15a9d0d5c03 Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Wed, 6 Jul 2022 00:35:33 +0000 Subject: [PATCH 082/321] mm/mmap: fix copy_vma() new_vma check When checking new_vma limits to ensure they are not within where the copy of the vma will be placed, ensure new_vma is not NULL. Link: https://lkml.kernel.org/r/20220706003522.1827240-1-Liam.Howlett@oracle.com Fixes: ded0cf440d9b (mm: remove the vma linked list) Signed-off-by: Liam R. Howlett Reported-by: Yu Zhao Signed-off-by: Andrew Morton --- mm/mmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/mmap.c b/mm/mmap.c index f685433d6d78b..365572eb9da53 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3347,7 +3347,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, } new_vma = find_vma_prev(mm, addr, &prev); - if (new_vma->vm_start < addr + len) + if (new_vma && new_vma->vm_start < addr + len) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, From b3c7a0adb500f828499a68485e8f65ec6fa05a32 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:14 +0000 Subject: [PATCH 083/321] mm/mmap: drop range_has_overlap() function Since there is no longer a linked list, the range_has_overlap() function is identical to the find_vma_intersection() function. Link: https://lkml.kernel.org/r/20220504011345.662299-53-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-69-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Acked-by: Vlastimil Babka Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 365572eb9da53..4fb90ac5c50e5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -438,30 +438,6 @@ anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } -/* - * range_has_overlap() - Check the @start - @end range for overlapping VMAs and - * sets up a pointer to the previous VMA - * @mm: the mm struct - * @start: the start address of the range - * @end: the end address of the range - * @pprev: the pointer to the pointer of the previous VMA - * - * Returns: True if there is an overlapping VMA, false otherwise - */ -static inline -bool range_has_overlap(struct mm_struct *mm, unsigned long start, - unsigned long end, struct vm_area_struct **pprev) -{ - struct vm_area_struct *existing; - - MA_STATE(mas, &mm->mm_mt, start, start); - rcu_read_lock(); - existing = mas_find(&mas, end - 1); - *pprev = mas_prev(&mas, 0); - rcu_read_unlock(); - return existing ? true : false; -} - static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { @@ -3290,9 +3266,7 @@ void exit_mmap(struct mm_struct *mm) */ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct *prev; - - if (range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev)) + if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && From 4a235a74e58f7f885f60ed53af82325176dd568e Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 21 Jun 2022 20:47:14 +0000 Subject: [PATCH 084/321] mm/mmap.c: pass in mapping to __vma_link_file() __vma_link_file() resolves the mapping from the file, if there is one. Pass through the mapping and check the vm_file externally since most places already have the required information and check of vm_file. Link: https://lkml.kernel.org/r/20220504011345.662299-54-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20220621204632.3370049-70-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: Catalin Marinas Cc: David Howells Cc: "Matthew Wilcox (Oracle)" Cc: SeongJae Park Cc: Vlastimil Babka Cc: Will Deacon Cc: Davidlohr Bueso Signed-off-by: Andrew Morton --- mm/mmap.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 4fb90ac5c50e5..304fa66c07529 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -246,6 +246,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) if (brk < min_brk) goto out; + /* * Check against rlimit here. 
If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data @@ -322,7 +323,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; - out: mmap_write_unlock(mm); return origbrk; @@ -455,21 +455,15 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, return nr_pages; } -static void __vma_link_file(struct vm_area_struct *vma) +static void __vma_link_file(struct vm_area_struct *vma, + struct address_space *mapping) { - struct file *file; - - file = vma->vm_file; - if (file) { - struct address_space *mapping = file->f_mapping; - - if (vma->vm_flags & VM_SHARED) - mapping_allow_writable(mapping); + if (vma->vm_flags & VM_SHARED) + mapping_allow_writable(mapping); - flush_dcache_mmap_lock(mapping); - vma_interval_tree_insert(vma, &mapping->i_mmap); - flush_dcache_mmap_unlock(mapping); - } + flush_dcache_mmap_lock(mapping); + vma_interval_tree_insert(vma, &mapping->i_mmap); + flush_dcache_mmap_unlock(mapping); } /* @@ -536,10 +530,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) } vma_mas_store(vma, &mas); - __vma_link_file(vma); - if (mapping) + if (mapping) { + __vma_link_file(vma, mapping); i_mmap_unlock_write(mapping); + } mm->map_count++; validate_mm(mm); @@ -783,14 +778,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, uprobe_munmap(next, next->vm_start, next->vm_end); i_mmap_lock_write(mapping); - if (insert) { + if (insert && insert->vm_file) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. */ - __vma_link_file(insert); + __vma_link_file(insert, insert->vm_file->f_mapping); } } @@ -3057,7 +3052,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, struct mm_struct *mm = current->mm; validate_mm_mt(mm); - /* * Check against address space limits by the changed size * Note: This happens *after* clearing old mappings in some code paths. @@ -3115,6 +3109,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, goto mas_store_fail; mm->map_count++; + out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; From c2ca38f1ca2645a36f63f90bc523d71fd7455431 Mon Sep 17 00:00:00 2001 From: Kalpana Shetty Date: Tue, 31 May 2022 15:55:56 +0530 Subject: [PATCH 085/321] selftests/vm: add protection_keys tests to run_vmtests Adding "protection_keys" tests to "run_vmtests.sh" helps run all VM-related tests from a single shell script.
Link: https://lkml.kernel.org/r/20220610090704.296-1-kalpana.shetty@amd.com Link: https://lkml.kernel.org/r/20220531102556.388-1-kalpana.shetty@amd.com Signed-off-by: Kalpana Shetty Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/run_vmtests.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh index 27c01c35c7a93..ffba52659dc78 100755 --- a/tools/testing/selftests/vm/run_vmtests.sh +++ b/tools/testing/selftests/vm/run_vmtests.sh @@ -179,4 +179,11 @@ run_test ./ksm_tests -N -m 1 # KSM test with 2 NUMA nodes and merge_across_nodes = 0 run_test ./ksm_tests -N -m 0 +# protection_keys tests +if [ $VADDR64 -eq 0 ]; then + run_test ./protection_keys_32 +else + run_test ./protection_keys_64 +fi + exit $exitcode From 0582ddce725abe249fbb2577150b36a0b87f196f Mon Sep 17 00:00:00 2001 From: Kalpana Shetty Date: Sat, 18 Jun 2022 01:59:31 +0530 Subject: [PATCH 086/321] selftests-vm-add-protection_keys-tests-to-run_vmtests-v4 Shuah Khan's review comments incorporated, added -x executable check Link: https://lkml.kernel.org/r/20220617202931.357-1-kalpana.shetty@amd.com Signed-off-by: Kalpana Shetty Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/run_vmtests.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh index ffba52659dc78..2af563a9652e0 100755 --- a/tools/testing/selftests/vm/run_vmtests.sh +++ b/tools/testing/selftests/vm/run_vmtests.sh @@ -180,9 +180,13 @@ run_test ./ksm_tests -N -m 1 run_test ./ksm_tests -N -m 0 # protection_keys tests -if [ $VADDR64 -eq 0 ]; then +if [ -x ./protection_keys_32 ] +then run_test ./protection_keys_32 -else +fi + +if [ -x ./protection_keys_64 ] +then run_test ./protection_keys_64 fi From 775e7e1a15070cd5fe2e666ec0beb0a19328bc28 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Tue, 31 May 2022 15:30:59 -0700 Subject: [PATCH 087/321] mm: drop oom code from exit_mmap The primary reason to invoke the oom reaper from the exit_mmap path used to be a prevention of an excessive oom killing if the oom victim exit races with the oom reaper (see [1] for more details). The invocation has moved around since then because of the interaction with the munlock logic but the underlying reason has remained the same (see [2]). Munlock code is no longer a problem since [3] and there shouldn't be any blocking operation before the memory is unmapped by exit_mmap so the oom reaper invocation can be dropped. The unmapping part can be done with the non-exclusive mmap_sem and the exclusive one is only required when page tables are freed. Remove the oom_reaper from exit_mmap which will make the code easier to read. This is really unlikely to make any observable difference although some microbenchmarks could benefit from one less branch that needs to be evaluated even though it almost never is true. 
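For orientation while reading the flattened diff below, the resulting exit_mmap() flow can be condensed roughly as follows. This is only a sketch assembled from the hunks in this patch; declarations, the empty-tree early return, statistics and the final VMA-freeing loop are elided:

	mmu_notifier_release(mm);
	/* no __oom_reap_task_mm()/mm_is_oom_victim() special case any more */
	mmap_write_lock(mm);
	arch_exit_mmap(mm);
	vma = mas_find(&mas, ULONG_MAX);	/* early return here if the tree is empty */
	flush_cache_mm(mm);
	tlb_gather_mmu_fullmm(&tlb, mm);
	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
	/* memory is already freed; hide the mm from the oom reaper unconditionally */
	set_bit(MMF_OOM_SKIP, &mm->flags);
	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb);
	/* ... remove_vma() loop, __mt_destroy(), mmap_write_unlock(), vm_unacct_memory() ... */

A follow-up fix later in this series relaxes the first phase to mmap_read_lock(), taking the exclusive lock only once the page tables are about to be freed.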
[1] 212925802454 ("mm: oom: let oom_reap_task and exit_mmap run concurrently") [2] 27ae357fa82b ("mm, oom: fix concurrent munlock and oom reaper unmap, v3") [3] a213e5cf71cb ("mm/munlock: delete munlock_vma_pages_all(), allow oomreap") Link: https://lkml.kernel.org/r/20220531223100.510392-1-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Michal Hocko Cc: Andrea Arcangeli Cc: Christian Brauner (Microsoft) Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Rientjes Cc: Jann Horn Cc: Johannes Weiner Cc: John Hubbard Cc: "Kirill A . Shutemov" Cc: Liam Howlett Cc: Matthew Wilcox Cc: Minchan Kim Cc: Oleg Nesterov Cc: Peter Xu Cc: Roman Gushchin Cc: Shakeel Butt Cc: Shuah Khan Signed-off-by: Andrew Morton --- include/linux/oom.h | 2 -- mm/mmap.c | 24 +++++++----------------- mm/oom_kill.c | 2 +- 3 files changed, 8 insertions(+), 20 deletions(-) diff --git a/include/linux/oom.h b/include/linux/oom.h index 02d1e7bbd8cd5..6cdde62b078b5 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -106,8 +106,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) return 0; } -bool __oom_reap_task_mm(struct mm_struct *mm); - long oom_badness(struct task_struct *p, unsigned long totalpages); diff --git a/mm/mmap.c b/mm/mmap.c index 304fa66c07529..0abf917abb448 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3197,23 +3197,6 @@ void exit_mmap(struct mm_struct *mm) /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); - if (unlikely(mm_is_oom_victim(mm))) { - /* - * Manually reap the mm to free as much memory as possible. - * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard - * this mm from further consideration. Taking mm->mmap_lock for - * write after setting MMF_OOM_SKIP will guarantee that the oom - * reaper will not run on this mm again after mmap_lock is - * dropped. - * - * Nothing can be holding mm->mmap_lock here and the above call - * to mmu_notifier_release(mm) ensures mmu notifier callbacks in - * __oom_reap_task_mm() will not block. - */ - (void)__oom_reap_task_mm(mm); - set_bit(MMF_OOM_SKIP, &mm->flags); - } - mmap_write_lock(mm); arch_exit_mmap(mm); @@ -3230,6 +3213,13 @@ void exit_mmap(struct mm_struct *mm) /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX); + + /* + * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper + * because the memory has been already freed. Do not bother checking + * mm_is_oom_victim because setting a bit unconditionally is cheaper. 
+ */ + set_bit(MMF_OOM_SKIP, &mm->flags); free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3996301450e8d..decb21474c6c5 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -509,7 +509,7 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); static struct task_struct *oom_reaper_list; static DEFINE_SPINLOCK(oom_reaper_lock); -bool __oom_reap_task_mm(struct mm_struct *mm) +static bool __oom_reap_task_mm(struct mm_struct *mm) { struct vm_area_struct *vma; bool ret = true; From b736e7bc91e7965977b371a5ef2a20ff76e7aa67 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 1 Jun 2022 15:17:52 -0700 Subject: [PATCH 088/321] mm-drop-oom-code-from-exit_mmap-fix-fix restore Suren's mmap_read_lock() optimization Cc: Suren Baghdasaryan Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/mmap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 0abf917abb448..e272d4b9b6b1c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3197,13 +3197,13 @@ void exit_mmap(struct mm_struct *mm) /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); - mmap_write_lock(mm); + mmap_read_lock(mm); arch_exit_mmap(mm); vma = mas_find(&mas, ULONG_MAX); if (!vma) { /* Can happen if dup_mmap() received an OOM */ - mmap_write_unlock(mm); + mmap_read_unlock(mm); return; } @@ -3213,6 +3213,7 @@ void exit_mmap(struct mm_struct *mm) /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX); + mmap_read_unlock(mm); /* * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper @@ -3220,6 +3221,7 @@ void exit_mmap(struct mm_struct *mm) * mm_is_oom_victim because setting a bit unconditionally is cheaper. */ set_bit(MMF_OOM_SKIP, &mm->flags); + mmap_write_lock(mm); free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb); From 870c4fdebdebc581329573a67be27548de5b2044 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Tue, 31 May 2022 15:31:00 -0700 Subject: [PATCH 089/321] mm: delete unused MMF_OOM_VICTIM flag With the last usage of MMF_OOM_VICTIM in exit_mmap gone, this flag is now unused and can be removed. Link: https://lkml.kernel.org/r/20220531223100.510392-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Michal Hocko Cc: David Rientjes Cc: Matthew Wilcox Cc: Johannes Weiner Cc: Roman Gushchin Cc: Minchan Kim Cc: "Kirill A . Shutemov" Cc: Andrea Arcangeli Cc: Christian Brauner (Microsoft) Cc: Christoph Hellwig Cc: Oleg Nesterov Cc: David Hildenbrand Cc: Jann Horn Cc: Shakeel Butt Cc: Peter Xu Cc: John Hubbard Cc: Shuah Khan Cc: Liam Howlett Signed-off-by: Andrew Morton --- include/linux/oom.h | 9 --------- include/linux/sched/coredump.h | 7 +++---- mm/oom_kill.c | 4 +--- 3 files changed, 4 insertions(+), 16 deletions(-) diff --git a/include/linux/oom.h b/include/linux/oom.h index 6cdde62b078b5..7d0c9c48a0c54 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -77,15 +77,6 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) return tsk->signal->oom_mm; } -/* - * Use this helper if tsk->mm != mm and the victim mm needs a special - * handling. This is guaranteed to stay true after once set. 
- */ -static inline bool mm_is_oom_victim(struct mm_struct *mm) -{ - return test_bit(MMF_OOM_VICTIM, &mm->flags); -} - /* * Checks whether a page fault on the given mm is still reliable. * This is no longer true if the oom reaper started to reap the diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 4d0a5be28b70f..8270ad7ae14c2 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -71,9 +71,8 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ -#define MMF_OOM_VICTIM 25 /* mm is the oom victim */ -#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ -#define MMF_MULTIPROCESS 27 /* mm is shared between processes */ +#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ +#define MMF_MULTIPROCESS 26 /* mm is shared between processes */ /* * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either * replaced in the future by mm.pinned_vm when it becomes stable, or grow into @@ -81,7 +80,7 @@ static inline int get_dumpable(struct mm_struct *mm) * pinned pages were unpinned later on, we'll still keep this bit set for the * lifecycle of this mm, just for simplicity. */ -#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */ +#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index decb21474c6c5..35ec75cdfee21 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -765,10 +765,8 @@ static void mark_oom_victim(struct task_struct *tsk) return; /* oom_mm is bound to the signal struct life time. */ - if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { + if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) mmgrab(tsk->signal->oom_mm); - set_bit(MMF_OOM_VICTIM, &mm->flags); - } /* * Make sure that the task is woken up from uninterruptible sleep From 2c5c9473dd66c0810107ab4f1e09d8f455a80318 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Mat=C4=9Bna?= Date: Fri, 3 Jun 2022 16:57:18 +0200 Subject: [PATCH 090/321] mm: refactor of vma_merge() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Refactor of vma_merge and new merge call", v4. I am currently working on my master's thesis trying to increase number of merges of VMAs currently failing because of page offset incompatibility and difference in their anon_vmas. The following refactor and added merge call included in this series is just two smaller upgrades I created along the way. This patch (of 2): Refactor vma_merge() to make it shorter and more understandable. Main change is the elimination of code duplicity in the case of merge next check. This is done by first doing checks and caching the results before executing the merge itself. The variable 'area' is divided into 'mid' and 'res' as previously it was used for two purposes, as the middle VMA between prev and next and also as the result of the merge itself. Exit paths are also unified. 
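For readers following the flattened diff below, the refactored function reduces to the shape sketched here. The argument lists of the mergeability helpers are elided and marked in comments, so treat this as a reading aid distilled from the diff rather than compilable code:

	struct vm_area_struct *mid, *next, *res;
	int err = -1;
	bool merge_prev = false;
	bool merge_next = false;

	next = find_vma(mm, prev ? prev->vm_end : 0);
	mid = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = find_vma(mm, next->vm_end);

	/* Step 1: only decide what is mergeable and cache the answers. */
	if (prev && prev->vm_end == addr &&
	    can_vma_merge_after(prev, /* flags, anon_vma, file, pgoff, ... elided */))
		merge_prev = true;
	if (next && end == next->vm_start &&
	    can_vma_merge_before(next, /* flags, anon_vma, file, pgoff, ... elided */))
		merge_next = true;

	/* Step 2: a single dispatch on the cached answers; 'res' is the merge result. */
	if (merge_prev && merge_next &&
	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
		err = __vma_adjust(prev, prev->vm_start, next->vm_end,
				   prev->vm_pgoff, NULL, prev);	/* cases 1, 6 */
		res = prev;
	} else if (merge_prev) {
		err = __vma_adjust(prev, prev->vm_start, end,
				   prev->vm_pgoff, NULL, prev);	/* cases 2, 5, 7 */
		res = prev;
	} else if (merge_next) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = __vma_adjust(prev, prev->vm_start, addr,
					   prev->vm_pgoff, NULL, next);
		else					/* cases 3, 8 */
			err = __vma_adjust(mid, addr, next->vm_end,
					   next->vm_pgoff - pglen, NULL, next);
		res = next;
	}

	/* Step 3: the unified exit path. */
	if (err)
		return NULL;
	khugepaged_enter_vma(res, vm_flags);
	return res;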
Link: https://lkml.kernel.org/r/20220603145719.1012094-1-matenajakub@gmail.com Link: https://lkml.kernel.org/r/20220603145719.1012094-2-matenajakub@gmail.com Signed-off-by: Jakub Matěna Reviewed-by: Vlastimil Babka Cc: Michal Hocko Cc: Mel Gorman Cc: Matthew Wilcox Cc: Liam Howlett Cc: Hugh Dickins Cc: "Kirill A . Shutemov" Cc: Rik van Riel Cc: Steven Rostedt Cc: Peter Zijlstra (Intel) Signed-off-by: Andrew Morton --- mm/mmap.c | 87 +++++++++++++++++++++++-------------------------------- 1 file changed, 37 insertions(+), 50 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index e272d4b9b6b1c..d71f990447c6e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1078,8 +1078,10 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct anon_vma_name *anon_name) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; - struct vm_area_struct *area, *next; - int err; + struct vm_area_struct *mid, *next, *res; + int err = -1; + bool merge_prev = false; + bool merge_next = false; /* * We later require that vma->vm_flags == vm_flags, @@ -1089,75 +1091,60 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, return NULL; next = find_vma(mm, prev ? prev->vm_end : 0); - area = next; - if (area && area->vm_end == end) /* cases 6, 7, 8 */ + mid = next; + if (next && next->vm_end == end) /* cases 6, 7, 8 */ next = find_vma(mm, next->vm_end); /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); - VM_WARN_ON(area && end > area->vm_end); + VM_WARN_ON(mid && end > mid->vm_end); VM_WARN_ON(addr >= end); - /* - * Can it merge with the predecessor? - */ + /* Can we merge the predecessor? */ if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx, anon_name)) { - /* - * OK, it can. Can we now merge in the successor as well? - */ - if (next && end == next->vm_start && - mpol_equal(policy, vma_policy(next)) && - can_vma_merge_before(next, vm_flags, - anon_vma, file, - pgoff+pglen, - vm_userfaultfd_ctx, anon_name) && - is_mergeable_anon_vma(prev->anon_vma, - next->anon_vma, NULL)) { - /* cases 1, 6 */ - err = __vma_adjust(prev, prev->vm_start, - next->vm_end, prev->vm_pgoff, NULL, - prev); - } else /* cases 2, 5, 7 */ - err = __vma_adjust(prev, prev->vm_start, - end, prev->vm_pgoff, NULL, prev); - if (err) - return NULL; - khugepaged_enter_vma(prev, vm_flags); - return prev; + merge_prev = true; } - - /* - * Can this new request be merged in front of next? - */ + /* Can we merge the successor? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx, anon_name)) { + merge_next = true; + } + /* Can we merge both the predecessor and the successor? 
*/ + if (merge_prev && merge_next && + is_mergeable_anon_vma(prev->anon_vma, + next->anon_vma, NULL)) { /* cases 1, 6 */ + err = __vma_adjust(prev, prev->vm_start, + next->vm_end, prev->vm_pgoff, NULL, + prev); + res = prev; + } else if (merge_prev) { /* cases 2, 5, 7 */ + err = __vma_adjust(prev, prev->vm_start, + end, prev->vm_pgoff, NULL, prev); + res = prev; + } else if (merge_next) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, - addr, prev->vm_pgoff, NULL, next); - else { /* cases 3, 8 */ - err = __vma_adjust(area, addr, next->vm_end, - next->vm_pgoff - pglen, NULL, next); - /* - * In case 3 area is already equal to next and - * this is a noop, but in case 8 "area" has - * been removed and next was expanded over it. - */ - area = next; - } - if (err) - return NULL; - khugepaged_enter_vma(area, vm_flags); - return area; + addr, prev->vm_pgoff, NULL, next); + else /* cases 3, 8 */ + err = __vma_adjust(mid, addr, next->vm_end, + next->vm_pgoff - pglen, NULL, next); + res = next; } - return NULL; + /* + * Cannot merge with predecessor or successor or error in __vma_adjust? + */ + if (err) + return NULL; + khugepaged_enter_vma(res, vm_flags); + return res; } /* From 1ef6bc3e866fab11bc73abf1b6a00f73ca491054 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Mat=C4=9Bna?= Date: Fri, 3 Jun 2022 16:57:19 +0200 Subject: [PATCH 091/321] mm: add merging after mremap resize MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When mremap call results in expansion, it might be possible to merge the VMA with the next VMA which might become adjacent. This patch adds vma_merge call after the expansion is done to try and merge. Link: https://lkml.kernel.org/r/20220603145719.1012094-3-matenajakub@gmail.com Signed-off-by: Jakub Matěna Reviewed-by: Vlastimil Babka Cc: Hugh Dickins Cc: "Kirill A . Shutemov" Cc: Liam Howlett Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Peter Zijlstra (Intel) Cc: Rik van Riel Cc: Steven Rostedt Signed-off-by: Andrew Morton --- mm/mremap.c | 19 +++++++++- tools/testing/selftests/vm/mremap_test.c | 47 +++++++++++++++++++++++- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index 8644ff278f029..e465ffe279bb0 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -23,6 +24,7 @@ #include #include #include +#include #include #include @@ -1012,6 +1014,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, /* can we just expand the current mapping? */ if (vma_expandable(vma, new_len - old_len)) { long pages = (new_len - old_len) >> PAGE_SHIFT; + unsigned long extension_start = addr + old_len; + unsigned long extension_end = addr + new_len; + pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT); if (vma->vm_flags & VM_ACCOUNT) { if (security_vm_enough_memory_mm(mm, pages)) { @@ -1020,8 +1025,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, } } - if (vma_adjust(vma, vma->vm_start, addr + new_len, - vma->vm_pgoff, NULL)) { + /* + * Function vma_merge() is called on the extension we are adding to + * the already existing vma, vma_merge() will merge this extension with + * the already existing vma (expand operation itself) and possibly also + * with the next vma if it becomes adjacent to the expanded vma and + * otherwise compatible. 
+ */ + vma = vma_merge(mm, vma, extension_start, extension_end, + vma->vm_flags, vma->anon_vma, vma->vm_file, + extension_pgoff, vma_policy(vma), + vma->vm_userfaultfd_ctx, anon_vma_name(vma)); + if (!vma) { vm_unacct_memory(pages); ret = -ENOMEM; goto out; diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c index db0270127aeb0..0865a6cb5bdba 100644 --- a/tools/testing/selftests/vm/mremap_test.c +++ b/tools/testing/selftests/vm/mremap_test.c @@ -118,6 +118,48 @@ static unsigned long long get_mmap_min_addr(void) return addr; } +/* + * This test validates that merge is called when expanding a mapping. + * Mapping containing three pages is created, middle page is unmapped + * and then the mapping containing the first page is expanded so that + * it fills the created hole. The two parts should merge creating + * single mapping with three pages. + */ +static void mremap_expand_merge(unsigned long page_size) +{ + char *test_name = "mremap expand merge"; + FILE *fp; + char *line = NULL; + size_t len = 0; + bool success = false; + + char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + munmap(start + page_size, page_size); + mremap(start, page_size, 2 * page_size, 0); + + fp = fopen("/proc/self/maps", "r"); + if (fp == NULL) { + ksft_test_result_fail("%s\n", test_name); + return; + } + + while(getline(&line, &len, fp) != -1) { + char *first = strtok(line,"- "); + void *first_val = (void *) strtol(first, NULL, 16); + char *second = strtok(NULL,"- "); + void *second_val = (void *) strtol(second, NULL, 16); + if (first_val == start && second_val == start + 3 * page_size) { + success = true; + break; + } + } + if (success) + ksft_test_result_pass("%s\n", test_name); + else + ksft_test_result_fail("%s\n", test_name); + fclose(fp); +} + /* * Returns the start address of the mapping on success, else returns * NULL on failure. @@ -336,6 +378,7 @@ int main(int argc, char **argv) int i, run_perf_tests; unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD; unsigned int pattern_seed; + int num_expand_tests = 1; struct test test_cases[MAX_TEST]; struct test perf_test_cases[MAX_PERF_TEST]; int page_size; @@ -407,12 +450,14 @@ int main(int argc, char **argv) (threshold_mb * _1MB >= _1GB); ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ? 
- ARRAY_SIZE(perf_test_cases) : 0)); + ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests); for (i = 0; i < ARRAY_SIZE(test_cases); i++) run_mremap_test_case(test_cases[i], &failures, threshold_mb, pattern_seed); + mremap_expand_merge(page_size); + if (run_perf_tests) { ksft_print_msg("\n%s\n", "mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:"); From 37c1aaa61ce3d7c123c7e08ef1237beb84a2f63f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 3 Jun 2022 10:41:30 -0700 Subject: [PATCH 092/321] mm-add-merging-after-mremap-resize-checkpatch-fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit WARNING: line length of 108 exceeds 100 columns #97: FILE: tools/testing/selftests/vm/mremap_test.c:136: + char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); WARNING: Missing a blank line after declarations #98: FILE: tools/testing/selftests/vm/mremap_test.c:137: + char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + munmap(start + page_size, page_size); ERROR: space required before the open parenthesis '(' #107: FILE: tools/testing/selftests/vm/mremap_test.c:146: + while(getline(&line, &len, fp) != -1) { ERROR: space required after that ',' (ctx:VxV) #108: FILE: tools/testing/selftests/vm/mremap_test.c:147: + char *first = strtok(line,"- "); ^ ERROR: space required after that ',' (ctx:VxV) #110: FILE: tools/testing/selftests/vm/mremap_test.c:149: + char *second = strtok(NULL,"- "); ^ WARNING: Missing a blank line after declarations #112: FILE: tools/testing/selftests/vm/mremap_test.c:151: + void *second_val = (void *) strtol(second, NULL, 16); + if (first_val == start && second_val == start + 3 * page_size) { total: 3 errors, 3 warnings, 113 lines checked NOTE: For some of the reported defects, checkpatch may be able to mechanically convert to the typical style using --fix or --fix-inplace. ./patches/mm-add-merging-after-mremap-resize.patch has style problems, please review. NOTE: If any of the errors are false positives, please report them to the maintainer, see CHECKPATCH in MAINTAINERS. 
Please run checkpatch prior to sending patches Cc: Jakub Matěna Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/mremap_test.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c index 0865a6cb5bdba..9496346973d44 100644 --- a/tools/testing/selftests/vm/mremap_test.c +++ b/tools/testing/selftests/vm/mremap_test.c @@ -132,8 +132,9 @@ static void mremap_expand_merge(unsigned long page_size) char *line = NULL; size_t len = 0; bool success = false; + char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); munmap(start + page_size, page_size); mremap(start, page_size, 2 * page_size, 0); @@ -143,11 +144,12 @@ static void mremap_expand_merge(unsigned long page_size) return; } - while(getline(&line, &len, fp) != -1) { - char *first = strtok(line,"- "); - void *first_val = (void *) strtol(first, NULL, 16); - char *second = strtok(NULL,"- "); + while (getline(&line, &len, fp) != -1) { + char *first = strtok(line, "- "); + void *first_val = (void *)strtol(first, NULL, 16); + char *second = strtok(NULL, "- "); void *second_val = (void *) strtol(second, NULL, 16); + if (first_val == start && second_val == start + 3 * page_size) { success = true; break; From b6ae481d8f68fed4a42812d260f00d18218c8f02 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:35 -0500 Subject: [PATCH 093/321] mm: rename is_pinnable_pages to is_longterm_pinnable_pages Patch series "Add MEMORY_DEVICE_COHERENT for coherent device memory mapping", v8. This patch series introduces MEMORY_DEVICE_COHERENT, a type of memory owned by a device that can be mapped into CPU page tables like MEMORY_DEVICE_GENERIC and can also be migrated like MEMORY_DEVICE_PRIVATE. This patch series is mostly self-contained except for a few places where it needs to update other subsystems to handle the new memory type. System stability and performance are not affected according to our ongoing testing, including xfstests. How it works: The system BIOS advertises the GPU device memory (aka VRAM) as SPM (special purpose memory) in the UEFI system address map. The amdgpu driver registers the memory with devmap as MEMORY_DEVICE_COHERENT using devm_memremap_pages. The initial user for this hardware page migration capability is the Frontier supercomputer project. This functionality is not AMD-specific. We expect other GPU vendors to find this functionality useful, and possibly other hardware types in the future. Our test nodes in the lab are similar to the Frontier configuration, with .5 TB of system memory plus 256 GB of device memory split across 4 GPUs, all in a single coherent address space. Page migration is expected to improve application efficiency significantly. We will report empirical results as they become available. Coherent device type pages at gup are now migrated back to system memory if they are being pinned long-term (FOLL_LONGTERM). The reason is, that long-term pinning would interfere with the device memory manager owning the device-coherent pages (e.g. evictions in TTM). These series incorporate Alistair Popple patches to do this migration from pin_user_pages() calls. hmm_gup_test has been added to hmm-test to test different get user pages calls. This series includes handling of device-managed anonymous pages returned by vm_normal_pages. 
Although they behave like normal pages for purposes of mapping in CPU page tables and for COW, they do not support LRU lists, NUMA migration or THP. We also introduced a FOLL_LRU flag that adds the same behaviour to follow_page and related APIs, to allow callers to specify that they expect to put pages on an LRU list. This patch (of 15): is_pinnable_page() and folio_is_pinnable() were renamed to is_longterm_pinnable_page() and folio_is_longterm_pinnable() respectively. These functions are used in the FOLL_LONGTERM flag context. Link: https://lkml.kernel.org/r/20220707190349.9778-1-alex.sierra@amd.com Link: https://lkml.kernel.org/r/20220707190349.9778-2-alex.sierra@amd.com Signed-off-by: Alex Sierra Reviewed-by: David Hildenbrand Cc: Jason Gunthorpe Cc: Felix Kuehling Cc: Ralph Campbell Cc: Christoph Hellwig Cc: Jerome Glisse Cc: Alistair Popple Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- include/linux/mm.h | 8 ++++---- mm/gup.c | 4 ++-- mm/gup_test.c | 2 +- mm/hugetlb.c | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 1e0fc5f4fc3db..702d1dbeea253 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1642,7 +1642,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION -static inline bool is_pinnable_page(struct page *page) +static inline bool is_longterm_pinnable_page(struct page *page) { #ifdef CONFIG_CMA int mt = get_pageblock_migratetype(page); @@ -1653,15 +1653,15 @@ static inline bool is_pinnable_page(struct page *page) return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)); } #else -static inline bool is_pinnable_page(struct page *page) +static inline bool is_longterm_pinnable_page(struct page *page) { return true; } #endif -static inline bool folio_is_pinnable(struct folio *folio) +static inline bool folio_is_longterm_pinnable(struct folio *folio) { - return is_pinnable_page(&folio->page); + return is_longterm_pinnable_page(&folio->page); } static inline void set_page_zone(struct page *page, enum zone_type zone) diff --git a/mm/gup.c b/mm/gup.c index 280b46857cb3c..05af1c8670808 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -134,7 +134,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) * path. 
*/ if (unlikely((flags & FOLL_LONGTERM) && - !is_pinnable_page(page))) + !is_longterm_pinnable_page(page))) return NULL; /* @@ -1926,7 +1926,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages, continue; prev_folio = folio; - if (folio_is_pinnable(folio)) + if (folio_is_longterm_pinnable(folio)) continue; /* diff --git a/mm/gup_test.c b/mm/gup_test.c index d974dec19e1ca..12b0a91767d3c 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -53,7 +53,7 @@ static void verify_dma_pinned(unsigned int cmd, struct page **pages, dump_page(page, "gup_test failure"); break; } else if (cmd == PIN_LONGTERM_BENCHMARK && - WARN(!is_pinnable_page(page), + WARN(!is_longterm_pinnable_page(page), "pages[%lu] is NOT pinnable but pinned\n", i)) { dump_page(page, "gup_test failure"); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2c0da0da5e051..faf816b6c573d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1129,7 +1129,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) lockdep_assert_held(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { - if (pin && !is_pinnable_page(page)) + if (pin && !is_longterm_pinnable_page(page)) continue; if (PageHWPoison(page)) From 760b37984e0340841957235cad34ac1e7e715a17 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:36 -0500 Subject: [PATCH 094/321] mm: move page zone helpers into new header-specific file To have a cleaner way to expose all page zone helpers in one header file, rather than split them between mm.h and memremap.h files. Link: https://lkml.kernel.org/r/20220707190349.9778-3-alex.sierra@amd.com Signed-off-by: Alex Sierra Cc: Alistair Popple Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Felix Kuehling Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- drivers/infiniband/core/rw.c | 1 - drivers/nvme/target/io-cmd-bdev.c | 1 - include/linux/memremap.h | 113 +---------------- include/linux/mm.h | 79 +----------- include/linux/page_zone.h | 194 ++++++++++++++++++++++++++++++ mm/memcontrol.c | 1 - 6 files changed, 196 insertions(+), 193 deletions(-) create mode 100644 include/linux/page_zone.h diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 4d98f931a13dd..5a3bd41b331c9 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -2,7 +2,6 @@ /* * Copyright (c) 2016 HGST, a Western Digital Company. 
*/ -#include #include #include #include diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 27a72504d31ce..16a8b7665fe43 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -6,7 +6,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include -#include #include #include "nvmet.h" diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 9f5ee49482ded..0f22f6f42e7dc 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -2,70 +2,14 @@ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ -#include #include #include #include +#include struct resource; struct device; -/** - * struct vmem_altmap - pre-allocated storage for vmemmap_populate - * @base_pfn: base of the entire dev_pagemap mapping - * @reserve: pages mapped, but reserved for driver use (relative to @base) - * @free: free pages set aside in the mapping for memmap storage - * @align: pages reserved to meet allocation alignments - * @alloc: track pages consumed, private to vmemmap_populate() - */ -struct vmem_altmap { - unsigned long base_pfn; - const unsigned long end_pfn; - const unsigned long reserve; - unsigned long free; - unsigned long align; - unsigned long alloc; -}; - -/* - * Specialize ZONE_DEVICE memory into multiple types each has a different - * usage. - * - * MEMORY_DEVICE_PRIVATE: - * Device memory that is not directly addressable by the CPU: CPU can neither - * read nor write private memory. In this case, we do still have struct pages - * backing the device memory. Doing so simplifies the implementation, but it is - * important to remember that there are certain points at which the struct page - * must be treated as an opaque object, rather than a "normal" struct page. - * - * A more complete discussion of unaddressable memory may be found in - * include/linux/hmm.h and Documentation/mm/hmm.rst. - * - * MEMORY_DEVICE_FS_DAX: - * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. In support of coordinating page - * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a - * wakeup event whenever a page is unpinned and becomes idle. This - * wakeup is used to coordinate physical address space management (ex: - * fs truncate/hole punch) vs pinned pages (ex: device dma). - * - * MEMORY_DEVICE_GENERIC: - * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. This is for example used by DAX devices - * that expose memory using a character device. - * - * MEMORY_DEVICE_PCI_P2PDMA: - * Device memory residing in a PCI BAR intended for use with Peer-to-Peer - * transactions. - */ -enum memory_type { - /* 0 is reserved to catch uninitialized type fields */ - MEMORY_DEVICE_PRIVATE = 1, - MEMORY_DEVICE_FS_DAX, - MEMORY_DEVICE_GENERIC, - MEMORY_DEVICE_PCI_P2PDMA, -}; - struct dev_pagemap_ops { /* * Called once the page refcount reaches 0. 
The reference count will be @@ -83,42 +27,6 @@ struct dev_pagemap_ops { #define PGMAP_ALTMAP_VALID (1 << 0) -/** - * struct dev_pagemap - metadata for ZONE_DEVICE mappings - * @altmap: pre-allocated/reserved memory for vmemmap allocations - * @ref: reference count that pins the devm_memremap_pages() mapping - * @done: completion for @ref - * @type: memory type: see MEMORY_* in memory_hotplug.h - * @flags: PGMAP_* flags to specify defailed behavior - * @vmemmap_shift: structural definition of how the vmemmap page metadata - * is populated, specifically the metadata page order. - * A zero value (default) uses base pages as the vmemmap metadata - * representation. A bigger value will set up compound struct pages - * of the requested order value. - * @ops: method table - * @owner: an opaque pointer identifying the entity that manages this - * instance. Used by various helpers to make sure that no - * foreign ZONE_DEVICE memory is accessed. - * @nr_range: number of ranges to be mapped - * @range: range to be mapped when nr_range == 1 - * @ranges: array of ranges to be mapped when nr_range > 1 - */ -struct dev_pagemap { - struct vmem_altmap altmap; - struct percpu_ref ref; - struct completion done; - enum memory_type type; - unsigned int flags; - unsigned long vmemmap_shift; - const struct dev_pagemap_ops *ops; - void *owner; - int nr_range; - union { - struct range range; - struct range ranges[0]; - }; -}; - static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) { if (pgmap->flags & PGMAP_ALTMAP_VALID) @@ -131,25 +39,6 @@ static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap) return 1 << pgmap->vmemmap_shift; } -static inline bool is_device_private_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; -} - -static inline bool folio_is_device_private(const struct folio *folio) -{ - return is_device_private_page(&folio->page); -} - -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_PCI_P2PDMA) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; -} - #ifdef CONFIG_ZONE_DEVICE void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); diff --git a/include/linux/mm.h b/include/linux/mm.h index 702d1dbeea253..9671e06aff022 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -28,6 +28,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -1076,84 +1077,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); * back into memory. */ -/* - * The zone field is never updated after free_area_init_core() - * sets it, so none of the operations on it need to be atomic. - */ - -/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ -#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) -#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) -#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) -#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) -#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) - -/* - * Define the bit shifts to access each section. For non-existent - * sections we define the shift as 0; that plus a 0 mask ensures - * the compiler will optimise away reference to them. 
- */ -#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) -#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) -#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) -#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) -#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) - -/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ -#ifdef NODE_NOT_IN_PAGE_FLAGS -#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) -#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ - SECTIONS_PGOFF : ZONES_PGOFF) -#else -#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) -#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \ - NODES_PGOFF : ZONES_PGOFF) -#endif - -#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) - -#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) -#define NODES_MASK ((1UL << NODES_WIDTH) - 1) -#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) -#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) -#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) -#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) - -static inline enum zone_type page_zonenum(const struct page *page) -{ - ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); - return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; -} - -static inline enum zone_type folio_zonenum(const struct folio *folio) -{ - return page_zonenum(&folio->page); -} - -#ifdef CONFIG_ZONE_DEVICE -static inline bool is_zone_device_page(const struct page *page) -{ - return page_zonenum(page) == ZONE_DEVICE; -} -extern void memmap_init_zone_device(struct zone *, unsigned long, - unsigned long, struct dev_pagemap *); -#else -static inline bool is_zone_device_page(const struct page *page) -{ - return false; -} -#endif - -static inline bool folio_is_zone_device(const struct folio *folio) -{ - return is_zone_device_page(&folio->page); -} - -static inline bool is_zone_movable_page(const struct page *page) -{ - return page_zonenum(page) == ZONE_MOVABLE; -} - #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); diff --git a/include/linux/page_zone.h b/include/linux/page_zone.h new file mode 100644 index 0000000000000..7cded718ad06e --- /dev/null +++ b/include/linux/page_zone.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PAGE_ZONE_H_ +#define _PAGE_ZONE_H_ + +/* + * The zone field is never updated after free_area_init_core() + * sets it, so none of the operations on it need to be atomic. + */ + +/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ +#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) +#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) +#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) +#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) +#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) + +/* + * Define the bit shifts to access each section. For non-existent + * sections we define the shift as 0; that plus a 0 mask ensures + * the compiler will optimise away reference to them. 
+ */ +#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) +#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) +#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) +#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) +#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) + +/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ +#ifdef NODE_NOT_IN_PAGE_FLAGS +#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \ + SECTIONS_PGOFF : ZONES_PGOFF) +#else +#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \ + NODES_PGOFF : ZONES_PGOFF) +#endif + +#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) + +#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) +#define NODES_MASK ((1UL << NODES_WIDTH) - 1) +#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) +#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) +#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) +#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) + +/* + * Specialize ZONE_DEVICE memory into multiple types each has a different + * usage. + * + * MEMORY_DEVICE_PRIVATE: + * Device memory that is not directly addressable by the CPU: CPU can neither + * read nor write private memory. In this case, we do still have struct pages + * backing the device memory. Doing so simplifies the implementation, but it is + * important to remember that there are certain points at which the struct page + * must be treated as an opaque object, rather than a "normal" struct page. + * + * A more complete discussion of unaddressable memory may be found in + * include/linux/hmm.h and Documentation/mm/hmm.rst. + * + * MEMORY_DEVICE_FS_DAX: + * Host memory that has similar access semantics as System RAM i.e. DMA + * coherent and supports page pinning. In support of coordinating page + * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a + * wakeup event whenever a page is unpinned and becomes idle. This + * wakeup is used to coordinate physical address space management (ex: + * fs truncate/hole punch) vs pinned pages (ex: device dma). + * + * MEMORY_DEVICE_GENERIC: + * Host memory that has similar access semantics as System RAM i.e. DMA + * coherent and supports page pinning. This is for example used by DAX devices + * that expose memory using a character device. + * + * MEMORY_DEVICE_PCI_P2PDMA: + * Device memory residing in a PCI BAR intended for use with Peer-to-Peer + * transactions. 
+ */ +enum memory_type { + /* 0 is reserved to catch uninitialized type fields */ + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_FS_DAX, + MEMORY_DEVICE_GENERIC, + MEMORY_DEVICE_PCI_P2PDMA, +}; + +/** + * struct vmem_altmap - pre-allocated storage for vmemmap_populate + * @base_pfn: base of the entire dev_pagemap mapping + * @reserve: pages mapped, but reserved for driver use (relative to @base) + * @free: free pages set aside in the mapping for memmap storage + * @align: pages reserved to meet allocation alignments + * @alloc: track pages consumed, private to vmemmap_populate() + */ +struct vmem_altmap { + unsigned long base_pfn; + const unsigned long end_pfn; + const unsigned long reserve; + unsigned long free; + unsigned long align; + unsigned long alloc; +}; + +/** + * struct dev_pagemap - metadata for ZONE_DEVICE mappings + * @altmap: pre-allocated/reserved memory for vmemmap allocations + * @ref: reference count that pins the devm_memremap_pages() mapping + * @done: completion for @ref + * @type: memory type: see MEMORY_* in memory_hotplug.h + * @flags: PGMAP_* flags to specify defailed behavior + * @vmemmap_shift: structural definition of how the vmemmap page metadata + * is populated, specifically the metadata page order. + * A zero value (default) uses base pages as the vmemmap metadata + * representation. A bigger value will set up compound struct pages + * of the requested order value. + * @ops: method table + * @owner: an opaque pointer identifying the entity that manages this + * instance. Used by various helpers to make sure that no + * foreign ZONE_DEVICE memory is accessed. + * @nr_range: number of ranges to be mapped + * @range: range to be mapped when nr_range == 1 + * @ranges: array of ranges to be mapped when nr_range > 1 + */ +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + unsigned long vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + union { + struct range range; + struct range ranges[0]; + }; +}; + +static inline enum zone_type page_zonenum(const struct page *page) +{ + ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); + return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; +} + +static inline enum zone_type folio_zonenum(const struct folio *folio) +{ + return page_zonenum(&folio->page); +} + +static inline bool is_zone_movable_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_MOVABLE; +} + +#ifdef CONFIG_ZONE_DEVICE +static inline bool is_zone_device_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_DEVICE; +} +extern void memmap_init_zone_device(struct zone *, unsigned long, + unsigned long, struct dev_pagemap *); +#else +static inline bool is_zone_device_page(const struct page *page) +{ + return false; +} +#endif + +static inline bool folio_is_zone_device(const struct folio *folio) +{ + return is_zone_device_page(&folio->page); +} + +static inline bool is_device_private_page(const struct page *page) +{ + return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PRIVATE; +} + +static inline bool folio_is_device_private(const struct folio *folio) +{ + return is_device_private_page(&folio->page); +} + +static inline bool is_pci_p2pdma_page(const struct page *page) +{ + return IS_ENABLED(CONFIG_PCI_P2PDMA) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; +} + +#endif /* _PAGE_ZONE_H_ */ diff --git 
a/mm/memcontrol.c b/mm/memcontrol.c index d63c5856d006c..5a75cb20ebbfa 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -53,7 +53,6 @@ #include #include #include -#include #include #include #include From 211255708f8464ade72f38556d88d32caa278044 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:37 -0500 Subject: [PATCH 095/321] mm: add zone device coherent type memory support Device memory that is cache coherent from device and CPU point of view. This is used on platforms that have an advanced system bus (like CAPI or CXL). Any page of a process can be migrated to such memory. However, no one should be allowed to pin such memory so that it can always be evicted. [hch@lst.de: rebased ontop of the refcount changes, removed is_dev_private_or_coherent_page] Link: https://lkml.kernel.org/r/20220707190349.9778-4-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Reviewed-by: Alistair Popple Acked-by: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- include/linux/mm.h | 4 +++- include/linux/page_zone.h | 19 +++++++++++++++++++ mm/memcontrol.c | 7 ++++--- mm/memory-failure.c | 8 ++++++-- mm/memremap.c | 10 ++++++++++ mm/migrate_device.c | 16 +++++++--------- mm/rmap.c | 5 +++-- 7 files changed, 52 insertions(+), 17 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 9671e06aff022..b976796ce96fb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1573,7 +1573,9 @@ static inline bool is_longterm_pinnable_page(struct page *page) if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) return false; #endif - return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)); + return !(is_device_coherent_page(page) || + is_zone_movable_page(page) || + is_zero_pfn(page_to_pfn(page))); } #else static inline bool is_longterm_pinnable_page(struct page *page) diff --git a/include/linux/page_zone.h b/include/linux/page_zone.h index 7cded718ad06e..d4415b61b8d8d 100644 --- a/include/linux/page_zone.h +++ b/include/linux/page_zone.h @@ -59,6 +59,13 @@ * A more complete discussion of unaddressable memory may be found in * include/linux/hmm.h and Documentation/mm/hmm.rst. * + * MEMORY_DEVICE_COHERENT: + * Device memory that is cache coherent from device and CPU point of view. This + * is used on platforms that have an advanced system bus (like CAPI or CXL). A + * driver can hotplug the device memory using ZONE_DEVICE and with that memory + * type. Any page of a process can be migrated to such memory. However no one + * should be allowed to pin such memory so that it can always be evicted. + * * MEMORY_DEVICE_FS_DAX: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. 
In support of coordinating page @@ -79,6 +86,7 @@ enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT, MEMORY_DEVICE_FS_DAX, MEMORY_DEVICE_GENERIC, MEMORY_DEVICE_PCI_P2PDMA, @@ -191,4 +199,15 @@ static inline bool is_pci_p2pdma_page(const struct page *page) page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } +static inline bool is_device_coherent_page(const struct page *page) +{ + return is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_COHERENT; +} + +static inline bool folio_is_device_coherent(const struct folio *folio) +{ + return is_device_coherent_page(&folio->page); +} + #endif /* _PAGE_ZONE_H_ */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5a75cb20ebbfa..b56d1f654ff98 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5715,8 +5715,8 @@ static int mem_cgroup_move_account(struct page *page, * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a * target for charge migration. if @target is not NULL, the entry is stored * in target->ent. - * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE - * (so ZONE_DEVICE page and thus not on the lru). + * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is device memory and + * thus not on the lru. * For now we such page is charge like a regular page would be as for all * intent and purposes it is just special memory taking the place of a * regular page. @@ -5754,7 +5754,8 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, */ if (page_memcg(page) == mc.from) { ret = MC_TARGET_PAGE; - if (is_device_private_page(page)) + if (is_device_private_page(page) || + is_device_coherent_page(page)) ret = MC_TARGET_DEVICE; if (target) target->page = page; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index f7612ccdb2999..b7ca5db7e60ea 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1686,12 +1686,16 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, goto unlock; } - if (pgmap->type == MEMORY_DEVICE_PRIVATE) { + switch (pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_COHERENT: /* - * TODO: Handle HMM pages which may need coordination + * TODO: Handle device pages which may need coordination * with device-side memory. */ goto unlock; + default: + break; } /* diff --git a/mm/memremap.c b/mm/memremap.c index 5dceb6e9c973d..58b20c3c300b8 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -315,6 +315,16 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) return ERR_PTR(-EINVAL); } break; + case MEMORY_DEVICE_COHERENT: + if (!pgmap->ops->page_free) { + WARN(1, "Missing page_free method\n"); + return ERR_PTR(-EINVAL); + } + if (!pgmap->owner) { + WARN(1, "Missing owner\n"); + return ERR_PTR(-EINVAL); + } + break; case MEMORY_DEVICE_FS_DAX: if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) { WARN(1, "File system DAX not supported\n"); diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 5052093d0262d..a4847ad65da3c 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -518,7 +518,7 @@ EXPORT_SYMBOL(migrate_vma_setup); * handle_pte_fault() * do_anonymous_page() * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE - * private page. + * private or coherent page. 
*/ static void migrate_vma_insert_page(struct migrate_vma *migrate, unsigned long addr, @@ -594,11 +594,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, page_to_pfn(page)); entry = swp_entry_to_pte(swp_entry); } else { - /* - * For now we only support migrating to un-addressable device - * memory. - */ - if (is_zone_device_page(page)) { + if (is_zone_device_page(page) && + !is_device_coherent_page(page)) { pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); goto abort; } @@ -701,10 +698,11 @@ void migrate_vma_pages(struct migrate_vma *migrate) mapping = page_mapping(page); - if (is_device_private_page(newpage)) { + if (is_device_private_page(newpage) || + is_device_coherent_page(newpage)) { /* - * For now only support private anonymous when migrating - * to un-addressable device memory. + * For now only support anonymous memory migrating to + * device private or coherent memory. */ if (mapping) { migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; diff --git a/mm/rmap.c b/mm/rmap.c index c3557d9c419a5..2a5f1d2d1247a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1968,7 +1968,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Update high watermark before we lower rss */ update_hiwater_rss(mm); - if (folio_is_zone_device(folio)) { + if (folio_is_device_private(folio)) { unsigned long pfn = folio_pfn(folio); swp_entry_t entry; pte_t swp_pte; @@ -2131,7 +2131,8 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags) TTU_SYNC))) return; - if (folio_is_zone_device(folio) && !folio_is_device_private(folio)) + if (folio_is_zone_device(folio) && + (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) return; /* From c8fa7019937e871ef76929f1baf5eb24ede6a473 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:38 -0500 Subject: [PATCH 096/321] mm: handling Non-LRU pages returned by vm_normal_pages With DEVICE_COHERENT, we'll soon have vm_normal_page() return device-managed anonymous pages that are not LRU pages. Although they behave like normal pages for purposes of mapping in CPU page tables and for COW, they do not support LRU lists, NUMA migration or THP. Callers of follow_page() currently don't expect ZONE_DEVICE pages; however, with DEVICE_COHERENT we might now return ZONE_DEVICE pages. Check for ZONE_DEVICE pages in applicable users of follow_page() as well.
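All the callers updated below follow essentially the same pattern; a minimal sketch of the added check (for a page obtained from vm_normal_page() or follow_page()):

        page = vm_normal_page(vma, addr, ptent);
        if (!page || is_zone_device_page(page))
                continue;       /* device-managed pages are not on the LRU, skip them */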
Link: https://lkml.kernel.org/r/20220707190349.9778-5-alex.sierra@amd.com Signed-off-by: Alex Sierra Acked-by: Felix Kuehling [v2] Reviewed-by: Alistair Popple [v6] Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 2 +- mm/huge_memory.c | 2 +- mm/khugepaged.c | 9 ++++++--- mm/ksm.c | 6 +++--- mm/madvise.c | 4 ++-- mm/memory.c | 10 +++++++++- mm/mempolicy.c | 2 +- mm/migrate.c | 4 ++-- mm/mlock.c | 2 +- mm/mprotect.c | 2 +- 10 files changed, 27 insertions(+), 16 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 21cb006cc97f4..0f2bb17026552 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1803,7 +1803,7 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, return NULL; page = vm_normal_page(vma, addr, pte); - if (!page) + if (!page || is_zone_device_page(page)) return NULL; if (PageReserved(page)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 79bba350f2964..e6598f1d5bcd8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2910,7 +2910,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, if (IS_ERR(page)) continue; - if (!page) + if (!page || is_zone_device_page(page)) continue; if (!is_transparent_hugepage(page)) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 00531f56069ba..37dcf95640e6e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -611,7 +611,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, goto out; } page = vm_normal_page(vma, address, pteval); - if (unlikely(!page)) { + if (unlikely(!page) || unlikely(is_zone_device_page(page))) { result = SCAN_PAGE_NULL; goto out; } @@ -1261,7 +1261,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, writable = true; page = vm_normal_page(vma, _address, pteval); - if (unlikely(!page)) { + if (unlikely(!page) || unlikely(is_zone_device_page(page))) { result = SCAN_PAGE_NULL; goto out_unmap; } @@ -1472,7 +1472,8 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) goto abort; page = vm_normal_page(vma, addr, *pte); - + if (WARN_ON_ONCE(page && is_zone_device_page(page))) + page = NULL; /* * Note that uprobe, debugger, or MAP_PRIVATE may change the * page table, but the new page will not be a subpage of hpage. 
@@ -1490,6 +1491,8 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (pte_none(*pte)) continue; page = vm_normal_page(vma, addr, *pte); + if (WARN_ON_ONCE(page && is_zone_device_page(page))) + goto abort; page_remove_rmap(page, vma, false); } diff --git a/mm/ksm.c b/mm/ksm.c index 73cbe2a628ed3..075123602bd07 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -475,7 +475,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) cond_resched(); page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE); - if (IS_ERR_OR_NULL(page)) + if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) break; if (PageKsm(page)) ret = handle_mm_fault(vma, addr, @@ -560,7 +560,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) goto out; page = follow_page(vma, addr, FOLL_GET); - if (IS_ERR_OR_NULL(page)) + if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) goto out; if (PageAnon(page)) { flush_anon_page(vma, page, addr); @@ -2311,7 +2311,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) if (ksm_test_exit(mm)) break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); - if (IS_ERR_OR_NULL(*page)) { + if (IS_ERR_OR_NULL(*page) || is_zone_device_page(*page)) { ksm_scan.address += PAGE_SIZE; cond_resched(); continue; diff --git a/mm/madvise.c b/mm/madvise.c index 5ae5cf4d1a9b0..851fa4e134bc5 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -421,7 +421,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, continue; page = vm_normal_page(vma, addr, ptent); - if (!page) + if (!page || is_zone_device_page(page)) continue; /* @@ -639,7 +639,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, } page = vm_normal_page(vma, addr, ptent); - if (!page) + if (!page || is_zone_device_page(page)) continue; /* diff --git a/mm/memory.c b/mm/memory.c index 2028d3b23db7e..5c14a9037b810 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -633,6 +633,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, if (is_zero_pfn(pfn)) return NULL; if (pte_devmap(pte)) + /* + * NOTE: New users of ZONE_DEVICE will not set pte_devmap() + * and will have refcounts incremented on their struct pages + * when they are inserted into PTEs, thus they are safe to + * return here. Legacy ZONE_DEVICE pages that set pte_devmap() + * do not have refcounts. Example of legacy ZONE_DEVICE is + * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers. 
+ */ return NULL; print_bad_pte(vma, addr, pte, NULL); @@ -4709,7 +4717,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte = pte_modify(old_pte, vma->vm_page_prot); page = vm_normal_page(vma, vmf->address, pte); - if (!page) + if (!page || is_zone_device_page(page)) goto out_map; /* TODO: handle PTE-mapped THP */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 53064bfba061c..dc74239d1ac77 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -524,7 +524,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); - if (!page) + if (!page || is_zone_device_page(page)) continue; /* * vm_normal_page() filters out zero pages, but there might diff --git a/mm/migrate.c b/mm/migrate.c index 7934eebf16891..1649270bc1a77 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1630,7 +1630,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, goto out; err = -ENOENT; - if (!page) + if (!page || is_zone_device_page(page)) goto out; err = 0; @@ -1821,7 +1821,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, if (IS_ERR(page)) goto set_status; - if (page) { + if (page && !is_zone_device_page(page)) { err = page_to_nid(page); put_page(page); } else { diff --git a/mm/mlock.c b/mm/mlock.c index c41604ba5197d..43d19a1f28eb3 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -333,7 +333,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); - if (!page) + if (!page || is_zone_device_page(page)) continue; if (PageTransCompound(page)) continue; diff --git a/mm/mprotect.c b/mm/mprotect.c index ff0d4881cb071..041beeb5fad65 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -127,7 +127,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb, continue; page = vm_normal_page(vma, addr, oldpte); - if (!page || PageKsm(page)) + if (!page || is_zone_device_page(page) || PageKsm(page)) continue; /* Also skip shared copy-on-write pages */ From 6508453da6a30e071933278d666ecb3d87cedbaf Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:39 -0500 Subject: [PATCH 097/321] mm: add device coherent vma selection for memory migration This case is used to migrate pages from device memory, back to system memory. Device coherent type memory is cache coherent from device and CPU point of view. 
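As a rough illustration of how a driver would use the new selection flag when migrating its coherent pages back to system memory (sketch only; "pgmap_owner" stands for the driver's pagemap owner cookie, and the amdkfd conversion later in this series is the in-tree user):

        migrate.vma         = vma;
        migrate.start       = start;
        migrate.end         = end;
        migrate.pgmap_owner = pgmap_owner;
        migrate.flags       = MIGRATE_VMA_SELECT_DEVICE_COHERENT;

        ret = migrate_vma_setup(&migrate);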
Link: https://lkml.kernel.org/r/20220707190349.9778-6-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Reviewed-by: Alistair Poppple Reviewed-by: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- include/linux/migrate.h | 1 + mm/migrate_device.c | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 069a89e847f34..b84908debe5cd 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -148,6 +148,7 @@ static inline unsigned long migrate_pfn(unsigned long pfn) enum migrate_vma_direction { MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, + MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2, }; struct migrate_vma { diff --git a/mm/migrate_device.c b/mm/migrate_device.c index a4847ad65da3c..18bc6483f63a2 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -148,15 +148,21 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, if (is_writable_device_private_entry(entry)) mpfn |= MIGRATE_PFN_WRITE; } else { - if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) - goto next; pfn = pte_pfn(pte); - if (is_zero_pfn(pfn)) { + if (is_zero_pfn(pfn) && + (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) { mpfn = MIGRATE_PFN_MIGRATE; migrate->cpages++; goto next; } page = vm_normal_page(migrate->vma, addr, pte); + if (page && !is_zone_device_page(page) && + !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) + goto next; + else if (page && is_device_coherent_page(page) && + (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) || + page->pgmap->owner != migrate->pgmap_owner)) + goto next; mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; } From 9d3942e047eeacd0657eb94f0178c44dc2d4a08b Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 7 Jul 2022 14:03:40 -0500 Subject: [PATCH 098/321] mm: remove the vma check in migrate_vma_setup() migrate_vma_setup() checks that a valid vma is passed so that the page tables can be walked to find the pfns associated with a given address range. However in some cases the pfns are already known, such as when migrating device coherent pages during pin_user_pages() meaning a valid vma isn't required. 
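With the vma check relaxed, a caller that already knows the source pfn can skip the page-table walk entirely; roughly (a sketch of what the migrate_device_page() helper added later in this series does):

        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma args = { 0 };

        src_pfn     = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
        args.src    = &src_pfn;
        args.dst    = &dst_pfn;
        args.cpages = 1;
        args.npages = 1;
        args.vma    = NULL;     /* pfns already known, no vma walk needed */
        migrate_vma_setup(&args);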
Link: https://lkml.kernel.org/r/20220707190349.9778-7-alex.sierra@amd.com Signed-off-by: Alistair Popple Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- mm/migrate_device.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 18bc6483f63a2..cf9668376c5a8 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -486,24 +486,24 @@ int migrate_vma_setup(struct migrate_vma *args) args->start &= PAGE_MASK; args->end &= PAGE_MASK; - if (!args->vma || is_vm_hugetlb_page(args->vma) || - (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) - return -EINVAL; - if (nr_pages <= 0) - return -EINVAL; - if (args->start < args->vma->vm_start || - args->start >= args->vma->vm_end) - return -EINVAL; - if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) - return -EINVAL; if (!args->src || !args->dst) return -EINVAL; - - memset(args->src, 0, sizeof(*args->src) * nr_pages); - args->cpages = 0; - args->npages = 0; - - migrate_vma_collect(args); + if (args->vma) { + if (is_vm_hugetlb_page(args->vma) || + (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) + return -EINVAL; + if (args->start < args->vma->vm_start || + args->start >= args->vma->vm_end) + return -EINVAL; + if (args->end <= args->vma->vm_start || + args->end > args->vma->vm_end) + return -EINVAL; + memset(args->src, 0, sizeof(*args->src) * nr_pages); + args->cpages = 0; + args->npages = 0; + + migrate_vma_collect(args); + } if (args->cpages) migrate_vma_unmap(args); @@ -685,7 +685,7 @@ void migrate_vma_pages(struct migrate_vma *migrate) continue; } - if (!page) { + if (!page && migrate->vma) { if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) continue; if (!notified) { From 647f86046c8f8a7589707a6e682b579b0db2c3fb Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 7 Jul 2022 14:03:41 -0500 Subject: [PATCH 099/321] mm/gup: migrate device coherent pages when pinning instead of failing Currently any attempts to pin a device coherent page will fail. This is because device coherent pages need to be managed by a device driver, and pinning them would prevent a driver from migrating them off the device. However this is no reason to fail pinning of these pages. These are coherent and accessible from the CPU so can be migrated just like pinning ZONE_MOVABLE pages. So instead of failing all attempts to pin them first try migrating them out of ZONE_DEVICE. 
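From the caller's side the gup API is unchanged; a long-term pin that happens to cover device-coherent pages now simply succeeds after the transparent migration, e.g. (illustrative only):

        /* Any pages currently in MEMORY_DEVICE_COHERENT memory are migrated
         * to system memory before being pinned long-term. */
        ret = pin_user_pages_fast(addr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages);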
[hch@lst.de: rebased to the split device memory checks, moved migrate_device_page to migrate_device.c] Link: https://lkml.kernel.org/r/20220707190349.9778-8-alex.sierra@amd.com Signed-off-by: Alistair Popple Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- mm/gup.c | 47 +++++++++++++++++++++++++++++++++++----- mm/internal.h | 1 + mm/migrate_device.c | 53 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 5 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 05af1c8670808..e3aec764e3b59 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1926,9 +1926,43 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages, continue; prev_folio = folio; - if (folio_is_longterm_pinnable(folio)) + /* + * Device private pages will get faulted in during gup so it + * shouldn't be possible to see one here. + */ + if (WARN_ON_ONCE(folio_is_device_private(folio))) { + ret = -EFAULT; + goto unpin_pages; + } + + /* + * Device coherent pages are managed by a driver and should not + * be pinned indefinitely as it prevents the driver moving the + * page. So when trying to pin with FOLL_LONGTERM instead try + * to migrate the page out of device memory. + */ + if (folio_is_device_coherent(folio)) { + WARN_ON_ONCE(PageCompound(&folio->page)); + + /* + * Migration will fail if the page is pinned, so convert + * the pin on the source page to a normal reference. + */ + if (gup_flags & FOLL_PIN) { + get_page(&folio->page); + unpin_user_page(&folio->page); + } + + pages[i] = migrate_device_page(&folio->page, gup_flags); + if (!pages[i]) { + ret = -EBUSY; + goto unpin_pages; + } continue; + } + if (folio_is_longterm_pinnable(folio)) + continue; /* * Try to move out any movable page before pinning the range. */ @@ -1964,10 +1998,13 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages, return nr_pages; unpin_pages: - if (gup_flags & FOLL_PIN) { - unpin_user_pages(pages, nr_pages); - } else { - for (i = 0; i < nr_pages; i++) + for (i = 0; i < nr_pages; i++) { + if (!pages[i]) + continue; + + if (gup_flags & FOLL_PIN) + unpin_user_page(pages[i]); + else put_page(pages[i]); } diff --git a/mm/internal.h b/mm/internal.h index eba79d1d3b06a..6e14749ad1e53 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -851,6 +851,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags); void free_zone_device_page(struct page *page); +struct page *migrate_device_page(struct page *page, unsigned int gup_flags); /* * mm/gup.c diff --git a/mm/migrate_device.c b/mm/migrate_device.c index cf9668376c5a8..5decd26dd551b 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -794,3 +794,56 @@ void migrate_vma_finalize(struct migrate_vma *migrate) } } EXPORT_SYMBOL(migrate_vma_finalize); + +/* + * Migrate a device coherent page back to normal memory. The caller should have + * a reference on page which will be copied to the new page if migration is + * successful or dropped on failure. 
+ */ +struct page *migrate_device_page(struct page *page, unsigned int gup_flags) +{ + unsigned long src_pfn, dst_pfn = 0; + struct migrate_vma args; + struct page *dpage; + + lock_page(page); + src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; + args.src = &src_pfn; + args.dst = &dst_pfn; + args.cpages = 1; + args.npages = 1; + args.vma = NULL; + migrate_vma_setup(&args); + if (!(src_pfn & MIGRATE_PFN_MIGRATE)) + return NULL; + + dpage = alloc_pages(GFP_USER | __GFP_NOWARN, 0); + + /* + * get/pin the new page now so we don't have to retry gup after + * migrating. We already have a reference so this should never fail. + */ + if (dpage && WARN_ON_ONCE(!try_grab_page(dpage, gup_flags))) { + __free_pages(dpage, 0); + dpage = NULL; + } + + if (dpage) { + lock_page(dpage); + dst_pfn = migrate_pfn(page_to_pfn(dpage)); + } + + migrate_vma_pages(&args); + if (src_pfn & MIGRATE_PFN_MIGRATE) + copy_highpage(dpage, page); + migrate_vma_finalize(&args); + if (dpage && !(src_pfn & MIGRATE_PFN_MIGRATE)) { + if (gup_flags & FOLL_PIN) + unpin_user_page(dpage); + else + put_page(dpage); + dpage = NULL; + } + + return dpage; +} From 0bcaedef4e5ad7247977eddc4d612b8800b75183 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:42 -0500 Subject: [PATCH 100/321] drm/amdkfd: add SPM support for SVM When the CPU is connected through XGMI, it has coherent access to the VRAM resource. In this case that resource is taken from a table in the device gmc aperture base. This resource is used along with the device type, which could be DEVICE_PRIVATE or DEVICE_COHERENT, to create the device page map region. Also, the MIGRATE_VMA_SELECT_DEVICE_COHERENT flag is selected for the coherent type case during migration to the device. Link: https://lkml.kernel.org/r/20220707190349.9778-9-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Reviewed-by: Felix Kuehling Cc: Alistair Popple Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 34 +++++++++++++++--------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index e44376c2ecdcd..f73e3e340413c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -671,13 +671,15 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, migrate.vma = vma; migrate.start = start; migrate.end = end; - migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev); + if (adev->gmc.xgmi.connected_to_cpu) + migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT; + else + migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t), GFP_KERNEL); - if (!buf) goto out; @@ -947,7 +949,7 @@ int svm_migrate_init(struct amdgpu_device *adev) { struct kfd_dev *kfddev = adev->kfd.dev; struct dev_pagemap *pgmap; - struct resource *res; + struct resource *res = NULL; unsigned long size; void *r; @@ -962,28 +964,34 @@ int svm_migrate_init(struct amdgpu_device *adev) * should remove reserved size */ size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20); - res = devm_request_free_mem_region(adev->dev, &iomem_resource, size); - if (IS_ERR(res)) - return -ENOMEM; + if (adev->gmc.xgmi.connected_to_cpu) { + pgmap->range.start = adev->gmc.aper_base; + pgmap->range.end = adev->gmc.aper_base +
adev->gmc.aper_size - 1; + pgmap->type = MEMORY_DEVICE_COHERENT; + } else { + res = devm_request_free_mem_region(adev->dev, &iomem_resource, size); + if (IS_ERR(res)) + return -ENOMEM; + pgmap->range.start = res->start; + pgmap->range.end = res->end; + pgmap->type = MEMORY_DEVICE_PRIVATE; + } - pgmap->type = MEMORY_DEVICE_PRIVATE; pgmap->nr_range = 1; - pgmap->range.start = res->start; - pgmap->range.end = res->end; pgmap->ops = &svm_migrate_pgmap_ops; pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev); - pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; - + pgmap->flags = 0; /* Device manager releases device-specific resources, memory region and * pgmap when driver disconnects from device. */ r = devm_memremap_pages(adev->dev, pgmap); if (IS_ERR(r)) { pr_err("failed to register HMM device memory\n"); - /* Disable SVM support capability */ pgmap->type = 0; - devm_release_mem_region(adev->dev, res->start, resource_size(res)); + if (pgmap->type == MEMORY_DEVICE_PRIVATE) + devm_release_mem_region(adev->dev, res->start, + res->end - res->start + 1); return PTR_ERR(r); } From fcabd25dc5bde399d8013f4a7a267a4dfda616fb Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:43 -0500 Subject: [PATCH 101/321] lib: test_hmm add ioctl to get zone device type new ioctl cmd added to query zone device type. This will be used once the test_hmm adds zone device coherent type. Link: https://lkml.kernel.org/r/20220707190349.9778-10-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Reviewed-by: Alistair Poppple Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- lib/test_hmm.c | 11 +++++++++-- lib/test_hmm_uapi.h | 14 ++++++++++---- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index f2c3015c5c82c..ed737eae5959b 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -87,6 +87,7 @@ struct dmirror_chunk { struct dmirror_device { struct cdev cdevice; struct hmm_devmem *devmem; + unsigned int zone_device_type; unsigned int devmem_capacity; unsigned int devmem_count; @@ -1266,14 +1267,20 @@ static void dmirror_device_remove(struct dmirror_device *mdevice) static int __init hmm_dmirror_init(void) { int ret; - int id; + int id = 0; + int ndevices = 0; ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES, "HMM_DMIRROR"); if (ret) goto err_unreg; - for (id = 0; id < DMIRROR_NDEVICES; id++) { + memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0])); + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; + for (id = 0; id < ndevices; id++) { ret = dmirror_device_init(dmirror_devices + id, id); if (ret) goto err_chrdev; diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h index f14dea5dcd062..0511af7464eed 100644 --- a/lib/test_hmm_uapi.h +++ b/lib/test_hmm_uapi.h @@ -31,10 +31,11 @@ struct hmm_dmirror_cmd { /* Expose the address space of the calling process through hmm device file */ #define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd) #define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct 
hmm_dmirror_cmd) +#define HMM_DMIRROR_MIGRATE_TO_DEV _IOWR('H', 0x02, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_MIGRATE_TO_SYS _IOWR('H', 0x03, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd) /* * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT. @@ -62,4 +63,9 @@ enum { HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30, }; +enum { + /* 0 is reserved to catch uninitialized type fields */ + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1, +}; + #endif /* _LIB_TEST_HMM_UAPI_H */ From 163a1f8a8ed4874d9b553e00dbc37431e7d23b16 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:44 -0500 Subject: [PATCH 102/321] lib: test_hmm add module param for zone device type In order to configure device coherent in test_hmm, two module parameters should be passed, which correspond to the SP start address of each device (2) spm_addr_dev0 & spm_addr_dev1. If no parameters are passed, private device type is configured. Link: https://lkml.kernel.org/r/20220707190349.9778-11-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Reviewed-by: Alistair Poppple Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- lib/test_hmm.c | 73 ++++++++++++++++++++++++++++++++------------- lib/test_hmm_uapi.h | 1 + 2 files changed, 53 insertions(+), 21 deletions(-) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index ed737eae5959b..436124da00e66 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -37,6 +37,16 @@ #define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U) #define DEVMEM_CHUNKS_RESERVE 16 +static unsigned long spm_addr_dev0; +module_param(spm_addr_dev0, long, 0644); +MODULE_PARM_DESC(spm_addr_dev0, + "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE."); + +static unsigned long spm_addr_dev1; +module_param(spm_addr_dev1, long, 0644); +MODULE_PARM_DESC(spm_addr_dev1, + "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. 
Minimum SPM size should be DEVMEM_CHUNK_SIZE."); + static const struct dev_pagemap_ops dmirror_devmem_ops; static const struct mmu_interval_notifier_ops dmirror_min_ops; static dev_t dmirror_dev; @@ -455,28 +465,44 @@ static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) return ret; } -static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, +static int dmirror_allocate_chunk(struct dmirror_device *mdevice, struct page **ppage) { struct dmirror_chunk *devmem; - struct resource *res; + struct resource *res = NULL; unsigned long pfn; unsigned long pfn_first; unsigned long pfn_last; void *ptr; + int ret = -ENOMEM; devmem = kzalloc(sizeof(*devmem), GFP_KERNEL); if (!devmem) - return false; + return ret; - res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, - "hmm_dmirror"); - if (IS_ERR(res)) + switch (mdevice->zone_device_type) { + case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE: + res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, + "hmm_dmirror"); + if (IS_ERR_OR_NULL(res)) + goto err_devmem; + devmem->pagemap.range.start = res->start; + devmem->pagemap.range.end = res->end; + devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; + break; + case HMM_DMIRROR_MEMORY_DEVICE_COHERENT: + devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ? + spm_addr_dev0 : + spm_addr_dev1; + devmem->pagemap.range.end = devmem->pagemap.range.start + + DEVMEM_CHUNK_SIZE - 1; + devmem->pagemap.type = MEMORY_DEVICE_COHERENT; + break; + default: + ret = -EINVAL; goto err_devmem; + } - devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; - devmem->pagemap.range.start = res->start; - devmem->pagemap.range.end = res->end; devmem->pagemap.nr_range = 1; devmem->pagemap.ops = &dmirror_devmem_ops; devmem->pagemap.owner = mdevice; @@ -497,10 +523,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, mdevice->devmem_capacity = new_capacity; mdevice->devmem_chunks = new_chunks; } - ptr = memremap_pages(&devmem->pagemap, numa_node_id()); - if (IS_ERR(ptr)) + if (IS_ERR_OR_NULL(ptr)) { + if (ptr) + ret = PTR_ERR(ptr); + else + ret = -EFAULT; goto err_release; + } devmem->mdevice = mdevice; pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT; @@ -529,15 +559,17 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, } spin_unlock(&mdevice->lock); - return true; + return 0; err_release: mutex_unlock(&mdevice->devmem_lock); - release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range)); + if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) + release_mem_region(devmem->pagemap.range.start, + range_len(&devmem->pagemap.range)); err_devmem: kfree(devmem); - return false; + return ret; } static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) @@ -562,7 +594,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) spin_unlock(&mdevice->lock); } else { spin_unlock(&mdevice->lock); - if (!dmirror_allocate_chunk(mdevice, &dpage)) + if (dmirror_allocate_chunk(mdevice, &dpage)) goto error; } @@ -1238,10 +1270,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id) if (ret) return ret; - /* Build a list of free ZONE_DEVICE private struct pages */ - dmirror_allocate_chunk(mdevice, NULL); - - return 0; + /* Build a list of free ZONE_DEVICE struct pages */ + return dmirror_allocate_chunk(mdevice, NULL); } static void dmirror_device_remove(struct dmirror_device *mdevice) @@ -1254,8 +1284,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice) 
mdevice->devmem_chunks[i]; memunmap_pages(&devmem->pagemap); - release_mem_region(devmem->pagemap.range.start, - range_len(&devmem->pagemap.range)); + if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) + release_mem_region(devmem->pagemap.range.start, + range_len(&devmem->pagemap.range)); kfree(devmem); } kfree(mdevice->devmem_chunks); diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h index 0511af7464eed..f700da7807c1c 100644 --- a/lib/test_hmm_uapi.h +++ b/lib/test_hmm_uapi.h @@ -66,6 +66,7 @@ enum { enum { /* 0 is reserved to catch uninitialized type fields */ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1, + HMM_DMIRROR_MEMORY_DEVICE_COHERENT, }; #endif /* _LIB_TEST_HMM_UAPI_H */ From 52bdccbc5fc8f5c3b7aaf70788466fe8385b7cb3 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:45 -0500 Subject: [PATCH 103/321] lib: add support for device coherent type in test_hmm Device Coherent type uses device memory that is coherently accesible by the CPU. This could be shown as SP (special purpose) memory range at the BIOS-e820 memory enumeration. If no SP memory is supported in system, this could be faked by setting CONFIG_EFI_FAKE_MEMMAP. Currently, test_hmm only supports two different SP ranges of at least 256MB size. This could be specified in the kernel parameter variable efi_fake_mem. Ex. Two SP ranges of 1GB starting at 0x100000000 & 0x140000000 physical address. Ex. efi_fake_mem=1G@0x100000000:0x40000,1G@0x140000000:0x40000 Private and coherent device mirror instances can be created in the same probed. This is done by passing the module parameters spm_addr_dev0 & spm_addr_dev1. In this case, it will create four instances of device_mirror. The first two correspond to private device type, the last two to coherent type. Then, they can be easily accessed from user space through /dev/hmm_mirror. Usually num_device 0 and 1 are for private, and 2 and 3 for coherent types. If no module parameters are passed, two instances of private type device_mirror will be created only. Link: https://lkml.kernel.org/r/20220707190349.9778-12-alex.sierra@amd.com Signed-off-by: Alex Sierra Acked-by: Felix Kuehling Reviewed-by: Alistair Poppple Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- lib/test_hmm.c | 253 +++++++++++++++++++++++++++++++++----------- lib/test_hmm_uapi.h | 4 + 2 files changed, 196 insertions(+), 61 deletions(-) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 436124da00e66..e3965cafd27cf 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -32,11 +32,22 @@ #include "test_hmm_uapi.h" -#define DMIRROR_NDEVICES 2 +#define DMIRROR_NDEVICES 4 #define DMIRROR_RANGE_FAULT_TIMEOUT 1000 #define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U) #define DEVMEM_CHUNKS_RESERVE 16 +/* + * For device_private pages, dpage is just a dummy struct page + * representing a piece of device memory. dmirror_devmem_alloc_page + * allocates a real system memory page as backing storage to fake a + * real device. zone_device_data points to that backing page. But + * for device_coherent memory, the struct page represents real + * physical CPU-accessible memory that we can use directly. + */ +#define BACKING_PAGE(page) (is_device_private_page((page)) ? 
\ + (page)->zone_device_data : (page)) + static unsigned long spm_addr_dev0; module_param(spm_addr_dev0, long, 0644); MODULE_PARM_DESC(spm_addr_dev0, @@ -125,6 +136,21 @@ static int dmirror_bounce_init(struct dmirror_bounce *bounce, return 0; } +static bool dmirror_is_private_zone(struct dmirror_device *mdevice) +{ + return (mdevice->zone_device_type == + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? true : false; +} + +static enum migrate_vma_direction +dmirror_select_device(struct dmirror *dmirror) +{ + return (dmirror->mdevice->zone_device_type == + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? + MIGRATE_VMA_SELECT_DEVICE_PRIVATE : + MIGRATE_VMA_SELECT_DEVICE_COHERENT; +} + static void dmirror_bounce_fini(struct dmirror_bounce *bounce) { vfree(bounce->ptr); @@ -575,16 +601,19 @@ static int dmirror_allocate_chunk(struct dmirror_device *mdevice, static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) { struct page *dpage = NULL; - struct page *rpage; + struct page *rpage = NULL; /* - * This is a fake device so we alloc real system memory to store - * our device memory. + * For ZONE_DEVICE private type, this is a fake device so we allocate + * real system memory to store our device memory. + * For ZONE_DEVICE coherent type we use the actual dpage to store the + * data and ignore rpage. */ - rpage = alloc_page(GFP_HIGHUSER); - if (!rpage) - return NULL; - + if (dmirror_is_private_zone(mdevice)) { + rpage = alloc_page(GFP_HIGHUSER); + if (!rpage) + return NULL; + } spin_lock(&mdevice->lock); if (mdevice->free_pages) { @@ -603,7 +632,8 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) return dpage; error: - __free_page(rpage); + if (rpage) + __free_page(rpage); return NULL; } @@ -629,12 +659,16 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, * unallocated pte_none() or read-only zero page. */ spage = migrate_pfn_to_page(*src); + if (WARN(spage && is_zone_device_page(spage), + "page already in device spage pfn: 0x%lx\n", + page_to_pfn(spage))) + continue; dpage = dmirror_devmem_alloc_page(mdevice); if (!dpage) continue; - rpage = dpage->zone_device_data; + rpage = BACKING_PAGE(dpage); if (spage) copy_highpage(rpage, spage); else @@ -648,6 +682,8 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, */ rpage->zone_device_data = dmirror; + pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n", + page_to_pfn(spage), page_to_pfn(dpage)); *dst = migrate_pfn(page_to_pfn(dpage)); if ((*src & MIGRATE_PFN_WRITE) || (!spage && args->vma->vm_flags & VM_WRITE)) @@ -725,11 +761,7 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, if (!dpage) continue; - /* - * Store the page that holds the data so the page table - * doesn't have to deal with ZONE_DEVICE private pages. 
- */ - entry = dpage->zone_device_data; + entry = BACKING_PAGE(dpage); if (*dst & MIGRATE_PFN_WRITE) entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE); entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); @@ -815,15 +847,126 @@ static int dmirror_exclusive(struct dmirror *dmirror, return ret; } -static int dmirror_migrate(struct dmirror *dmirror, - struct hmm_dmirror_cmd *cmd) +static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, + struct dmirror *dmirror) +{ + const unsigned long *src = args->src; + unsigned long *dst = args->dst; + unsigned long start = args->start; + unsigned long end = args->end; + unsigned long addr; + + for (addr = start; addr < end; addr += PAGE_SIZE, + src++, dst++) { + struct page *dpage, *spage; + + spage = migrate_pfn_to_page(*src); + if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) + continue; + + if (WARN_ON(!is_device_private_page(spage) && + !is_device_coherent_page(spage))) + continue; + spage = BACKING_PAGE(spage); + dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); + if (!dpage) + continue; + pr_debug("migrating from dev to sys pfn src: 0x%lx pfn dst: 0x%lx\n", + page_to_pfn(spage), page_to_pfn(dpage)); + + lock_page(dpage); + xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); + copy_highpage(dpage, spage); + *dst = migrate_pfn(page_to_pfn(dpage)); + if (*src & MIGRATE_PFN_WRITE) + *dst |= MIGRATE_PFN_WRITE; + } + return 0; +} + +static unsigned long +dmirror_successful_migrated_pages(struct migrate_vma *migrate) +{ + unsigned long cpages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + migrate->src[i] & MIGRATE_PFN_MIGRATE) + cpages++; + } + return cpages; +} + +static int dmirror_migrate_to_system(struct dmirror *dmirror, + struct hmm_dmirror_cmd *cmd) { unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; struct vm_area_struct *vma; - unsigned long src_pfns[64]; - unsigned long dst_pfns[64]; + unsigned long src_pfns[64] = { 0 }; + unsigned long dst_pfns[64] = { 0 }; + struct migrate_vma args; + unsigned long next; + int ret; + + start = cmd->addr; + end = start + size; + if (end < start) + return -EINVAL; + + /* Since the mm is for the mirrored process, get a reference first. 
*/ + if (!mmget_not_zero(mm)) + return -EINVAL; + + cmd->cpages = 0; + mmap_read_lock(mm); + for (addr = start; addr < end; addr = next) { + vma = vma_lookup(mm, addr); + if (!vma || !(vma->vm_flags & VM_READ)) { + ret = -EINVAL; + goto out; + } + next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); + if (next > vma->vm_end) + next = vma->vm_end; + + args.vma = vma; + args.src = src_pfns; + args.dst = dst_pfns; + args.start = addr; + args.end = next; + args.pgmap_owner = dmirror->mdevice; + args.flags = dmirror_select_device(dmirror); + + ret = migrate_vma_setup(&args); + if (ret) + goto out; + + pr_debug("Migrating from device mem to sys mem\n"); + dmirror_devmem_fault_alloc_and_copy(&args, dmirror); + + migrate_vma_pages(&args); + cmd->cpages += dmirror_successful_migrated_pages(&args); + migrate_vma_finalize(&args); + } +out: + mmap_read_unlock(mm); + mmput(mm); + + return ret; +} + +static int dmirror_migrate_to_device(struct dmirror *dmirror, + struct hmm_dmirror_cmd *cmd) +{ + unsigned long start, end, addr; + unsigned long size = cmd->npages << PAGE_SHIFT; + struct mm_struct *mm = dmirror->notifier.mm; + struct vm_area_struct *vma; + unsigned long src_pfns[64] = { 0 }; + unsigned long dst_pfns[64] = { 0 }; struct dmirror_bounce bounce; struct migrate_vma args; unsigned long next; @@ -860,6 +1003,7 @@ static int dmirror_migrate(struct dmirror *dmirror, if (ret) goto out; + pr_debug("Migrating from sys mem to device mem\n"); dmirror_migrate_alloc_and_copy(&args, dmirror); migrate_vma_pages(&args); dmirror_migrate_finalize_and_map(&args, dmirror); @@ -868,7 +1012,10 @@ static int dmirror_migrate(struct dmirror *dmirror, mmap_read_unlock(mm); mmput(mm); - /* Return the migrated data for verification. */ + /* + * Return the migrated data for verification. + * Only for pages in device zone + */ ret = dmirror_bounce_init(&bounce, start, size); if (ret) return ret; @@ -911,6 +1058,12 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range, *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL; else *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE; + } else if (is_device_coherent_page(page)) { + /* Is the page migrated to this device or some other? 
*/ + if (dmirror->mdevice == dmirror_page_to_device(page)) + *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL; + else + *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE; } else if (is_zero_pfn(page_to_pfn(page))) *perm = HMM_DMIRROR_PROT_ZERO; else @@ -1098,8 +1251,12 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp, ret = dmirror_write(dmirror, &cmd); break; - case HMM_DMIRROR_MIGRATE: - ret = dmirror_migrate(dmirror, &cmd); + case HMM_DMIRROR_MIGRATE_TO_DEV: + ret = dmirror_migrate_to_device(dmirror, &cmd); + break; + + case HMM_DMIRROR_MIGRATE_TO_SYS: + ret = dmirror_migrate_to_system(dmirror, &cmd); break; case HMM_DMIRROR_EXCLUSIVE: @@ -1161,14 +1318,13 @@ static const struct file_operations dmirror_fops = { static void dmirror_devmem_free(struct page *page) { - struct page *rpage = page->zone_device_data; + struct page *rpage = BACKING_PAGE(page); struct dmirror_device *mdevice; - if (rpage) + if (rpage != page) __free_page(rpage); mdevice = dmirror_page_to_device(page); - spin_lock(&mdevice->lock); mdevice->cfree++; page->zone_device_data = mdevice->free_pages; @@ -1176,43 +1332,11 @@ static void dmirror_devmem_free(struct page *page) spin_unlock(&mdevice->lock); } -static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, - struct dmirror *dmirror) -{ - const unsigned long *src = args->src; - unsigned long *dst = args->dst; - unsigned long start = args->start; - unsigned long end = args->end; - unsigned long addr; - - for (addr = start; addr < end; addr += PAGE_SIZE, - src++, dst++) { - struct page *dpage, *spage; - - spage = migrate_pfn_to_page(*src); - if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) - continue; - spage = spage->zone_device_data; - - dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); - if (!dpage) - continue; - - lock_page(dpage); - xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); - copy_highpage(dpage, spage); - *dst = migrate_pfn(page_to_pfn(dpage)); - if (*src & MIGRATE_PFN_WRITE) - *dst |= MIGRATE_PFN_WRITE; - } - return 0; -} - static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) { struct migrate_vma args; - unsigned long src_pfns; - unsigned long dst_pfns; + unsigned long src_pfns = 0; + unsigned long dst_pfns = 0; struct page *rpage; struct dmirror *dmirror; vm_fault_t ret; @@ -1232,7 +1356,7 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) args.src = &src_pfns; args.dst = &dst_pfns; args.pgmap_owner = dmirror->mdevice; - args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; + args.flags = dmirror_select_device(dmirror); if (migrate_vma_setup(&args)) return VM_FAULT_SIGBUS; @@ -1311,6 +1435,12 @@ static int __init hmm_dmirror_init(void) HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; dmirror_devices[ndevices++].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; + if (spm_addr_dev0 && spm_addr_dev1) { + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_COHERENT; + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_COHERENT; + } for (id = 0; id < ndevices; id++) { ret = dmirror_device_init(dmirror_devices + id, id); if (ret) @@ -1333,7 +1463,8 @@ static void __exit hmm_dmirror_exit(void) int id; for (id = 0; id < DMIRROR_NDEVICES; id++) - dmirror_device_remove(dmirror_devices + id); + if (dmirror_devices[id].zone_device_type) + dmirror_device_remove(dmirror_devices + id); unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES); } diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h index f700da7807c1c..e31d58c9034a7 100644 --- a/lib/test_hmm_uapi.h +++ b/lib/test_hmm_uapi.h 
@@ -50,6 +50,8 @@ struct hmm_dmirror_cmd { * device the ioctl() is made * HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some * other device + * HMM_DMIRROR_PROT_DEV_COHERENT: Migrate device coherent page on the device + * the ioctl() is made */ enum { HMM_DMIRROR_PROT_ERROR = 0xFF, @@ -61,6 +63,8 @@ enum { HMM_DMIRROR_PROT_ZERO = 0x10, HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20, HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30, + HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL = 0x40, + HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE = 0x50, }; enum { From 5890c812d8151e70dbeb0b81539123df1e77084d Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:46 -0500 Subject: [PATCH 104/321] tools: update hmm-test to support device coherent type Test cases such as migrate_fault and migrate_multiple, were modified to explicit migrate from device to sys memory without the need of page faults, when using device coherent type. Snapshot test case updated to read memory device type first and based on that, get the proper returned results migrate_ping_pong test case added to test explicit migration from device to sys memory for both private and coherent zone types. Helpers to migrate from device to sys memory and vicerversa were also added. Link: https://lkml.kernel.org/r/20220707190349.9778-13-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Reviewed-by: Alistair Popple Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/hmm-tests.c | 121 ++++++++++++++++++++----- 1 file changed, 100 insertions(+), 21 deletions(-) diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c index 203323967b507..4b547188ec401 100644 --- a/tools/testing/selftests/vm/hmm-tests.c +++ b/tools/testing/selftests/vm/hmm-tests.c @@ -46,6 +46,13 @@ struct hmm_buffer { uint64_t faults; }; +enum { + HMM_PRIVATE_DEVICE_ONE, + HMM_PRIVATE_DEVICE_TWO, + HMM_COHERENCE_DEVICE_ONE, + HMM_COHERENCE_DEVICE_TWO, +}; + #define TWOMEG (1 << 21) #define HMM_BUFFER_SIZE (1024 << 12) #define HMM_PATH_MAX 64 @@ -60,6 +67,21 @@ FIXTURE(hmm) unsigned int page_shift; }; +FIXTURE_VARIANT(hmm) +{ + int device_number; +}; + +FIXTURE_VARIANT_ADD(hmm, hmm_device_private) +{ + .device_number = HMM_PRIVATE_DEVICE_ONE, +}; + +FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent) +{ + .device_number = HMM_COHERENCE_DEVICE_ONE, +}; + FIXTURE(hmm2) { int fd0; @@ -68,6 +90,24 @@ FIXTURE(hmm2) unsigned int page_shift; }; +FIXTURE_VARIANT(hmm2) +{ + int device_number0; + int device_number1; +}; + +FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private) +{ + .device_number0 = HMM_PRIVATE_DEVICE_ONE, + .device_number1 = HMM_PRIVATE_DEVICE_TWO, +}; + +FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent) +{ + .device_number0 = HMM_COHERENCE_DEVICE_ONE, + .device_number1 = HMM_COHERENCE_DEVICE_TWO, +}; + static int hmm_open(int unit) { char pathname[HMM_PATH_MAX]; @@ -81,12 +121,19 @@ static int hmm_open(int unit) return fd; } +static bool hmm_is_coherent_type(int dev_num) +{ + return (dev_num >= HMM_COHERENCE_DEVICE_ONE); +} + FIXTURE_SETUP(hmm) { self->page_size = sysconf(_SC_PAGE_SIZE); self->page_shift = ffs(self->page_size) - 1; - self->fd = hmm_open(0); + self->fd = hmm_open(variant->device_number); + if (self->fd < 0 && hmm_is_coherent_type(variant->device_number)) + SKIP(exit(0), "DEVICE_COHERENT not available"); ASSERT_GE(self->fd, 0); } @@ -95,9 +142,11 @@ FIXTURE_SETUP(hmm2) 
self->page_size = sysconf(_SC_PAGE_SIZE); self->page_shift = ffs(self->page_size) - 1; - self->fd0 = hmm_open(0); + self->fd0 = hmm_open(variant->device_number0); + if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0)) + SKIP(exit(0), "DEVICE_COHERENT not available"); ASSERT_GE(self->fd0, 0); - self->fd1 = hmm_open(1); + self->fd1 = hmm_open(variant->device_number1); ASSERT_GE(self->fd1, 0); } @@ -211,6 +260,20 @@ static void hmm_nanosleep(unsigned int n) nanosleep(&t, NULL); } +static int hmm_migrate_sys_to_dev(int fd, + struct hmm_buffer *buffer, + unsigned long npages) +{ + return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages); +} + +static int hmm_migrate_dev_to_sys(int fd, + struct hmm_buffer *buffer, + unsigned long npages) +{ + return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages); +} + /* * Simple NULL test of device open/close. */ @@ -875,7 +938,7 @@ TEST_F(hmm, migrate) ptr[i] = i; /* Migrate memory to device. */ - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, npages); @@ -923,7 +986,7 @@ TEST_F(hmm, migrate_fault) ptr[i] = i; /* Migrate memory to device. */ - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, npages); @@ -936,7 +999,7 @@ TEST_F(hmm, migrate_fault) ASSERT_EQ(ptr[i], i); /* Migrate memory to the device again. */ - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, npages); @@ -976,7 +1039,7 @@ TEST_F(hmm, migrate_shared) ASSERT_NE(buffer->ptr, MAP_FAILED); /* Migrate memory to device. */ - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); ASSERT_EQ(ret, -ENOENT); hmm_buffer_free(buffer); @@ -1015,7 +1078,7 @@ TEST_F(hmm2, migrate_mixed) p = buffer->ptr; /* Migrating a protected area should be an error. */ - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages); + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages); ASSERT_EQ(ret, -EINVAL); /* Punch a hole after the first page address. */ @@ -1023,7 +1086,7 @@ TEST_F(hmm2, migrate_mixed) ASSERT_EQ(ret, 0); /* We expect an error if the vma doesn't cover the range. */ - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3); + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3); ASSERT_EQ(ret, -EINVAL); /* Page 2 will be a read-only zero page. */ @@ -1055,13 +1118,13 @@ TEST_F(hmm2, migrate_mixed) /* Now try to migrate pages 2-5 to device 1. */ buffer->ptr = p + 2 * self->page_size; - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4); + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, 4); /* Page 5 won't be migrated to device 0 because it's on device 1. */ buffer->ptr = p + 5 * self->page_size; - ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1); + ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1); ASSERT_EQ(ret, -ENOENT); buffer->ptr = p; @@ -1070,8 +1133,12 @@ TEST_F(hmm2, migrate_mixed) } /* - * Migrate anonymous memory to device private memory and fault it back to system - * memory multiple times. + * Migrate anonymous memory to device memory and back to system memory + * multiple times. 
In case of private zone configuration, this is done + * through fault pages accessed by CPU. In case of coherent zone configuration, + * the pages from the device should be explicitly migrated back to system memory. + * The reason is Coherent device zone has coherent access by CPU, therefore + * it will not generate any page fault. */ TEST_F(hmm, migrate_multiple) { @@ -1107,8 +1174,7 @@ TEST_F(hmm, migrate_multiple) ptr[i] = i; /* Migrate memory to device. */ - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, - npages); + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, npages); @@ -1116,7 +1182,13 @@ TEST_F(hmm, migrate_multiple) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) ASSERT_EQ(ptr[i], i); - /* Fault pages back to system memory and check them. */ + /* Migrate back to system memory and check them. */ + if (hmm_is_coherent_type(variant->device_number)) { + ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + } + for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) ASSERT_EQ(ptr[i], i); @@ -1354,13 +1426,13 @@ TEST_F(hmm2, snapshot) /* Page 5 will be migrated to device 0. */ buffer->ptr = p + 5 * self->page_size; - ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1); + ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, 1); /* Page 6 will be migrated to device 1. */ buffer->ptr = p + 6 * self->page_size; - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1); + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1); ASSERT_EQ(ret, 0); ASSERT_EQ(buffer->cpages, 1); @@ -1377,9 +1449,16 @@ TEST_F(hmm2, snapshot) ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ); ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ); ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE); - ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL | - HMM_DMIRROR_PROT_WRITE); - ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE); + if (!hmm_is_coherent_type(variant->device_number0)) { + ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL | + HMM_DMIRROR_PROT_WRITE); + ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE); + } else { + ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | + HMM_DMIRROR_PROT_WRITE); + ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE | + HMM_DMIRROR_PROT_WRITE); + } hmm_buffer_free(buffer); } From 2fc5329059b0fb66e01c543f8c85d548786023df Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:47 -0500 Subject: [PATCH 105/321] tools: update test_hmm script to support SP config Add two more parameters to set spm_addr_dev0 & spm_addr_dev1 addresses. These two parameters configure the start SP addresses for each device in test_hmm driver. Consequently, this configures zone device type as coherent. 
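For example, assuming two SP ranges have been reserved via efi_fake_mem as shown earlier in this series (the addresses below are illustrative only; any two reserved ranges of at least DEVMEM_CHUNK_SIZE will do), the smoke test can be run against the coherent devices with:

	./test_hmm.sh smoke 0x100000000 0x140000000

When both parameters are given, the script loads test_hmm with spm_addr_dev0 and spm_addr_dev1 set and additionally creates /dev/hmm_dmirror2 and /dev/hmm_dmirror3 for the coherent-type devices, next to the two private-type devices.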
Link: https://lkml.kernel.org/r/20220707190349.9778-14-alex.sierra@amd.com Signed-off-by: Alex Sierra Signed-off-by: Christoph Hellwig Acked-by: Felix Kuehling Reviewed-by: Alistair Popple Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/test_hmm.sh | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/vm/test_hmm.sh b/tools/testing/selftests/vm/test_hmm.sh index 0647b525a6256..539c9371e592a 100755 --- a/tools/testing/selftests/vm/test_hmm.sh +++ b/tools/testing/selftests/vm/test_hmm.sh @@ -40,11 +40,26 @@ check_test_requirements() load_driver() { - modprobe $DRIVER > /dev/null 2>&1 + if [ $# -eq 0 ]; then + modprobe $DRIVER > /dev/null 2>&1 + else + if [ $# -eq 2 ]; then + modprobe $DRIVER spm_addr_dev0=$1 spm_addr_dev1=$2 + > /dev/null 2>&1 + else + echo "Missing module parameters. Make sure pass"\ + "spm_addr_dev0 and spm_addr_dev1" + usage + fi + fi if [ $? == 0 ]; then major=$(awk "\$2==\"HMM_DMIRROR\" {print \$1}" /proc/devices) mknod /dev/hmm_dmirror0 c $major 0 mknod /dev/hmm_dmirror1 c $major 1 + if [ $# -eq 2 ]; then + mknod /dev/hmm_dmirror2 c $major 2 + mknod /dev/hmm_dmirror3 c $major 3 + fi fi } @@ -58,7 +73,7 @@ run_smoke() { echo "Running smoke test. Note, this test provides basic coverage." - load_driver + load_driver $1 $2 $(dirname "${BASH_SOURCE[0]}")/hmm-tests unload_driver } @@ -75,6 +90,9 @@ usage() echo "# Smoke testing" echo "./${TEST_NAME}.sh smoke" echo + echo "# Smoke testing with SPM enabled" + echo "./${TEST_NAME}.sh smoke " + echo exit 0 } @@ -84,7 +102,7 @@ function run_test() usage else if [ "$1" = "smoke" ]; then - run_smoke + run_smoke $2 $3 else usage fi From 36c3fc2bc96c1980ea38be1e74f798c67b8dce88 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:48 -0500 Subject: [PATCH 106/321] tools: add hmm gup tests for device coherent type The intention is to test hmm device coherent type under different get user pages paths. Also, test gup with FOLL_LONGTERM flag set in device coherent pages. These pages should get migrated back to system memory. Link: https://lkml.kernel.org/r/20220707190349.9778-15-alex.sierra@amd.com Signed-off-by: Alex Sierra Reviewed-by: Alistair Popple Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Felix Kuehling Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/hmm-tests.c | 110 +++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c index 4b547188ec401..bb38b97776100 100644 --- a/tools/testing/selftests/vm/hmm-tests.c +++ b/tools/testing/selftests/vm/hmm-tests.c @@ -36,6 +36,7 @@ * in the usual include/uapi/... directory. 
*/ #include "../../../../lib/test_hmm_uapi.h" +#include "../../../../mm/gup_test.h" struct hmm_buffer { void *ptr; @@ -59,6 +60,9 @@ enum { #define NTIMES 10 #define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1))) +/* Just the flags we need, copied from mm.h: */ +#define FOLL_WRITE 0x01 /* check pte is writable */ +#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite */ FIXTURE(hmm) { @@ -1764,4 +1768,110 @@ TEST_F(hmm, exclusive_cow) hmm_buffer_free(buffer); } +static int gup_test_exec(int gup_fd, unsigned long addr, int cmd, + int npages, int size, int flags) +{ + struct gup_test gup = { + .nr_pages_per_call = npages, + .addr = addr, + .gup_flags = FOLL_WRITE | flags, + .size = size, + }; + + if (ioctl(gup_fd, cmd, &gup)) { + perror("ioctl on error\n"); + return errno; + } + + return 0; +} + +/* + * Test get user device pages through gup_test. Setting PIN_LONGTERM flag. + * This should trigger a migration back to system memory for both, private + * and coherent type pages. + * This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added + * to your configuration before you run it. + */ +TEST_F(hmm, hmm_gup_test) +{ + struct hmm_buffer *buffer; + int gup_fd; + unsigned long npages; + unsigned long size; + unsigned long i; + int *ptr; + int ret; + unsigned char *m; + + gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR); + if (gup_fd == -1) + SKIP(return, "Skipping test, could not find gup_test driver"); + + npages = 4; + size = npages << self->page_shift; + + buffer = malloc(sizeof(*buffer)); + ASSERT_NE(buffer, NULL); + + buffer->fd = -1; + buffer->size = size; + buffer->mirror = malloc(size); + ASSERT_NE(buffer->mirror, NULL); + + buffer->ptr = mmap(NULL, size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + buffer->fd, 0); + ASSERT_NE(buffer->ptr, MAP_FAILED); + + /* Initialize buffer in system memory. */ + for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) + ptr[i] = i; + + /* Migrate memory to device. */ + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + /* Check what the device read. */ + for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) + ASSERT_EQ(ptr[i], i); + + ASSERT_EQ(gup_test_exec(gup_fd, + (unsigned long)buffer->ptr, + GUP_BASIC_TEST, 1, self->page_size, 0), 0); + ASSERT_EQ(gup_test_exec(gup_fd, + (unsigned long)buffer->ptr + 1 * self->page_size, + GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0); + ASSERT_EQ(gup_test_exec(gup_fd, + (unsigned long)buffer->ptr + 2 * self->page_size, + PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0); + ASSERT_EQ(gup_test_exec(gup_fd, + (unsigned long)buffer->ptr + 3 * self->page_size, + PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0); + + /* Take snapshot to CPU pagetables */ + ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + m = buffer->mirror; + if (hmm_is_coherent_type(variant->device_number)) { + ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]); + ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]); + } else { + ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]); + ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]); + } + ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]); + ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]); + /* + * Check again the content on the pages. Make sure there's no + * corrupted data. 
+ */ + for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) + ASSERT_EQ(ptr[i], i); + + close(gup_fd); + hmm_buffer_free(buffer); +} TEST_HARNESS_MAIN From 4f3ef4c23b7cc485c16d4b6aac348c3585de66a8 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 7 Jul 2022 14:03:49 -0500 Subject: [PATCH 107/321] tools: add selftests to hmm for COW in device memory The objective is to test device migration mechanism in pages marked as COW, for private and coherent device type. In case of writing to COW private page(s), a page fault will migrate pages back to system memory first. Then, these pages will be duplicated. In case of COW device coherent type, pages are duplicated directly from device memory. Link: https://lkml.kernel.org/r/20220707190349.9778-16-alex.sierra@amd.com Signed-off-by: Alex Sierra Acked-by: Felix Kuehling Cc: Alistair Popple Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Matthew Wilcox Cc: Ralph Campbell Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/hmm-tests.c | 80 ++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c index bb38b97776100..716b62c05e3d3 100644 --- a/tools/testing/selftests/vm/hmm-tests.c +++ b/tools/testing/selftests/vm/hmm-tests.c @@ -1874,4 +1874,84 @@ TEST_F(hmm, hmm_gup_test) close(gup_fd); hmm_buffer_free(buffer); } + +/* + * Test copy-on-write in device pages. + * In case of writing to COW private page(s), a page fault will migrate pages + * back to system memory first. Then, these pages will be duplicated. In case + * of COW device coherent type, pages are duplicated directly from device + * memory. + */ +TEST_F(hmm, hmm_cow_in_device) +{ + struct hmm_buffer *buffer; + unsigned long npages; + unsigned long size; + unsigned long i; + int *ptr; + int ret; + unsigned char *m; + pid_t pid; + int status; + + npages = 4; + size = npages << self->page_shift; + + buffer = malloc(sizeof(*buffer)); + ASSERT_NE(buffer, NULL); + + buffer->fd = -1; + buffer->size = size; + buffer->mirror = malloc(size); + ASSERT_NE(buffer->mirror, NULL); + + buffer->ptr = mmap(NULL, size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + buffer->fd, 0); + ASSERT_NE(buffer->ptr, MAP_FAILED); + + /* Initialize buffer in system memory. */ + for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) + ptr[i] = i; + + /* Migrate memory to device. */ + + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + + pid = fork(); + if (pid == -1) + ASSERT_EQ(pid, 0); + if (!pid) { + /* Child process waitd for SIGTERM from the parent. */ + while (1) { + } + perror("Should not reach this\n"); + exit(0); + } + /* Parent process writes to COW pages(s) and gets a + * new copy in system. In case of device private pages, + * this write causes a migration to system mem first. 
+ */ + for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) + ptr[i] = i; + + /* Terminate child and wait */ + EXPECT_EQ(0, kill(pid, SIGTERM)); + EXPECT_EQ(pid, waitpid(pid, &status, 0)); + EXPECT_NE(0, WIFSIGNALED(status)); + EXPECT_EQ(SIGTERM, WTERMSIG(status)); + + /* Take snapshot to CPU pagetables */ + ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + m = buffer->mirror; + for (i = 0; i < npages; i++) + ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]); + + hmm_buffer_free(buffer); +} TEST_HARNESS_MAIN From 284baf7dfbcc0c9596ad7215d6cacd3b10e9d209 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:25 +0800 Subject: [PATCH 108/321] dax: introduce holder for dax_device Patch series "v14 fsdax-rmap + v11 fsdax-reflink", v2. The patchset fsdax-rmap is aimed to support shared pages tracking for fsdax. It moves owner tracking from dax_assocaite_entry() to pmem device driver, by introducing an interface ->memory_failure() for struct pagemap. This interface is called by memory_failure() in mm, and implemented by pmem device. Then call holder operations to find the filesystem which the corrupted data located in, and call filesystem handler to track files or metadata associated with this page. Finally we are able to try to fix the corrupted data in filesystem and do other necessary processing, such as killing processes who are using the files affected. The call trace is like this: memory_failure() |* fsdax case |------------ |pgmap->ops->memory_failure() => pmem_pgmap_memory_failure() | dax_holder_notify_failure() => | dax_device->holder_ops->notify_failure() => | - xfs_dax_notify_failure() | |* xfs_dax_notify_failure() | |-------------------------- | | xfs_rmap_query_range() | | xfs_dax_failure_fn() | | * corrupted on metadata | | try to recover data, call xfs_force_shutdown() | | * corrupted on file data | | try to recover data, call mf_dax_kill_procs() |* normal case |------------- |mf_generic_kill_procs() The patchset fsdax-reflink attempts to add CoW support for fsdax, and takes XFS, which has both reflink and fsdax features, as an example. One of the key mechanisms needed to be implemented in fsdax is CoW. Copy the data from srcmap before we actually write data to the destination iomap. And we just copy range in which data won't be changed. Another mechanism is range comparison. In page cache case, readpage() is used to load data on disk to page cache in order to be able to compare data. In fsdax case, readpage() does not work. So, we need another compare data with direct access support. With the two mechanisms implemented in fsdax, we are able to make reflink and fsdax work together in XFS. This patch (of 14): To easily track filesystem from a pmem device, we introduce a holder for dax_device structure, and also its operation. This holder is used to remember who is using this dax_device: - When it is the backend of a filesystem, the holder will be the instance of this filesystem. - When this pmem device is one of the targets in a mapped device, the holder will be this mapped device. In this case, the mapped device has its own dax_device and it will follow the first rule. So that we can finally track to the filesystem we needed. The holder and holder_ops will be set when filesystem is being mounted, or an target device is being activated. 
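To illustrate the intended use, a minimal sketch of a filesystem registering itself as the holder at mount time follows. All example_* names are placeholders invented for this sketch (XFS becomes the real consumer later in the series); only the dax_* calls are the interface added here:

	#include <linux/dax.h>

	struct example_fs_info {
		struct dax_device	*daxdev;
		u64			dax_part_off;
	};

	/* Called by memory_failure() via dax_holder_notify_failure(). */
	static int example_notify_failure(struct dax_device *dax_dev,
					  u64 offset, u64 len, int mf_flags)
	{
		struct example_fs_info *fsi = dax_holder(dax_dev);

		if (!fsi)
			return -ENXIO;
		/* Map [offset, offset + len) back to files and notify users. */
		return 0;
	}

	static const struct dax_holder_operations example_holder_ops = {
		.notify_failure	= example_notify_failure,
	};

	/* Mount: remember the filesystem instance as the holder. */
	static int example_mount(struct example_fs_info *fsi,
				 struct block_device *bdev)
	{
		fsi->daxdev = fs_dax_get_by_bdev(bdev, &fsi->dax_part_off,
						 fsi, &example_holder_ops);
		return fsi->daxdev ? 0 : -ENODEV;
	}

	/* Unmount: clear the holder and drop the reference. */
	static void example_unmount(struct example_fs_info *fsi)
	{
		fs_put_dax(fsi->daxdev, fsi);
	}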
Link: https://lkml.kernel.org/r/20220603053738.1218681-1-ruansy.fnst@fujitsu.com Link: https://lkml.kernel.org/r/20220603053738.1218681-2-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Dan Williams Reviewed-by: Darrick J. Wong Cc: Dave Chinner Cc: Jane Chu Cc: Goldwyn Rodrigues Cc: Al Viro Cc: Matthew Wilcox Cc: Naoya Horiguchi Cc: Miaohe Lin Cc: Dan Williams Cc: Goldwyn Rodrigues Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- drivers/dax/super.c | 67 ++++++++++++++++++++++++++++++++++++++++++++- drivers/md/dm.c | 2 +- fs/erofs/super.c | 10 ++++--- fs/ext2/super.c | 7 +++-- fs/ext4/super.c | 9 +++--- fs/xfs/xfs_buf.c | 5 ++-- include/linux/dax.h | 33 ++++++++++++++++------ 7 files changed, 110 insertions(+), 23 deletions(-) diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 50a08b2ec2474..9b5e2a5eb0ae6 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -22,6 +22,8 @@ * @private: dax driver private data * @flags: state and boolean properties * @ops: operations for this device + * @holder_data: holder of a dax_device: could be filesystem or mapped device + * @holder_ops: operations for the inner holder */ struct dax_device { struct inode inode; @@ -29,6 +31,8 @@ struct dax_device { void *private; unsigned long flags; const struct dax_operations *ops; + void *holder_data; + const struct dax_holder_operations *holder_ops; }; static dev_t dax_devt; @@ -71,8 +75,11 @@ EXPORT_SYMBOL_GPL(dax_remove_host); * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax * @bdev: block device to find a dax_device for * @start_off: returns the byte offset into the dax_device that @bdev starts + * @holder: filesystem or mapped device inside the dax_device + * @ops: operations for the inner holder */ -struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off) +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off, + void *holder, const struct dax_holder_operations *ops) { struct dax_device *dax_dev; u64 part_size; @@ -92,11 +99,26 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off) dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk); if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode)) dax_dev = NULL; + else if (holder) { + if (!cmpxchg(&dax_dev->holder_data, NULL, holder)) + dax_dev->holder_ops = ops; + else + dax_dev = NULL; + } dax_read_unlock(id); return dax_dev; } EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); + +void fs_put_dax(struct dax_device *dax_dev, void *holder) +{ + if (dax_dev && holder && + cmpxchg(&dax_dev->holder_data, holder, NULL) == holder) + dax_dev->holder_ops = NULL; + put_dax(dax_dev); +} +EXPORT_SYMBOL_GPL(fs_put_dax); #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ enum dax_device_flags { @@ -204,6 +226,29 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, } EXPORT_SYMBOL_GPL(dax_recovery_write); +int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, + u64 len, int mf_flags) +{ + int rc, id; + + id = dax_read_lock(); + if (!dax_alive(dax_dev)) { + rc = -ENXIO; + goto out; + } + + if (!dax_dev->holder_ops) { + rc = -EOPNOTSUPP; + goto out; + } + + rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags); +out: + dax_read_unlock(id); + return rc; +} +EXPORT_SYMBOL_GPL(dax_holder_notify_failure); + #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) @@ -277,8 +322,15 @@ 
void kill_dax(struct dax_device *dax_dev) if (!dax_dev) return; + if (dax_dev->holder_data != NULL) + dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0); + clear_bit(DAXDEV_ALIVE, &dax_dev->flags); synchronize_srcu(&dax_srcu); + + /* clear holder data */ + dax_dev->holder_ops = NULL; + dax_dev->holder_data = NULL; } EXPORT_SYMBOL_GPL(kill_dax); @@ -420,6 +472,19 @@ void put_dax(struct dax_device *dax_dev) } EXPORT_SYMBOL_GPL(put_dax); +/** + * dax_holder() - obtain the holder of a dax device + * @dax_dev: a dax_device instance + + * Return: the holder's data which represents the holder if registered, + * otherwize NULL. + */ +void *dax_holder(struct dax_device *dax_dev) +{ + return dax_dev->holder_data; +} +EXPORT_SYMBOL_GPL(dax_holder); + /** * inode_dax: convert a public inode into its dax_dev * @inode: An inode with i_cdev pointing to a dax_dev diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b75f1ef7386b..0177a4ce9a186 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -758,7 +758,7 @@ static int open_table_device(struct table_device *td, dev_t dev, } td->dm_dev.bdev = bdev; - td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off); + td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL); return 0; } diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 95addc5c9d34d..3173debeaa5a1 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -255,7 +255,8 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, if (IS_ERR(bdev)) return PTR_ERR(bdev); dif->bdev = bdev; - dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off); + dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off, + NULL, NULL); } dif->blocks = le32_to_cpu(dis->blocks); @@ -720,7 +721,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) } sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, - &sbi->dax_part_off); + &sbi->dax_part_off, + NULL, NULL); } err = erofs_read_superblock(sb); @@ -812,7 +814,7 @@ static int erofs_release_device_info(int id, void *ptr, void *data) { struct erofs_device_info *dif = ptr; - fs_put_dax(dif->dax_dev); + fs_put_dax(dif->dax_dev, NULL); if (dif->bdev) blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL); erofs_fscache_unregister_cookie(&dif->fscache); @@ -886,7 +888,7 @@ static void erofs_kill_sb(struct super_block *sb) return; erofs_free_dev_context(sbi->devs); - fs_put_dax(sbi->dax_dev); + fs_put_dax(sbi->dax_dev, NULL); erofs_fscache_unregister_cookie(&sbi->s_fscache); erofs_fscache_unregister_fs(sb); kfree(sbi->opt.fsid); diff --git a/fs/ext2/super.c b/fs/ext2/super.c index f6a19f6d9f6d5..4638946251b9b 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -171,7 +171,7 @@ static void ext2_put_super (struct super_block * sb) brelse (sbi->s_sbh); sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); - fs_put_dax(sbi->s_daxdev); + fs_put_dax(sbi->s_daxdev, NULL); kfree(sbi); } @@ -835,7 +835,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) } sb->s_fs_info = sbi; sbi->s_sb_block = sb_block; - sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off); + sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off, + NULL, NULL); spin_lock_init(&sbi->s_lock); ret = -EINVAL; @@ -1204,7 +1205,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) failed_mount: brelse(bh); failed_sbi: - fs_put_dax(sbi->s_daxdev); + fs_put_dax(sbi->s_daxdev, NULL); sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); diff --git a/fs/ext4/super.c 
b/fs/ext4/super.c index 845f2f8aee5f9..1f8bf507ba5ab 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1307,7 +1307,7 @@ static void ext4_put_super(struct super_block *sb) if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); - fs_put_dax(sbi->s_daxdev); + fs_put_dax(sbi->s_daxdev, NULL); fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); #if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); @@ -4272,7 +4272,7 @@ static void ext4_free_sbi(struct ext4_sb_info *sbi) return; kfree(sbi->s_blockgroup_lock); - fs_put_dax(sbi->s_daxdev); + fs_put_dax(sbi->s_daxdev, NULL); kfree(sbi); } @@ -4284,7 +4284,8 @@ static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb) if (!sbi) return NULL; - sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off); + sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off, + NULL, NULL); sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); @@ -4296,7 +4297,7 @@ static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb) sbi->s_sb = sb; return sbi; err_out: - fs_put_dax(sbi->s_daxdev); + fs_put_dax(sbi->s_daxdev, NULL); kfree(sbi); return NULL; } diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 4aa9c9cf5b6e7..1ec2a7b6d44e9 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1911,7 +1911,7 @@ xfs_free_buftarg( list_lru_destroy(&btp->bt_lru); blkdev_issue_flush(btp->bt_bdev); - fs_put_dax(btp->bt_daxdev); + fs_put_dax(btp->bt_daxdev, NULL); kmem_free(btp); } @@ -1964,7 +1964,8 @@ xfs_alloc_buftarg( btp->bt_mount = mp; btp->bt_dev = bdev->bd_dev; btp->bt_bdev = bdev; - btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off); + btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off, NULL, + NULL); /* * Buffer IO error rate limiting. 
Limit it to no more than 10 messages diff --git a/include/linux/dax.h b/include/linux/dax.h index e7b81634c52ad..cf85fc36da5fa 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -43,8 +43,21 @@ struct dax_operations { void *addr, size_t bytes, struct iov_iter *iter); }; +struct dax_holder_operations { + /* + * notify_failure - notify memory failure into inner holder device + * @dax_dev: the dax device which contains the holder + * @offset: offset on this dax device where memory failure occurs + * @len: length of this memory failure event + * @flags: action flags for memory failure handler + */ + int (*notify_failure)(struct dax_device *dax_dev, u64 offset, + u64 len, int mf_flags); +}; + #if IS_ENABLED(CONFIG_DAX) struct dax_device *alloc_dax(void *private, const struct dax_operations *ops); +void *dax_holder(struct dax_device *dax_dev); void put_dax(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); void dax_write_cache(struct dax_device *dax_dev, bool wc); @@ -66,6 +79,10 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, return dax_synchronous(dax_dev); } #else +static inline void *dax_holder(struct dax_device *dax_dev) +{ + return NULL; +} static inline struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) { @@ -114,12 +131,9 @@ struct writeback_control; #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk); void dax_remove_host(struct gendisk *disk); -struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, - u64 *start_off); -static inline void fs_put_dax(struct dax_device *dax_dev) -{ - put_dax(dax_dev); -} +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off, + void *holder, const struct dax_holder_operations *ops); +void fs_put_dax(struct dax_device *dax_dev, void *holder); #else static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk) { @@ -129,11 +143,12 @@ static inline void dax_remove_host(struct gendisk *disk) { } static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, - u64 *start_off) + u64 *start_off, void *holder, + const struct dax_holder_operations *ops) { return NULL; } -static inline void fs_put_dax(struct dax_device *dax_dev) +static inline void fs_put_dax(struct dax_device *dax_dev, void *holder) { } #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ @@ -203,6 +218,8 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages); +int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len, + int mf_flags); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, From 1dc0bbe86ccc3c8f9e0ff0001d78dc667699cc54 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:26 +0800 Subject: [PATCH 109/321] mm: factor helpers for memory_failure_dev_pagemap memory_failure_dev_pagemap code is a bit complex before introduce RMAP feature for fsdax. So it is needed to factor some helper functions to simplify these code. Link: https://lkml.kernel.org/r/20220603053738.1218681-3-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Darrick J. 
Wong Reviewed-by: Christoph Hellwig Reviewed-by: Dan Williams Reviewed-by: Miaohe Lin Cc: Al Viro Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- mm/memory-failure.c | 167 ++++++++++++++++++++++++-------------------- 1 file changed, 92 insertions(+), 75 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b7ca5db7e60ea..ddd636772fe4c 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1499,6 +1499,95 @@ static int try_to_split_thp_page(struct page *page, const char *msg) return 0; } +static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, + struct address_space *mapping, pgoff_t index, int flags) +{ + struct to_kill *tk; + unsigned long size = 0; + + list_for_each_entry(tk, to_kill, nd) + if (tk->size_shift) + size = max(size, 1UL << tk->size_shift); + + if (size) { + /* + * Unmap the largest mapping to avoid breaking up device-dax + * mappings which are constant size. The actual size of the + * mapping being torn down is communicated in siginfo, see + * kill_proc() + */ + loff_t start = (index << PAGE_SHIFT) & ~(size - 1); + + unmap_mapping_range(mapping, start, size, 0); + } + + kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); +} + +static int mf_generic_kill_procs(unsigned long long pfn, int flags, + struct dev_pagemap *pgmap) +{ + struct page *page = pfn_to_page(pfn); + LIST_HEAD(to_kill); + dax_entry_t cookie; + int rc = 0; + + /* + * Pages instantiated by device-dax (not filesystem-dax) + * may be compound pages. + */ + page = compound_head(page); + + /* + * Prevent the inode from being freed while we are interrogating + * the address_space, typically this would be handled by + * lock_page(), but dax pages do not use the page lock. This + * also prevents changes to the mapping of this pfn until + * poison signaling is complete. + */ + cookie = dax_lock_page(page); + if (!cookie) + return -EBUSY; + + if (hwpoison_filter(page)) { + rc = -EOPNOTSUPP; + goto unlock; + } + + switch (pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_COHERENT: + /* + * TODO: Handle device pages which may need coordination + * with device-side memory. + */ + rc = -ENXIO; + goto unlock; + default: + break; + } + + /* + * Use this flag as an indication that the dax page has been + * remapped UC to prevent speculative consumption of poison. + */ + SetPageHWPoison(page); + + /* + * Unlike System-RAM there is no possibility to swap in a + * different physical page at a given virtual address, so all + * userspace consumption of ZONE_DEVICE memory necessitates + * SIGBUS (i.e. MF_MUST_KILL) + */ + flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; + collect_procs(page, &to_kill, true); + + unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); +unlock: + dax_unlock_page(page, cookie); + return rc; +} + /* * Called from hugetlb code with hugetlb_lock held. 
* @@ -1645,12 +1734,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, struct dev_pagemap *pgmap) { struct page *page = pfn_to_page(pfn); - unsigned long size = 0; - struct to_kill *tk; - LIST_HEAD(tokill); - int rc = -EBUSY; - loff_t start; - dax_entry_t cookie; + int rc = -ENXIO; if (flags & MF_COUNT_INCREASED) /* @@ -1659,77 +1743,10 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, put_page(page); /* device metadata space is not recoverable */ - if (!pgmap_pfn_valid(pgmap, pfn)) { - rc = -ENXIO; - goto out; - } - - /* - * Pages instantiated by device-dax (not filesystem-dax) - * may be compound pages. - */ - page = compound_head(page); - - /* - * Prevent the inode from being freed while we are interrogating - * the address_space, typically this would be handled by - * lock_page(), but dax pages do not use the page lock. This - * also prevents changes to the mapping of this pfn until - * poison signaling is complete. - */ - cookie = dax_lock_page(page); - if (!cookie) + if (!pgmap_pfn_valid(pgmap, pfn)) goto out; - if (hwpoison_filter(page)) { - rc = -EOPNOTSUPP; - goto unlock; - } - - switch (pgmap->type) { - case MEMORY_DEVICE_PRIVATE: - case MEMORY_DEVICE_COHERENT: - /* - * TODO: Handle device pages which may need coordination - * with device-side memory. - */ - goto unlock; - default: - break; - } - - /* - * Use this flag as an indication that the dax page has been - * remapped UC to prevent speculative consumption of poison. - */ - SetPageHWPoison(page); - - /* - * Unlike System-RAM there is no possibility to swap in a - * different physical page at a given virtual address, so all - * userspace consumption of ZONE_DEVICE memory necessitates - * SIGBUS (i.e. MF_MUST_KILL) - */ - flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; - collect_procs(page, &tokill, true); - - list_for_each_entry(tk, &tokill, nd) - if (tk->size_shift) - size = max(size, 1UL << tk->size_shift); - if (size) { - /* - * Unmap the largest mapping to avoid breaking up - * device-dax mappings which are constant size. 
The - * actual size of the mapping being torn down is - * communicated in siginfo, see kill_proc() - */ - start = (page->index << PAGE_SHIFT) & ~(size - 1); - unmap_mapping_range(page->mapping, start, size, 0); - } - kill_procs(&tokill, true, false, pfn, flags); - rc = 0; -unlock: - dax_unlock_page(page, cookie); + rc = mf_generic_kill_procs(pfn, flags, pgmap); out: /* drop pgmap ref acquired in caller */ put_dev_pagemap(pgmap); From 99cff1e2da88c719c6f5c0cb330796f132b812d8 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 4 Jun 2022 11:31:06 -0700 Subject: [PATCH 110/321] mm-factor-helpers-for-memory_failure_dev_pagemap-fix fix CONFIG_HUGETLB_PAGE=n build Reported-by: kernel test robot Cc: Shiyang Ruan Signed-off-by: Andrew Morton --- mm/memory-failure.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ddd636772fe4c..ce20e65b4d646 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1723,12 +1723,21 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb unlock_page(head); return res; } + #else + +static inline int mf_generic_kill_procs(unsigned long long pfn, int flags, + struct dev_pagemap *pgmap) +{ + return 0; +} + static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { return 0; } -#endif + +#endif /* CONFIG_HUGETLB_PAGE */ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, struct dev_pagemap *pgmap) From b54bffafe769a08e432b4ba4d747b5bc1dc36fb1 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Tue, 28 Jun 2022 19:21:43 +0800 Subject: [PATCH 111/321] mm/memory-failure: fix redefinition of mf_generic_kill_procs If CONFIG_HUGETLB_PAGE=n, building fails: mm/memory-failure.c:1805:19: error: redefinition of `mf_generic_kill_procs' static inline int mf_generic_kill_procs(unsigned long long pfn, int flags, ^~~~~~~~~~~~~~~~~~~~~ mm/memory-failure.c:1564:12: note: previous definition of `mf_generic_kill_procs' was here static int mf_generic_kill_procs(unsigned long long pfn, int flags, This patch fixes that. Link: https://lkml.kernel.org/r/20220628112143.1170473-1-zhengbin13@huawei.com Fixes: 4184e8d7d056 ("mm-factor-helpers-for-memory_failure_dev_pagemap-fix") Signed-off-by: Zheng Bin Cc: Shiyang Ruan Cc: Naoya Horiguchi Cc: Miaohe Lin Cc: gaochao Signed-off-by: Andrew Morton --- mm/memory-failure.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ce20e65b4d646..f8a8a5d45eba8 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1725,13 +1725,6 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb } #else - -static inline int mf_generic_kill_procs(unsigned long long pfn, int flags, - struct dev_pagemap *pgmap) -{ - return 0; -} - static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { return 0; From 078c22486ff9a8bc0a7899786667eba7e4293fd2 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:27 +0800 Subject: [PATCH 112/321] pagemap,pmem: introduce ->memory_failure() When memory-failure occurs, we call this function which is implemented by each kind of devices. For the fsdax case, pmem device driver implements it. Pmem device driver will find out the filesystem in which the corrupted page located in. With dax_holder notify support, we are able to notify the memory failure from pmem driver to upper layers. 
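As a rough illustration only (not part of this patch series), this is how a pgmap-backed driver plugs into the new hook and forwards a failure to its dax holder; the "foo" names below are invented for the sketch, and the real in-tree user is the pmem driver in the diff that follows:

	#include <linux/memremap.h>
	#include <linux/dax.h>
	#include <linux/pfn.h>

	/* Hypothetical driver-private structure, invented for illustration. */
	struct foo_dev {
		struct dev_pagemap pgmap;
		struct dax_device *dax_dev;
		phys_addr_t phys_addr;
	};

	static int foo_pagemap_memory_failure(struct dev_pagemap *pgmap,
			unsigned long pfn, unsigned long nr_pages, int mf_flags)
	{
		struct foo_dev *foo = container_of(pgmap, struct foo_dev, pgmap);
		u64 offset = PFN_PHYS(pfn) - foo->phys_addr;
		u64 len = (u64)nr_pages << PAGE_SHIFT;

		/*
		 * Forward the poisoned range to whoever registered as the dax
		 * holder (typically the filesystem).  If nobody did, or the
		 * holder returns -EOPNOTSUPP, memory_failure() falls back to
		 * the generic mf_generic_kill_procs() path.
		 */
		return dax_holder_notify_failure(foo->dax_dev, offset, len, mf_flags);
	}

	static const struct dev_pagemap_ops foo_pagemap_ops = {
		.memory_failure = foo_pagemap_memory_failure,
	};
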
If something in the notify routine is not supported, memory_failure will fall back to the generic handler. Link: https://lkml.kernel.org/r/20220603053738.1218681-4-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Dan Williams Reviewed-by: Darrick J. Wong Reviewed-by: Naoya Horiguchi Cc: Al Viro Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- drivers/nvdimm/pmem.c | 17 +++++++++++++++++ include/linux/memremap.h | 12 ++++++++++++ mm/memory-failure.c | 14 ++++++++++++++ 3 files changed, 43 insertions(+) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 629d10fcf53b2..107c9cb3d57d7 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -453,6 +453,21 @@ static void pmem_release_disk(void *__pmem) blk_cleanup_disk(pmem->disk); } +static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap, + unsigned long pfn, unsigned long nr_pages, int mf_flags) +{ + struct pmem_device *pmem = + container_of(pgmap, struct pmem_device, pgmap); + u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset; + u64 len = nr_pages << PAGE_SHIFT; + + return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags); +} + +static const struct dev_pagemap_ops fsdax_pagemap_ops = { + .memory_failure = pmem_pagemap_memory_failure, +}; + static int pmem_attach_disk(struct device *dev, struct nd_namespace_common *ndns) { @@ -514,6 +529,7 @@ static int pmem_attach_disk(struct device *dev, pmem->pfn_flags = PFN_DEV; if (is_nd_pfn(dev)) { pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; + pmem->pgmap.ops = &fsdax_pagemap_ops; addr = devm_memremap_pages(dev, &pmem->pgmap); pfn_sb = nd_pfn->pfn_sb; pmem->data_offset = le64_to_cpu(pfn_sb->dataoff); @@ -527,6 +543,7 @@ static int pmem_attach_disk(struct device *dev, pmem->pgmap.range.end = res->end; pmem->pgmap.nr_range = 1; pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; + pmem->pgmap.ops = &fsdax_pagemap_ops; addr = devm_memremap_pages(dev, &pmem->pgmap); pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 0f22f6f42e7dc..e55db0f2e4e8c 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -23,6 +23,18 @@ struct dev_pagemap_ops { * the page back to a CPU accessible page. */ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); + + /* + * Handle the memory failure happens on a range of pfns. Notify the + * processes who are using these pfns, and try to recover the data on + * them if necessary. The mf_flags is finally passed to the recover + * function through the whole notify routine. + * + * When this is not implemented, or it returns -EOPNOTSUPP, the caller + * will fall back to a common handler called mf_generic_kill_procs(). + */ + int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, + unsigned long nr_pages, int mf_flags); }; #define PGMAP_ALTMAP_VALID (1 << 0) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index f8a8a5d45eba8..46c77151f7267 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1748,6 +1748,20 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, if (!pgmap_pfn_valid(pgmap, pfn)) goto out; + /* + * Call driver's implementation to handle the memory failure, otherwise + * fall back to generic handler.
+ */ + if (pgmap->ops->memory_failure) { + rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags); + /* + * Fall back to generic handler too if operation is not + * supported inside the driver/device/filesystem. + */ + if (rc != -EOPNOTSUPP) + goto out; + } + rc = mf_generic_kill_procs(pfn, flags, pgmap); out: /* drop pgmap ref acquired in caller */ From 9fec8937a96a1f049ba6af3330fdb486e081ab3f Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:28 +0800 Subject: [PATCH 113/321] fsdax: introduce dax_lock_mapping_entry() The current dax_lock_page() locks dax entry by obtaining mapping and index in page. To support 1-to-N RMAP in NVDIMM, we need a new function to lock a specific dax entry corresponding to this file's mapping,index. And output the page corresponding to the specific dax entry for caller use. Link: https://lkml.kernel.org/r/20220603053738.1218681-5-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/dax.c | 63 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/dax.h | 15 +++++++++++ 2 files changed, 78 insertions(+) diff --git a/fs/dax.c b/fs/dax.c index 4155a6107fa10..65e44d78b3bb5 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -455,6 +455,69 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie) dax_unlock_entry(&xas, (void *)cookie); } +/* + * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping + * @mapping: the file's mapping whose entry we want to lock + * @index: the offset within this file + * @page: output the dax page corresponding to this dax entry + * + * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry + * could not be locked. + */ +dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, + struct page **page) +{ + XA_STATE(xas, NULL, 0); + void *entry; + + rcu_read_lock(); + for (;;) { + entry = NULL; + if (!dax_mapping(mapping)) + break; + + xas.xa = &mapping->i_pages; + xas_lock_irq(&xas); + xas_set(&xas, index); + entry = xas_load(&xas); + if (dax_is_locked(entry)) { + rcu_read_unlock(); + wait_entry_unlocked(&xas, entry); + rcu_read_lock(); + continue; + } + if (!entry || + dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { + /* + * Because we are looking for entry from file's mapping + * and index, so the entry may not be inserted for now, + * or even a zero/empty entry. We don't think this is + * an error case. So, return a special value and do + * not output @page. + */ + entry = (void *)~0UL; + } else { + *page = pfn_to_page(dax_to_pfn(entry)); + dax_lock_entry(&xas, entry); + } + xas_unlock_irq(&xas); + break; + } + rcu_read_unlock(); + return (dax_entry_t)entry; +} + +void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, + dax_entry_t cookie) +{ + XA_STATE(xas, &mapping->i_pages, index); + + if (cookie == ~0UL) + return; + + dax_unlock_entry(&xas, (void *)cookie); +} + /* * Find page cache entry at given index. If it is a DAX entry, return it * with the entry locked. 
If the page cache doesn't contain an entry at diff --git a/include/linux/dax.h b/include/linux/dax.h index cf85fc36da5fa..7116681b48c0a 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -161,6 +161,10 @@ struct page *dax_layout_busy_page(struct address_space *mapping); struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); dax_entry_t dax_lock_page(struct page *page); void dax_unlock_page(struct page *page, dax_entry_t cookie); +dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, + unsigned long index, struct page **page); +void dax_unlock_mapping_entry(struct address_space *mapping, + unsigned long index, dax_entry_t cookie); #else static inline struct page *dax_layout_busy_page(struct address_space *mapping) { @@ -188,6 +192,17 @@ static inline dax_entry_t dax_lock_page(struct page *page) static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) { } + +static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, + unsigned long index, struct page **page) +{ + return 0; +} + +static inline void dax_unlock_mapping_entry(struct address_space *mapping, + unsigned long index, dax_entry_t cookie) +{ +} #endif int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, From 8bb94153b8179e2d1d72f0494ea2906bc348d23e Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:29 +0800 Subject: [PATCH 114/321] mm: introduce mf_dax_kill_procs() for fsdax case This new function is a variant of mf_generic_kill_procs that accepts a file, offset pair instead of a struct to support multiple files sharing a DAX mapping. It is intended to be called by the file systems as part of the memory_failure handler after the file system performed a reverse mapping from the storage address to the file and file offset. Link: https://lkml.kernel.org/r/20220603053738.1218681-6-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Dan Williams Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. 
Wong Reviewed-by: Miaohe Lin Cc: Al Viro Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 + mm/memory-failure.c | 96 ++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 88 insertions(+), 10 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index b976796ce96fb..d112fea0625a9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3197,6 +3197,8 @@ enum mf_flags { MF_UNPOISON = 1 << 4, MF_SW_SIMULATED = 1 << 5, }; +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, + unsigned long count, int mf_flags); extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); extern void memory_failure_queue_kick(int cpu); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 46c77151f7267..c9931c6763356 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -297,10 +297,9 @@ void shake_page(struct page *p) } EXPORT_SYMBOL_GPL(shake_page); -static unsigned long dev_pagemap_mapping_shift(struct page *page, - struct vm_area_struct *vma) +static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, + unsigned long address) { - unsigned long address = vma_address(page, vma); unsigned long ret = 0; pgd_t *pgd; p4d_t *p4d; @@ -340,10 +339,14 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page, /* * Schedule a process for later kill. * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. + * + * Notice: @fsdax_pgoff is used only when @p is a fsdax page. + * In other cases, such as anonymous and file-backend page, the address to be + * killed can be caculated by @p itself. */ static void add_to_kill(struct task_struct *tsk, struct page *p, - struct vm_area_struct *vma, - struct list_head *to_kill) + pgoff_t fsdax_pgoff, struct vm_area_struct *vma, + struct list_head *to_kill) { struct to_kill *tk; @@ -354,9 +357,15 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, } tk->addr = page_address_in_vma(p, vma); - if (is_zone_device_page(p)) - tk->size_shift = dev_pagemap_mapping_shift(p, vma); - else + if (is_zone_device_page(p)) { + /* + * Since page->mapping is not used for fsdax, we need + * calculate the address based on the vma. + */ + if (p->pgmap->type == MEMORY_DEVICE_FS_DAX) + tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma); + tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr); + } else tk->size_shift = page_shift(compound_head(p)); /* @@ -505,7 +514,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, if (!page_mapped_in_vma(page, vma)) continue; if (vma->vm_mm == t->mm) - add_to_kill(t, page, vma, to_kill); + add_to_kill(t, page, 0, vma, to_kill); } } read_unlock(&tasklist_lock); @@ -541,13 +550,41 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, * to be informed of all such data corruptions. */ if (vma->vm_mm == t->mm) - add_to_kill(t, page, vma, to_kill); + add_to_kill(t, page, 0, vma, to_kill); } } read_unlock(&tasklist_lock); i_mmap_unlock_read(mapping); } +#ifdef CONFIG_FS_DAX +/* + * Collect processes when the error hit a fsdax page. 
+ */ +static void collect_procs_fsdax(struct page *page, + struct address_space *mapping, pgoff_t pgoff, + struct list_head *to_kill) +{ + struct vm_area_struct *vma; + struct task_struct *tsk; + + i_mmap_lock_read(mapping); + read_lock(&tasklist_lock); + for_each_process(tsk) { + struct task_struct *t = task_early_kill(tsk, true); + + if (!t) + continue; + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { + if (vma->vm_mm == t->mm) + add_to_kill(t, page, pgoff, vma, to_kill); + } + } + read_unlock(&tasklist_lock); + i_mmap_unlock_read(mapping); +} +#endif /* CONFIG_FS_DAX */ + /* * Collect the processes who have the corrupted page mapped to kill. */ @@ -1588,6 +1625,45 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, return rc; } +#ifdef CONFIG_FS_DAX +/** + * mf_dax_kill_procs - Collect and kill processes who are using this file range + * @mapping: address_space of the file in use + * @index: start pgoff of the range within the file + * @count: length of the range, in unit of PAGE_SIZE + * @mf_flags: memory failure flags + */ +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, + unsigned long count, int mf_flags) +{ + LIST_HEAD(to_kill); + dax_entry_t cookie; + struct page *page; + size_t end = index + count; + + mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; + + for (; index < end; index++) { + page = NULL; + cookie = dax_lock_mapping_entry(mapping, index, &page); + if (!cookie) + return -EBUSY; + if (!page) + goto unlock; + + SetPageHWPoison(page); + + collect_procs_fsdax(page, mapping, index, &to_kill); + unmap_and_kill(&to_kill, page_to_pfn(page), mapping, + index, mf_flags); +unlock: + dax_unlock_mapping_entry(mapping, index, cookie); + } + return 0; +} +EXPORT_SYMBOL_GPL(mf_dax_kill_procs); +#endif /* CONFIG_FS_DAX */ + /* * Called from hugetlb code with hugetlb_lock held. * From 10aa601bf3c12b80a9defd548c51a2960438c39d Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:30 +0800 Subject: [PATCH 115/321] xfs: implement ->notify_failure() for XFS Introduce xfs_notify_failure.c to handle failure related works, such as implement ->notify_failure(), register/unregister dax holder in xfs, and so on. If the rmap feature of XFS enabled, we can query it to find files and metadata which are associated with the corrupt data. For now all we do is kill processes with that file mapped into their address spaces, but future patches could actually do something about corrupt metadata. After that, the memory failure needs to notify the processes who are using those files. Link: https://lkml.kernel.org/r/20220603053738.1218681-7-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. 
Wong Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/xfs/Makefile | 5 + fs/xfs/xfs_buf.c | 11 +- fs/xfs/xfs_fsops.c | 3 + fs/xfs/xfs_mount.h | 1 + fs/xfs/xfs_notify_failure.c | 220 ++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_super.h | 1 + 6 files changed, 238 insertions(+), 3 deletions(-) create mode 100644 fs/xfs/xfs_notify_failure.c diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index b056cfc6398e0..805a0d0a88c10 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -129,6 +129,11 @@ xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o xfs-$(CONFIG_EXPORTFS_BLOCK_OPS) += xfs_pnfs.o +# notify failure +ifeq ($(CONFIG_MEMORY_FAILURE),y) +xfs-$(CONFIG_FS_DAX) += xfs_notify_failure.o +endif + # online scrub/repair ifeq ($(CONFIG_XFS_ONLINE_SCRUB),y) diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 1ec2a7b6d44e9..59c6b62fde57e 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -5,6 +5,7 @@ */ #include "xfs.h" #include +#include #include "xfs_shared.h" #include "xfs_format.h" @@ -1911,7 +1912,7 @@ xfs_free_buftarg( list_lru_destroy(&btp->bt_lru); blkdev_issue_flush(btp->bt_bdev); - fs_put_dax(btp->bt_daxdev, NULL); + fs_put_dax(btp->bt_daxdev, btp->bt_mount); kmem_free(btp); } @@ -1958,14 +1959,18 @@ xfs_alloc_buftarg( struct block_device *bdev) { xfs_buftarg_t *btp; + const struct dax_holder_operations *ops = NULL; +#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE) + ops = &xfs_dax_holder_operations; +#endif btp = kmem_zalloc(sizeof(*btp), KM_NOFS); btp->bt_mount = mp; btp->bt_dev = bdev->bd_dev; btp->bt_bdev = bdev; - btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off, NULL, - NULL); + btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off, + mp, ops); /* * Buffer IO error rate limiting. Limit it to no more than 10 messages diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index d4a77c53f94ba..0bc74a71a0952 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -528,6 +528,9 @@ xfs_do_force_shutdown( } else if (flags & SHUTDOWN_CORRUPT_INCORE) { tag = XFS_PTAG_SHUTDOWN_CORRUPT; why = "Corruption of in-memory data"; + } else if (flags & SHUTDOWN_CORRUPT_ONDISK) { + tag = XFS_PTAG_SHUTDOWN_CORRUPT; + why = "Corruption of on-disk metadata"; } else { tag = XFS_PTAG_SHUTDOWN_IOERROR; why = "Metadata I/O Error"; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index ba5d42abf66e0..32a67f78fdd10 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -454,6 +454,7 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname, #define SHUTDOWN_LOG_IO_ERROR (1u << 1) /* write attempt to the log failed */ #define SHUTDOWN_FORCE_UMOUNT (1u << 2) /* shutdown from a forced unmount */ #define SHUTDOWN_CORRUPT_INCORE (1u << 3) /* corrupt in-memory structures */ +#define SHUTDOWN_CORRUPT_ONDISK (1u << 4) /* corrupt metadata on device */ #define XFS_SHUTDOWN_STRINGS \ { SHUTDOWN_META_IO_ERROR, "metadata_io" }, \ diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c new file mode 100644 index 0000000000000..aa8dc27c599ce --- /dev/null +++ b/fs/xfs/xfs_notify_failure.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Fujitsu. All Rights Reserved. 
+ */ + +#include "xfs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_btree.h" +#include "xfs_inode.h" +#include "xfs_icache.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_rtalloc.h" +#include "xfs_trans.h" + +#include +#include + +struct failure_info { + xfs_agblock_t startblock; + xfs_extlen_t blockcount; + int mf_flags; +}; + +static pgoff_t +xfs_failure_pgoff( + struct xfs_mount *mp, + const struct xfs_rmap_irec *rec, + const struct failure_info *notify) +{ + loff_t pos = XFS_FSB_TO_B(mp, rec->rm_offset); + + if (notify->startblock > rec->rm_startblock) + pos += XFS_FSB_TO_B(mp, + notify->startblock - rec->rm_startblock); + return pos >> PAGE_SHIFT; +} + +static unsigned long +xfs_failure_pgcnt( + struct xfs_mount *mp, + const struct xfs_rmap_irec *rec, + const struct failure_info *notify) +{ + xfs_agblock_t end_rec; + xfs_agblock_t end_notify; + xfs_agblock_t start_cross; + xfs_agblock_t end_cross; + + start_cross = max(rec->rm_startblock, notify->startblock); + + end_rec = rec->rm_startblock + rec->rm_blockcount; + end_notify = notify->startblock + notify->blockcount; + end_cross = min(end_rec, end_notify); + + return XFS_FSB_TO_B(mp, end_cross - start_cross) >> PAGE_SHIFT; +} + +static int +xfs_dax_failure_fn( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *data) +{ + struct xfs_mount *mp = cur->bc_mp; + struct xfs_inode *ip; + struct failure_info *notify = data; + int error = 0; + + if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || + (rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))) { + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK); + return -EFSCORRUPTED; + } + + /* Get files that incore, filter out others that are not in use. */ + error = xfs_iget(mp, cur->bc_tp, rec->rm_owner, XFS_IGET_INCORE, + 0, &ip); + /* Continue the rmap query if the inode isn't incore */ + if (error == -ENODATA) + return 0; + if (error) + return error; + + error = mf_dax_kill_procs(VFS_I(ip)->i_mapping, + xfs_failure_pgoff(mp, rec, notify), + xfs_failure_pgcnt(mp, rec, notify), + notify->mf_flags); + xfs_irele(ip); + return error; +} + +static int +xfs_dax_notify_ddev_failure( + struct xfs_mount *mp, + xfs_daddr_t daddr, + xfs_daddr_t bblen, + int mf_flags) +{ + struct xfs_trans *tp = NULL; + struct xfs_btree_cur *cur = NULL; + struct xfs_buf *agf_bp = NULL; + int error = 0; + xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, daddr); + xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno); + xfs_fsblock_t end_fsbno = XFS_DADDR_TO_FSB(mp, daddr + bblen); + xfs_agnumber_t end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno); + + error = xfs_trans_alloc_empty(mp, &tp); + if (error) + return error; + + for (; agno <= end_agno; agno++) { + struct xfs_rmap_irec ri_low = { }; + struct xfs_rmap_irec ri_high; + struct failure_info notify; + struct xfs_agf *agf; + xfs_agblock_t agend; + + error = xfs_alloc_read_agf(mp, tp, agno, 0, &agf_bp); + if (error) + break; + + cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, agf_bp->b_pag); + + /* + * Set the rmap range from ri_low to ri_high, which represents + * a [start, end] where we looking for the files or metadata. 
+ */ + memset(&ri_high, 0xFF, sizeof(ri_high)); + ri_low.rm_startblock = XFS_FSB_TO_AGBNO(mp, fsbno); + if (agno == end_agno) + ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno); + + agf = agf_bp->b_addr; + agend = min(be32_to_cpu(agf->agf_length), + ri_high.rm_startblock); + notify.startblock = ri_low.rm_startblock; + notify.blockcount = agend - ri_low.rm_startblock; + + error = xfs_rmap_query_range(cur, &ri_low, &ri_high, + xfs_dax_failure_fn, ¬ify); + xfs_btree_del_cursor(cur, error); + xfs_trans_brelse(tp, agf_bp); + if (error) + break; + + fsbno = XFS_AGB_TO_FSB(mp, agno + 1, 0); + } + + xfs_trans_cancel(tp); + return error; +} + +static int +xfs_dax_notify_failure( + struct dax_device *dax_dev, + u64 offset, + u64 len, + int mf_flags) +{ + struct xfs_mount *mp = dax_holder(dax_dev); + u64 ddev_start; + u64 ddev_end; + + if (!(mp->m_sb.sb_flags & SB_BORN)) { + xfs_warn(mp, "filesystem is not ready for notify_failure()!"); + return -EIO; + } + + if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) { + xfs_warn(mp, + "notify_failure() not supported on realtime device!"); + return -EOPNOTSUPP; + } + + if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev && + mp->m_logdev_targp != mp->m_ddev_targp) { + xfs_err(mp, "ondisk log corrupt, shutting down fs!"); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK); + return -EFSCORRUPTED; + } + + if (!xfs_has_rmapbt(mp)) { + xfs_warn(mp, "notify_failure() needs rmapbt enabled!"); + return -EOPNOTSUPP; + } + + ddev_start = mp->m_ddev_targp->bt_dax_part_off; + ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1; + + /* Ignore the range out of filesystem area */ + if (offset + len < ddev_start) + return -ENXIO; + if (offset > ddev_end) + return -ENXIO; + + /* Calculate the real range when it touches the boundary */ + if (offset > ddev_start) + offset -= ddev_start; + else { + len -= ddev_start - offset; + offset = 0; + } + if (offset + len > ddev_end) + len -= ddev_end - offset; + + return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len), + mf_flags); +} + +const struct dax_holder_operations xfs_dax_holder_operations = { + .notify_failure = xfs_dax_notify_failure, +}; diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h index 3cd5a51bace13..364e2c2648a8a 100644 --- a/fs/xfs/xfs_super.h +++ b/fs/xfs/xfs_super.h @@ -92,6 +92,7 @@ extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *, extern const struct export_operations xfs_export_operations; extern const struct quotactl_ops xfs_quotactl_operations; +extern const struct dax_holder_operations xfs_dax_holder_operations; extern void xfs_reinit_percpu_counters(struct xfs_mount *mp); From 3db37a2c328ad628cc1857240082b1dde3f8607e Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:31 +0800 Subject: [PATCH 116/321] fsdax: set a CoW flag when associate reflink mappings Introduce a PAGE_MAPPING_DAX_COW flag to support association with CoW file mappings. In this case, since the dax-rmap has already took the responsibility to look up for shared files by given dax page, the page->mapping is no longer to used for rmap but for marking that this dax page is shared. And to make sure disassociation works fine, we use page->index as refcount, and clear page->mapping to the initial state when page->index is decreased to 0. With the help of this new flag, it is able to distinguish normal case and CoW case, and keep the warning in normal case. 
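A minimal sketch of that convention, for illustration only: the helpers the patch itself adds are dax_mapping_is_cow() and dax_mapping_set_cow() in fs/dax.c, and the "foo" names below are invented. It only restates the shared-marker/share-count idea described above, not the exact code in the diff:

	#include <linux/mm_types.h>
	#include <linux/page-flags.h>

	/* Is this fsdax page currently shared between files (reflinked)? */
	static inline bool foo_dax_page_is_shared(struct page *page)
	{
		/* page->mapping holds the marker value rather than a mapping. */
		return (unsigned long)page->mapping == PAGE_MAPPING_DAX_COW;
	}

	/* Drop one share of a CoW fsdax page (illustrative only). */
	static void foo_dax_page_share_put(struct page *page)
	{
		/* While shared, page->index is used as the share count ... */
		if (--page->index > 0)
			return;
		/* ... and the page returns to its initial state on the last put. */
		page->mapping = NULL;
		page->index = 0;
	}
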
Link: https://lkml.kernel.org/r/20220603053738.1218681-8-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/dax.c | 50 +++++++++++++++++++++++++++++++------- include/linux/page-flags.h | 6 +++++ 2 files changed, 47 insertions(+), 9 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 65e44d78b3bb5..b59b864017ad9 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -334,13 +334,35 @@ static unsigned long dax_end_pfn(void *entry) for (pfn = dax_to_pfn(entry); \ pfn < dax_end_pfn(entry); pfn++) +static inline bool dax_mapping_is_cow(struct address_space *mapping) +{ + return (unsigned long)mapping == PAGE_MAPPING_DAX_COW; +} + /* - * TODO: for reflink+dax we need a way to associate a single page with - * multiple address_space instances at different linear_page_index() - * offsets. + * Set the page->mapping with FS_DAX_MAPPING_COW flag, increase the refcount. + */ +static inline void dax_mapping_set_cow(struct page *page) +{ + if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) { + /* + * Reset the index if the page was already mapped + * regularly before. + */ + if (page->mapping) + page->index = 1; + page->mapping = (void *)PAGE_MAPPING_DAX_COW; + } + page->index++; +} + +/* + * When it is called in dax_insert_entry(), the cow flag will indicate that + * whether this entry is shared by multiple files. If so, set the page->mapping + * FS_DAX_MAPPING_COW, and use page->index as refcount. */ static void dax_associate_entry(void *entry, struct address_space *mapping, - struct vm_area_struct *vma, unsigned long address) + struct vm_area_struct *vma, unsigned long address, bool cow) { unsigned long size = dax_entry_size(entry), pfn, index; int i = 0; @@ -352,9 +374,13 @@ static void dax_associate_entry(void *entry, struct address_space *mapping, for_each_mapped_pfn(entry, pfn) { struct page *page = pfn_to_page(pfn); - WARN_ON_ONCE(page->mapping); - page->mapping = mapping; - page->index = index + i++; + if (cow) { + dax_mapping_set_cow(page); + } else { + WARN_ON_ONCE(page->mapping); + page->mapping = mapping; + page->index = index + i++; + } } } @@ -370,7 +396,12 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping, struct page *page = pfn_to_page(pfn); WARN_ON_ONCE(trunc && page_ref_count(page) > 1); - WARN_ON_ONCE(page->mapping && page->mapping != mapping); + if (dax_mapping_is_cow(page->mapping)) { + /* keep the CoW flag if this page is still shared */ + if (page->index-- > 0) + continue; + } else + WARN_ON_ONCE(page->mapping && page->mapping != mapping); page->mapping = NULL; page->index = 0; } @@ -830,7 +861,8 @@ static void *dax_insert_entry(struct xa_state *xas, void *old; dax_disassociate_entry(entry, mapping, false); - dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); + dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, + false); /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. 
If a normal PTE or diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 82719d33c0f1d..f2ff65f1bf838 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -661,6 +661,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) +/* + * Different with flags above, this flag is used only for fsdax mode. It + * indicates that this page->mapping is now under reflink case. + */ +#define PAGE_MAPPING_DAX_COW 0x1 + static __always_inline bool folio_mapping_flags(struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; From 2dfe2dea5ba3b95ebc01dc0a00b73e421d65e9bd Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:32 +0800 Subject: [PATCH 117/321] fsdax: output address in dax_iomap_pfn() and rename it Add address output in dax_iomap_pfn() in order to perform a memcpy() in CoW case. Since this function both output address and pfn, rename it to dax_iomap_direct_access(). Link: https://lkml.kernel.org/r/20220603053738.1218681-9-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Ritesh Harjani Reviewed-by: Dan Williams Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Signed-off-by: Andrew Morton --- fs/dax.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index b59b864017ad9..ab659c9f142a5 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1026,8 +1026,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, } EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); -static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size, - pfn_t *pfnp) +static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, + size_t size, void **kaddr, pfn_t *pfnp) { pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); int id, rc; @@ -1035,11 +1035,13 @@ static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size, id = dax_read_lock(); length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), - DAX_ACCESS, NULL, pfnp); + DAX_ACCESS, kaddr, pfnp); if (length < 0) { rc = length; goto out; } + if (!pfnp) + goto out_check_addr; rc = -EINVAL; if (PFN_PHYS(length) < size) goto out; @@ -1049,6 +1051,12 @@ static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size, if (length > 1 && !pfn_t_devmap(*pfnp)) goto out; rc = 0; + +out_check_addr: + if (!kaddr) + goto out; + if (!*kaddr) + rc = -EFAULT; out: dax_read_unlock(id); return rc; @@ -1456,7 +1464,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; } - err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn); + err = dax_iomap_direct_access(&iter->iomap, pos, size, NULL, &pfn); if (err) return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err); From 600089baa6a27d1dd69dca68842d3f34004aaa9f Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Tue, 7 Jun 2022 22:38:37 +0800 Subject: [PATCH 118/321] fsdax-output-address-in-dax_iomap_pfn-and-rename-it-v21 initialize `rc', per Dan. 
Link: https://lore.kernel.org/linux-fsdevel/Yp8FUZnO64Qvyx5G@kili/ Link: https://lkml.kernel.org/r/20220607143837.161174-1-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Ritesh Harjani Reviewed-by: Dan Williams Reviewed-by: Darrick J. Wong Signed-off-by: Andrew Morton --- fs/dax.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/dax.c b/fs/dax.c index ab659c9f142a5..7a8eb1e30a1b6 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1030,7 +1030,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, size_t size, void **kaddr, pfn_t *pfnp) { pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); - int id, rc; + int id, rc = 0; long length; id = dax_read_lock(); From aa9abf1f3019570efb3fbf562503ce8f5a145bd2 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:33 +0800 Subject: [PATCH 119/321] fsdax: introduce dax_iomap_cow_copy() In the case where the iomap is a write operation and iomap is not equal to srcmap after iomap_begin, we consider it is a CoW operation. In this case, the destination (iomap->addr) points to a newly allocated extent. It is needed to copy the data from srcmap to the extent. In theory, it is better to copy the head and tail ranges which is outside of the non-aligned area instead of copying the whole aligned range. But in dax page fault, it will always be an aligned range. So copy the whole range in this case. Link: https://lkml.kernel.org/r/20220603053738.1218681-10-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/dax.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 83 insertions(+), 5 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 7a8eb1e30a1b6..6a353838070d4 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1062,6 +1062,60 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, return rc; } +/** + * dax_iomap_cow_copy - Copy the data from source to destination before write + * @pos: address to do copy from. + * @length: size of copy operation. + * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE) + * @srcmap: iomap srcmap + * @daddr: destination address to copy to. + * + * This can be called from two places. Either during DAX write fault (page + * aligned), to copy the length size data to daddr. Or, while doing normal DAX + * write operation, dax_iomap_actor() might call this to do the copy of either + * start or end unaligned address. In the latter case the rest of the copy of + * aligned ranges is taken care by dax_iomap_actor() itself. + */ +static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size, + const struct iomap *srcmap, void *daddr) +{ + loff_t head_off = pos & (align_size - 1); + size_t size = ALIGN(head_off + length, align_size); + loff_t end = pos + length; + loff_t pg_end = round_up(end, align_size); + bool copy_all = head_off == 0 && end == pg_end; + void *saddr = 0; + int ret = 0; + + ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL); + if (ret) + return ret; + + if (copy_all) { + ret = copy_mc_to_kernel(daddr, saddr, length); + return ret ? 
-EIO : 0; + } + + /* Copy the head part of the range */ + if (head_off) { + ret = copy_mc_to_kernel(daddr, saddr, head_off); + if (ret) + return -EIO; + } + + /* Copy the tail part of the range */ + if (end < pg_end) { + loff_t tail_off = head_off + length; + loff_t tail_len = pg_end - end; + + ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off, + tail_len); + if (ret) + return -EIO; + } + return 0; +} + /* * The user has performed a load from a hole in the file. Allocating a new * page in the file would cause excessive storage usage for workloads with @@ -1232,15 +1286,17 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi, struct iov_iter *iter) { const struct iomap *iomap = &iomi->iomap; + const struct iomap *srcmap = &iomi->srcmap; loff_t length = iomap_length(iomi); loff_t pos = iomi->pos; struct dax_device *dax_dev = iomap->dax_dev; loff_t end = pos + length, done = 0; + bool write = iov_iter_rw(iter) == WRITE; ssize_t ret = 0; size_t xfer; int id; - if (iov_iter_rw(iter) == READ) { + if (!write) { end = min(end, i_size_read(iomi->inode)); if (pos >= end) return 0; @@ -1249,7 +1305,12 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi, return iov_iter_zero(min(length, end - pos), iter); } - if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) + /* + * In DAX mode, enforce either pure overwrites of written extents, or + * writes to unwritten extents as part of a copy-on-write operation. + */ + if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && + !(iomap->flags & IOMAP_F_SHARED))) return -EIO; /* @@ -1291,6 +1352,14 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi, break; } + if (write && + srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) { + ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap, + kaddr); + if (ret) + break; + } + map_len = PFN_PHYS(map_len); kaddr += offset; map_len -= offset; @@ -1300,7 +1369,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi, if (recovery) xfer = dax_recovery_write(dax_dev, pgoff, kaddr, map_len, iter); - else if (iov_iter_rw(iter) == WRITE) + else if (write) xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, map_len, iter); else @@ -1440,6 +1509,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, { struct address_space *mapping = vmf->vma->vm_file->f_mapping; const struct iomap *iomap = &iter->iomap; + const struct iomap *srcmap = &iter->srcmap; size_t size = pmd ? PMD_SIZE : PAGE_SIZE; loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; bool write = vmf->flags & FAULT_FLAG_WRITE; @@ -1447,6 +1517,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, unsigned long entry_flags = pmd ? DAX_PMD : 0; int err = 0; pfn_t pfn; + void *kaddr; if (!pmd && vmf->cow_page) return dax_fault_cow_page(vmf, iter); @@ -1459,18 +1530,25 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, return dax_pmd_load_hole(xas, vmf, iomap, entry); } - if (iomap->type != IOMAP_MAPPED) { + if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { WARN_ON_ONCE(1); return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; } - err = dax_iomap_direct_access(&iter->iomap, pos, size, NULL, &pfn); + err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); if (err) return pmd ? 
VM_FAULT_FALLBACK : dax_fault_return(err); *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags, write && !sync); + if (write && + srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) { + err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr); + if (err) + return dax_fault_return(err); + } + if (sync) return dax_fault_synchronous_pfnp(pfnp, pfn); From 2fb68e51ce8ef4ec54f6269ed89c298f6fc0f98a Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:34 +0800 Subject: [PATCH 120/321] fsdax: replace mmap entry in case of CoW Replace the existing entry to the newly allocated one in case of CoW. Also, we mark the entry as PAGECACHE_TAG_TOWRITE so writeback marks this entry as writeprotected. This helps us snapshots so new write pagefaults after snapshots trigger a CoW. Link: https://lkml.kernel.org/r/20220603053738.1218681-11-ruansy.fnst@fujitsu.com Signed-off-by: Goldwyn Rodrigues Signed-off-by: Shiyang Ruan Reviewed-by: Christoph Hellwig Reviewed-by: Ritesh Harjani Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Signed-off-by: Andrew Morton --- fs/dax.c | 77 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 35 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 6a353838070d4..04fee15693287 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -829,6 +829,23 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter return 0; } +/* + * MAP_SYNC on a dax mapping guarantees dirty metadata is + * flushed on write-faults (non-cow), but not read-faults. + */ +static bool dax_fault_is_synchronous(const struct iomap_iter *iter, + struct vm_area_struct *vma) +{ + return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) && + (iter->iomap.flags & IOMAP_F_DIRTY); +} + +static bool dax_fault_is_cow(const struct iomap_iter *iter) +{ + return (iter->flags & IOMAP_WRITE) && + (iter->iomap.flags & IOMAP_F_SHARED); +} + /* * By this point grab_mapping_entry() has ensured that we have a locked entry * of the appropriate size so we don't have to worry about downgrading PMDs to @@ -836,16 +853,19 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter * already in the tree, we will skip the insertion and just dirty the PMD as * appropriate. 
*/ -static void *dax_insert_entry(struct xa_state *xas, - struct address_space *mapping, struct vm_fault *vmf, - void *entry, pfn_t pfn, unsigned long flags, bool dirty) +static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, + const struct iomap_iter *iter, void *entry, pfn_t pfn, + unsigned long flags) { + struct address_space *mapping = vmf->vma->vm_file->f_mapping; void *new_entry = dax_make_entry(pfn, flags); + bool dirty = !dax_fault_is_synchronous(iter, vmf->vma); + bool cow = dax_fault_is_cow(iter); if (dirty) __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); - if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { + if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { unsigned long index = xas->xa_index; /* we are replacing a zero page with block mapping */ if (dax_is_pmd_entry(entry)) @@ -857,12 +877,12 @@ static void *dax_insert_entry(struct xa_state *xas, xas_reset(xas); xas_lock_irq(xas); - if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { + if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { void *old; dax_disassociate_entry(entry, mapping, false); dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, - false); + cow); /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. If a normal PTE or @@ -882,6 +902,9 @@ static void *dax_insert_entry(struct xa_state *xas, if (dirty) xas_set_mark(xas, PAGECACHE_TAG_DIRTY); + if (cow) + xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); + xas_unlock_irq(xas); return entry; } @@ -1123,17 +1146,15 @@ static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size, * If this page is ever written to we will re-fault and change the mapping to * point to real DAX storage instead. 
*/ -static vm_fault_t dax_load_hole(struct xa_state *xas, - struct address_space *mapping, void **entry, - struct vm_fault *vmf) +static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, + const struct iomap_iter *iter, void **entry) { - struct inode *inode = mapping->host; + struct inode *inode = iter->inode; unsigned long vaddr = vmf->address; pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); vm_fault_t ret; - *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, - DAX_ZERO_PAGE, false); + *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); trace_dax_load_hole(inode, vmf, ret); @@ -1142,7 +1163,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, #ifdef CONFIG_FS_DAX_PMD static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, - const struct iomap *iomap, void **entry) + const struct iomap_iter *iter, void **entry) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; unsigned long pmd_addr = vmf->address & PMD_MASK; @@ -1160,8 +1181,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, goto fallback; pfn = page_to_pfn_t(zero_page); - *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, - DAX_PMD | DAX_ZERO_PAGE, false); + *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, + DAX_PMD | DAX_ZERO_PAGE); if (arch_needs_pgtable_deposit()) { pgtable = pte_alloc_one(vma->vm_mm); @@ -1194,7 +1215,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, } #else static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, - const struct iomap *iomap, void **entry) + const struct iomap_iter *iter, void **entry) { return VM_FAULT_FALLBACK; } @@ -1439,17 +1460,6 @@ static vm_fault_t dax_fault_return(int error) return vmf_error(error); } -/* - * MAP_SYNC on a dax mapping guarantees dirty metadata is - * flushed on write-faults (non-cow), but not read-faults. - */ -static bool dax_fault_is_synchronous(unsigned long flags, - struct vm_area_struct *vma, const struct iomap *iomap) -{ - return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) - && (iomap->flags & IOMAP_F_DIRTY); -} - /* * When handling a synchronous page fault and the inode need a fsync, we can * insert the PTE/PMD into page tables only after that fsync happened. Skip @@ -1507,13 +1517,11 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, const struct iomap_iter *iter, pfn_t *pfnp, struct xa_state *xas, void **entry, bool pmd) { - struct address_space *mapping = vmf->vma->vm_file->f_mapping; const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = &iter->srcmap; size_t size = pmd ? PMD_SIZE : PAGE_SIZE; loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; - bool write = vmf->flags & FAULT_FLAG_WRITE; - bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap); + bool write = iter->flags & IOMAP_WRITE; unsigned long entry_flags = pmd ? DAX_PMD : 0; int err = 0; pfn_t pfn; @@ -1526,8 +1534,8 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, if (!write && (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { if (!pmd) - return dax_load_hole(xas, mapping, entry, vmf); - return dax_pmd_load_hole(xas, vmf, iomap, entry); + return dax_load_hole(xas, vmf, iter, entry); + return dax_pmd_load_hole(xas, vmf, iter, entry); } if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { @@ -1539,8 +1547,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, if (err) return pmd ? 
VM_FAULT_FALLBACK : dax_fault_return(err); - *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags, - write && !sync); + *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); if (write && srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) { @@ -1549,7 +1556,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, return dax_fault_return(err); } - if (sync) + if (dax_fault_is_synchronous(iter, vmf->vma)) return dax_fault_synchronous_pfnp(pfnp, pfn); /* insert PMD pfn */ From 4d46223a24625eed94159ea1f827bcc2046b3aa0 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:35 +0800 Subject: [PATCH 121/321] fsdax: add dax_iomap_cow_copy() for dax zero Punch hole on a reflinked file needs dax_iomap_cow_copy() too. Otherwise, data in not aligned area will be not correct. So, add the CoW operation for not aligned case in dax_memzero(). Link: https://lkml.kernel.org/r/20220603053738.1218681-12-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Ritesh Harjani Reviewed-by: Darrick J. Wong Reviewed-by: Christoph Hellwig Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Signed-off-by: Andrew Morton --- fs/dax.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 04fee15693287..0aab323005313 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1221,17 +1221,28 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, } #endif /* CONFIG_FS_DAX_PMD */ -static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff, - unsigned int offset, size_t size) +static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size) { + const struct iomap *iomap = &iter->iomap; + const struct iomap *srcmap = iomap_iter_srcmap(iter); + unsigned offset = offset_in_page(pos); + pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); void *kaddr; long ret; - ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL); - if (ret > 0) { - memset(kaddr + offset, 0, size); - dax_flush(dax_dev, kaddr + offset, size); - } + ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, + NULL); + if (ret < 0) + return ret; + memset(kaddr + offset, 0, size); + if (srcmap->addr != iomap->addr) { + ret = dax_iomap_cow_copy(pos, size, PAGE_SIZE, srcmap, + kaddr); + if (ret < 0) + return ret; + dax_flush(iomap->dax_dev, kaddr, PAGE_SIZE); + } else + dax_flush(iomap->dax_dev, kaddr + offset, size); return ret; } @@ -1258,7 +1269,7 @@ static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero) if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE) rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); else - rc = dax_memzero(iomap->dax_dev, pgoff, offset, size); + rc = dax_memzero(iter, pos, size); dax_read_unlock(id); if (rc < 0) From 90802f1b5626c34042b32dc5e7d481791b4c3ba1 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:36 +0800 Subject: [PATCH 122/321] fsdax: dedup file range to use a compare function With dax we cannot deal with readpage() etc. So, we create a dax comparison function which is similar with vfs_dedupe_file_range_compare(). And introduce dax_remap_file_range_prep() for filesystem use. Link: https://lkml.kernel.org/r/20220603053738.1218681-13-ruansy.fnst@fujitsu.com Signed-off-by: Goldwyn Rodrigues Signed-off-by: Shiyang Ruan Reviewed-by: Darrick J. 
Wong Reviewed-by: Christoph Hellwig Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/dax.c | 82 ++++++++++++++++++++++++++++++++++++++++++++ fs/remap_range.c | 31 ++++++++++++++--- fs/xfs/xfs_reflink.c | 8 +++-- include/linux/dax.h | 8 +++++ include/linux/fs.h | 12 ++++--- 5 files changed, 130 insertions(+), 11 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 0aab323005313..e0f9c4a0a0c1d 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1873,3 +1873,85 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, return dax_insert_pfn_mkwrite(vmf, pfn, order); } EXPORT_SYMBOL_GPL(dax_finish_sync_fault); + +static loff_t dax_range_compare_iter(struct iomap_iter *it_src, + struct iomap_iter *it_dest, u64 len, bool *same) +{ + const struct iomap *smap = &it_src->iomap; + const struct iomap *dmap = &it_dest->iomap; + loff_t pos1 = it_src->pos, pos2 = it_dest->pos; + void *saddr, *daddr; + int id, ret; + + len = min(len, min(smap->length, dmap->length)); + + if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { + *same = true; + return len; + } + + if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { + *same = false; + return 0; + } + + id = dax_read_lock(); + ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), + &saddr, NULL); + if (ret < 0) + goto out_unlock; + + ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), + &daddr, NULL); + if (ret < 0) + goto out_unlock; + + *same = !memcmp(saddr, daddr, len); + if (!*same) + len = 0; + dax_read_unlock(id); + return len; + +out_unlock: + dax_read_unlock(id); + return -EIO; +} + +int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, + struct inode *dst, loff_t dstoff, loff_t len, bool *same, + const struct iomap_ops *ops) +{ + struct iomap_iter src_iter = { + .inode = src, + .pos = srcoff, + .len = len, + .flags = IOMAP_DAX, + }; + struct iomap_iter dst_iter = { + .inode = dst, + .pos = dstoff, + .len = len, + .flags = IOMAP_DAX, + }; + int ret; + + while ((ret = iomap_iter(&src_iter, ops)) > 0) { + while ((ret = iomap_iter(&dst_iter, ops)) > 0) { + dst_iter.processed = dax_range_compare_iter(&src_iter, + &dst_iter, len, same); + } + if (ret <= 0) + src_iter.processed = ret; + } + return ret; +} + +int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags, + const struct iomap_ops *ops) +{ + return __generic_remap_file_range_prep(file_in, pos_in, file_out, + pos_out, len, remap_flags, ops); +} +EXPORT_SYMBOL_GPL(dax_remap_file_range_prep); diff --git a/fs/remap_range.c b/fs/remap_range.c index e112b5424cdb9..231de627c1b94 100644 --- a/fs/remap_range.c +++ b/fs/remap_range.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "internal.h" #include @@ -271,9 +272,11 @@ static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff, * If there's an error, then the usual negative error code is returned. * Otherwise returns 0 with *len set to the request length. 
*/ -int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *len, unsigned int remap_flags) +int +__generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags, + const struct iomap_ops *dax_read_ops) { struct inode *inode_in = file_inode(file_in); struct inode *inode_out = file_inode(file_out); @@ -333,8 +336,18 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, if (remap_flags & REMAP_FILE_DEDUP) { bool is_same = false; - ret = vfs_dedupe_file_range_compare(file_in, pos_in, - file_out, pos_out, *len, &is_same); + if (*len == 0) + return 0; + + if (!IS_DAX(inode_in)) + ret = vfs_dedupe_file_range_compare(file_in, pos_in, + file_out, pos_out, *len, &is_same); + else if (dax_read_ops) + ret = dax_dedupe_file_range_compare(inode_in, pos_in, + inode_out, pos_out, *len, &is_same, + dax_read_ops); + else + return -EINVAL; if (ret) return ret; if (!is_same) @@ -352,6 +365,14 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, return ret; } + +int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags) +{ + return __generic_remap_file_range_prep(file_in, pos_in, file_out, + pos_out, len, remap_flags, NULL); +} EXPORT_SYMBOL(generic_remap_file_range_prep); loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index e7a7c00d93be1..cbaf36d210202 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1367,8 +1367,12 @@ xfs_reflink_remap_prep( if (IS_DAX(inode_in) || IS_DAX(inode_out)) goto out_unlock; - ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, - len, remap_flags); + if (!IS_DAX(inode_in)) + ret = generic_remap_file_range_prep(file_in, pos_in, file_out, + pos_out, len, remap_flags); + else + ret = dax_remap_file_range_prep(file_in, pos_in, file_out, + pos_out, len, remap_flags, &xfs_read_iomap_ops); if (ret || *len == 0) goto out_unlock; diff --git a/include/linux/dax.h b/include/linux/dax.h index 7116681b48c0a..ba985333e26bf 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -246,6 +246,14 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); +int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, + struct inode *dest, loff_t destoff, + loff_t len, bool *is_same, + const struct iomap_ops *ops); +int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags, + const struct iomap_ops *ops); static inline bool dax_mapping(struct address_space *mapping) { return mapping->host && IS_DAX(mapping->host); diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ad5e3520fae5..134e9d7ad5d68 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -74,6 +74,7 @@ struct fsverity_operations; struct fs_context; struct fs_parameter_spec; struct fileattr; +struct iomap_ops; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -2070,10 +2071,13 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, 
loff_t pos_out, size_t len, unsigned int flags); -extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *count, - unsigned int remap_flags); +int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags, + const struct iomap_ops *dax_read_ops); +int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *count, unsigned int remap_flags); extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); From 45026730e21c30ff985ca706dea0a780e8ec02f0 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:37 +0800 Subject: [PATCH 123/321] xfs: support CoW in fsdax mode In fsdax mode, WRITE and ZERO on a shared extent need CoW performed. After that, new allocated extents needs to be remapped to the file. So, add a CoW identification in ->iomap_begin(), and implement ->iomap_end() to do the remapping work. Link: https://lkml.kernel.org/r/20220603053738.1218681-14-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Christoph Hellwig Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/xfs/xfs_file.c | 33 ++++++++++++++++++++++++++++----- fs/xfs/xfs_iomap.c | 30 +++++++++++++++++++++++++++++- fs/xfs/xfs_iomap.h | 1 + 3 files changed, 58 insertions(+), 6 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 5a171c0b244b7..8bf1055d164ec 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -25,6 +25,7 @@ #include "xfs_iomap.h" #include "xfs_reflink.h" +#include #include #include #include @@ -669,7 +670,7 @@ xfs_file_dax_write( pos = iocb->ki_pos; trace_xfs_file_dax_write(iocb, from); - ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops); + ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops); if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { i_size_write(inode, iocb->ki_pos); error = xfs_setfilesize(ip, pos, ret); @@ -1254,6 +1255,31 @@ xfs_file_llseek( return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); } +#ifdef CONFIG_FS_DAX +int +xfs_dax_fault( + struct vm_fault *vmf, + enum page_entry_size pe_size, + bool write_fault, + pfn_t *pfn) +{ + return dax_iomap_fault(vmf, pe_size, pfn, NULL, + (write_fault && !vmf->cow_page) ? + &xfs_dax_write_iomap_ops : + &xfs_read_iomap_ops); +} +#else +int +xfs_dax_fault( + struct vm_fault *vmf, + enum page_entry_size pe_size, + bool write_fault, + pfn_t *pfn) +{ + return 0; +} +#endif + /* * Locking for serialisation of IO during page faults. This results in a lock * ordering of: @@ -1285,10 +1311,7 @@ __xfs_filemap_fault( pfn_t pfn; xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); - ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, - (write_fault && !vmf->cow_page) ? 
- &xfs_direct_write_iomap_ops : - &xfs_read_iomap_ops); + ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn); if (ret & VM_FAULT_NEEDDSYNC) ret = dax_finish_sync_fault(vmf, pe_size, pfn); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 5a393259a3a38..4c07f5e718fb1 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -773,7 +773,8 @@ xfs_direct_write_iomap_begin( /* may drop and re-acquire the ilock */ error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared, - &lockmode, flags & IOMAP_DIRECT); + &lockmode, + (flags & IOMAP_DIRECT) || IS_DAX(inode)); if (error) goto out_unlock; if (shared) @@ -867,6 +868,33 @@ const struct iomap_ops xfs_direct_write_iomap_ops = { .iomap_begin = xfs_direct_write_iomap_begin, }; +static int +xfs_dax_write_iomap_end( + struct inode *inode, + loff_t pos, + loff_t length, + ssize_t written, + unsigned flags, + struct iomap *iomap) +{ + struct xfs_inode *ip = XFS_I(inode); + + if (!xfs_is_cow_inode(ip)) + return 0; + + if (!written) { + xfs_reflink_cancel_cow_range(ip, pos, length, true); + return 0; + } + + return xfs_reflink_end_cow(ip, pos, written); +} + +const struct iomap_ops xfs_dax_write_iomap_ops = { + .iomap_begin = xfs_direct_write_iomap_begin, + .iomap_end = xfs_dax_write_iomap_end, +}; + static int xfs_buffered_write_iomap_begin( struct inode *inode, diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index e88dc162c785e..c782e8c0479c0 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -51,5 +51,6 @@ extern const struct iomap_ops xfs_direct_write_iomap_ops; extern const struct iomap_ops xfs_read_iomap_ops; extern const struct iomap_ops xfs_seek_iomap_ops; extern const struct iomap_ops xfs_xattr_iomap_ops; +extern const struct iomap_ops xfs_dax_write_iomap_ops; #endif /* __XFS_IOMAP_H__*/ From 6896b1ac0d5f60986a02300c6cea879893f48c43 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 4 Jun 2022 11:45:10 -0700 Subject: [PATCH 124/321] xfs-support-cow-in-fsdax-mode-fix make xfs_dax_fault() static Reported-by: kernel test robot Cc: Shiyang Ruan Signed-off-by: Andrew Morton --- fs/xfs/xfs_file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 8bf1055d164ec..45d8e64188f36 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1256,7 +1256,7 @@ xfs_file_llseek( } #ifdef CONFIG_FS_DAX -int +static int xfs_dax_fault( struct vm_fault *vmf, enum page_entry_size pe_size, @@ -1269,7 +1269,7 @@ xfs_dax_fault( &xfs_read_iomap_ops); } #else -int +static int xfs_dax_fault( struct vm_fault *vmf, enum page_entry_size pe_size, From 7797522f7325483f7b0750d75834a34773bc6076 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Fri, 3 Jun 2022 13:37:38 +0800 Subject: [PATCH 125/321] xfs: add dax dedupe support Introduce xfs_mmaplock_two_inodes_and_break_dax_layout() for dax files who are going to be deduped. After that, call compare range function only when files are both DAX or not. Link: https://lkml.kernel.org/r/20220603053738.1218681-15-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Reviewed-by: Darrick J. 
Wong Reviewed-by: Christoph Hellwig Cc: Al Viro Cc: Dan Williams Cc: Dan Williams Cc: Dave Chinner Cc: Goldwyn Rodrigues Cc: Goldwyn Rodrigues Cc: Jane Chu Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ritesh Harjani Signed-off-by: Andrew Morton --- fs/xfs/xfs_file.c | 2 +- fs/xfs/xfs_inode.c | 69 +++++++++++++++++++++++++++++++++++++++++--- fs/xfs/xfs_inode.h | 1 + fs/xfs/xfs_reflink.c | 4 +-- 4 files changed, 69 insertions(+), 7 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 45d8e64188f36..92a3af1e4414a 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -808,7 +808,7 @@ xfs_wait_dax_page( xfs_ilock(ip, XFS_MMAPLOCK_EXCL); } -static int +int xfs_break_dax_layouts( struct inode *inode, bool *retry) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 52d6f2c7d58b3..6f251781ebd03 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3767,6 +3767,50 @@ xfs_iolock_two_inodes_and_break_layout( return 0; } +static int +xfs_mmaplock_two_inodes_and_break_dax_layout( + struct xfs_inode *ip1, + struct xfs_inode *ip2) +{ + int error; + bool retry; + struct page *page; + + if (ip1->i_ino > ip2->i_ino) + swap(ip1, ip2); + +again: + retry = false; + /* Lock the first inode */ + xfs_ilock(ip1, XFS_MMAPLOCK_EXCL); + error = xfs_break_dax_layouts(VFS_I(ip1), &retry); + if (error || retry) { + xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); + if (error == 0 && retry) + goto again; + return error; + } + + if (ip1 == ip2) + return 0; + + /* Nested lock the second inode */ + xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1)); + /* + * We cannot use xfs_break_dax_layouts() directly here because it may + * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable + * for this nested lock case. + */ + page = dax_layout_busy_page(VFS_I(ip2)->i_mapping); + if (page && page_ref_count(page) != 1) { + xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); + xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); + goto again; + } + + return 0; +} + /* * Lock two inodes so that userspace cannot initiate I/O via file syscalls or * mmap activity. 
@@ -3781,8 +3825,19 @@ xfs_ilock2_io_mmap( ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2)); if (ret) return ret; - filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping, - VFS_I(ip2)->i_mapping); + + if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) { + ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2); + if (ret) { + inode_unlock(VFS_I(ip2)); + if (ip1 != ip2) + inode_unlock(VFS_I(ip1)); + return ret; + } + } else + filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping, + VFS_I(ip2)->i_mapping); + return 0; } @@ -3792,8 +3847,14 @@ xfs_iunlock2_io_mmap( struct xfs_inode *ip1, struct xfs_inode *ip2) { - filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping, - VFS_I(ip2)->i_mapping); + if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) { + xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); + if (ip1 != ip2) + xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); + } else + filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping, + VFS_I(ip2)->i_mapping); + inode_unlock(VFS_I(ip2)); if (ip1 != ip2) inode_unlock(VFS_I(ip1)); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 7be6f8e705ab4..8313cc83b6ee0 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -467,6 +467,7 @@ xfs_itruncate_extents( } /* from xfs_file.c */ +int xfs_break_dax_layouts(struct inode *inode, bool *retry); int xfs_break_layouts(struct inode *inode, uint *iolock, enum layout_break_reason reason); diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index cbaf36d210202..d07f06ff0f130 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1363,8 +1363,8 @@ xfs_reflink_remap_prep( if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest)) goto out_unlock; - /* Don't share DAX file data for now. */ - if (IS_DAX(inode_in) || IS_DAX(inode_out)) + /* Don't share DAX file data with non-DAX file. */ + if (IS_DAX(inode_in) != IS_DAX(inode_out)) goto out_unlock; if (!IS_DAX(inode_in)) From 38106de2088152971ab5f9abf3024a50e47e3611 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 11 Jun 2022 10:13:52 +0800 Subject: [PATCH 126/321] mm/page_alloc: minor clean up for memmap_init_compound() Since commit 5232c63f46fd ("mm: Make compound_pincount always available"), compound_pincount_ptr is stored at first tail page now. So we should call prep_compound_head() after the first tail page is initialized to take advantage of the likelihood of that tail struct page being cached given that we will read them right after in prep_compound_head(). Link: https://lkml.kernel.org/r/20220611021352.13529-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Cc: Joao Martins Signed-off-by: Andrew Morton --- mm/page_alloc.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6ead871546ec9..1b134f0c7e39a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6686,13 +6686,18 @@ static void __ref memmap_init_compound(struct page *head, set_page_count(page, 0); /* - * The first tail page stores compound_mapcount_ptr() and - * compound_order() and the second tail page stores - * compound_pincount_ptr(). Call prep_compound_head() after - * the first and second tail pages have been initialized to - * not have the data overwritten. + * The first tail page stores compound_mapcount_ptr(), + * compound_order() and compound_pincount_ptr(). Call + * prep_compound_head() after the first tail page have + * been initialized to not have the data overwritten. 
+ * + * Note the idea to make this right after we initialize + * the offending tail pages is trying to take advantage + * of the likelihood of those tail struct pages being + * cached given that we will read them right after in + * prep_compound_head(). */ - if (pfn == head_pfn + 2) + if (unlikely(pfn == head_pfn + 1)) prep_compound_head(head, order); } } From bc47d3b8c493a0664c1d7ca9f630440478797196 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Wed, 1 Jun 2022 14:09:46 -0700 Subject: [PATCH 127/321] selftests: vm: add hugetlb_shared userfaultfd test to run_vmtests.sh Patch series "userfaultfd: add /dev/userfaultfd for fine grained access control", v3. This patch (of 6): This not being included was just a simple oversight. There are certain features (like minor fault support) which are only enabled on shared mappings, so without including hugetlb_shared we actually lose a significant amount of test coverage. Link: https://lkml.kernel.org/r/20220601210951.3916598-1-axelrasmussen@google.com Link: https://lkml.kernel.org/r/20220601210951.3916598-2-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Reviewed-by: Shuah Khan Reviewed-by: Peter Xu Cc: Al Viro Cc: Charan Teja Kalla Cc: Dave Hansen Cc: Dmitry V. Levin Cc: Gleb Fotengauer-Malinovskiy Cc: Hugh Dickins Cc: Jan Kara Cc: Jonathan Corbet Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zhang Yi Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/run_vmtests.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh index 2af563a9652e0..9fb92fa39c474 100755 --- a/tools/testing/selftests/vm/run_vmtests.sh +++ b/tools/testing/selftests/vm/run_vmtests.sh @@ -121,9 +121,11 @@ run_test ./gup_test -a run_test ./gup_test -ct -F 0x1 0 19 0x1000 run_test ./userfaultfd anon 20 16 -# Test requires source and destination huge pages. Size of source -# (half_ufd_size_MB) is passed as argument to test. +# Hugetlb tests require source and destination huge pages. Pass in half the +# size ($half_ufd_size_MB), which is used for *each*. run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32 +run_test ./userfaultfd hugetlb_shared "$half_ufd_size_MB" 32 "$mnt"/uffd-test +rm -f "$mnt"/uffd-test run_test ./userfaultfd shmem 20 16 #cleanup From d6bbc761fc5be95266eb60ac85e2546b5afa0f69 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Wed, 1 Jun 2022 14:09:47 -0700 Subject: [PATCH 128/321] userfaultfd: add /dev/userfaultfd for fine grained access control Historically, it has been shown that intercepting kernel faults with userfaultfd (thereby forcing the kernel to wait for an arbitrary amount of time) can be exploited, or at least can make some kinds of exploits easier. So, in 37cd0575b8 "userfaultfd: add UFFD_USER_MODE_ONLY" we changed things so, in order for kernel faults to be handled by userfaultfd, either the process needs CAP_SYS_PTRACE, or this sysctl must be configured so that any unprivileged user can do it. In a typical implementation of a hypervisor with live migration (take QEMU/KVM as one such example), we do indeed need to be able to handle kernel faults. But, both options above are less than ideal: - Toggling the sysctl increases attack surface by allowing any unprivileged user to do it. 
- Granting the live migration process CAP_SYS_PTRACE gives it this ability, but *also* the ability to "observe and control the execution of another process [...], and examine and change [its] memory and registers" (from ptrace(2)). This isn't something we need or want to be able to do, so granting this permission violates the "principle of least privilege". This is all a long winded way to say: we want a more fine-grained way to grant access to userfaultfd, without granting other additional permissions at the same time. To achieve this, add a /dev/userfaultfd misc device. This device provides an alternative to the userfaultfd(2) syscall for the creation of new userfaultfds. The idea is, any userfaultfds created this way will be able to handle kernel faults, without the caller having any special capabilities. Access to this mechanism is instead restricted using e.g. standard filesystem permissions. Link: https://lkml.kernel.org/r/20220601210951.3916598-3-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Cc: Al Viro Cc: Charan Teja Kalla Cc: Dave Hansen Cc: Dmitry V. Levin Cc: Gleb Fotengauer-Malinovskiy Cc: Hugh Dickins Cc: Jan Kara Cc: Jonathan Corbet Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Peter Xu Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zhang Yi Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 76 ++++++++++++++++++++++++++------ include/uapi/linux/userfaultfd.h | 4 ++ 2 files changed, 66 insertions(+), 14 deletions(-) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index fe6f283d26d55..d398f6bf6d744 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -30,6 +30,7 @@ #include #include #include +#include int sysctl_unprivileged_userfaultfd __read_mostly; @@ -413,13 +414,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) if (ctx->features & UFFD_FEATURE_SIGBUS) goto out; - if ((vmf->flags & FAULT_FLAG_USER) == 0 && - ctx->flags & UFFD_USER_MODE_ONLY) { - printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd " - "sysctl knob to 1 if kernel faults must be handled " - "without obtaining CAP_SYS_PTRACE capability\n"); + if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY)) goto out; - } /* * If it's already released don't get it. This avoids to loop @@ -2074,19 +2070,33 @@ static void init_once_userfaultfd_ctx(void *mem) seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); } -SYSCALL_DEFINE1(userfaultfd, int, flags) +static inline bool userfaultfd_allowed(bool is_syscall, int flags) +{ + bool kernel_faults = !(flags & UFFD_USER_MODE_ONLY); + bool allow_unprivileged = sysctl_unprivileged_userfaultfd; + + /* userfaultfd(2) access is controlled by sysctl + capability. */ + if (is_syscall && kernel_faults) { + if (!allow_unprivileged && !capable(CAP_SYS_PTRACE)) + return false; + } + + /* + * For /dev/userfaultfd, access is to be controlled using e.g. + * permissions on the device node. We assume this is correctly + * configured by userspace, so we simply allow access here. 
+ */ + + return true; +} + +static int new_userfaultfd(bool is_syscall, int flags) { struct userfaultfd_ctx *ctx; int fd; - if (!sysctl_unprivileged_userfaultfd && - (flags & UFFD_USER_MODE_ONLY) == 0 && - !capable(CAP_SYS_PTRACE)) { - printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd " - "sysctl knob to 1 if kernel faults must be handled " - "without obtaining CAP_SYS_PTRACE capability\n"); + if (!userfaultfd_allowed(is_syscall, flags)) return -EPERM; - } BUG_ON(!current->mm); @@ -2105,6 +2115,10 @@ SYSCALL_DEFINE1(userfaultfd, int, flags) refcount_set(&ctx->refcount, 1); ctx->flags = flags; ctx->features = 0; + /* + * If UFFD_USER_MODE_ONLY is not set, then userfaultfd_allowed() above + * decided that kernel faults were allowed and should be handled. + */ ctx->released = false; atomic_set(&ctx->mmap_changing, 0); ctx->mm = current->mm; @@ -2120,8 +2134,42 @@ SYSCALL_DEFINE1(userfaultfd, int, flags) return fd; } +SYSCALL_DEFINE1(userfaultfd, int, flags) +{ + return new_userfaultfd(true, flags); +} + +static int userfaultfd_dev_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags) +{ + if (cmd != USERFAULTFD_IOC_NEW) + return -EINVAL; + + return new_userfaultfd(false, flags); +} + +static const struct file_operations userfaultfd_dev_fops = { + .open = userfaultfd_dev_open, + .unlocked_ioctl = userfaultfd_dev_ioctl, + .compat_ioctl = userfaultfd_dev_ioctl, + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +static struct miscdevice userfaultfd_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "userfaultfd", + .fops = &userfaultfd_dev_fops +}; + static int __init userfaultfd_init(void) { + WARN_ON(misc_register(&userfaultfd_misc)); + userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", sizeof(struct userfaultfd_ctx), 0, diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 7d32b1e797fb2..005e5e3062664 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -12,6 +12,10 @@ #include +/* ioctls for /dev/userfaultfd */ +#define USERFAULTFD_IOC 0xAA +#define USERFAULTFD_IOC_NEW _IO(USERFAULTFD_IOC, 0x00) + /* * If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and * UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In From 16203b587f0b85646c4edefe150647e29a6e1f23 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Wed, 1 Jun 2022 14:09:48 -0700 Subject: [PATCH 129/321] userfaultfd: selftests: modify selftest to use /dev/userfaultfd We clearly want to ensure both userfaultfd(2) and /dev/userfaultfd keep working into the future, so just run the test twice, using each interface. Link: https://lkml.kernel.org/r/20220601210951.3916598-4-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Cc: Al Viro Cc: Charan Teja Kalla Cc: Dave Hansen Cc: Dmitry V. 
Levin Cc: Gleb Fotengauer-Malinovskiy Cc: Hugh Dickins Cc: Jan Kara Cc: Jonathan Corbet Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Peter Xu Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zhang Yi Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/userfaultfd.c | 37 +++++++++++++++++++++--- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 7c3f1b0ab468a..487d1d40cedf7 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -77,6 +77,9 @@ static int bounces; #define TEST_SHMEM 3 static int test_type; +/* test using /dev/userfaultfd, instead of userfaultfd(2) */ +static bool test_dev_userfaultfd; + /* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */ #define ALARM_INTERVAL_SECS 10 static volatile bool test_uffdio_copy_eexist = true; @@ -154,12 +157,14 @@ static void usage(void) ret, __LINE__); \ } while (0) -#define err(fmt, ...) \ +#define errexit(exitcode, fmt, ...) \ do { \ _err(fmt, ##__VA_ARGS__); \ - exit(1); \ + exit(exitcode); \ } while (0) +#define err(fmt, ...) errexit(1, fmt, ##__VA_ARGS__) + static void uffd_stats_reset(struct uffd_stats *uffd_stats, unsigned long n_cpus) { @@ -383,13 +388,31 @@ static void assert_expected_ioctls_present(uint64_t mode, uint64_t ioctls) } } +static void __userfaultfd_open_dev(void) +{ + int fd; + + uffd = -1; + fd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC); + if (fd < 0) + return; + + uffd = ioctl(fd, USERFAULTFD_IOC_NEW, + O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY); + close(fd); +} + static void userfaultfd_open(uint64_t *features) { struct uffdio_api uffdio_api; - uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY); + if (test_dev_userfaultfd) + __userfaultfd_open_dev(); + else + uffd = syscall(__NR_userfaultfd, + O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY); if (uffd < 0) - err("userfaultfd syscall not available in this kernel"); + errexit(KSFT_SKIP, "creating userfaultfd failed"); uffd_flags = fcntl(uffd, F_GETFD, NULL); uffdio_api.api = UFFD_API; @@ -1691,6 +1714,12 @@ int main(int argc, char **argv) } printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n", nr_pages, nr_pages_per_cpu); + + test_dev_userfaultfd = false; + if (userfaultfd_stress()) + return 1; + + test_dev_userfaultfd = true; return userfaultfd_stress(); } From 30cfbef337cc1ea915df17c7e4792af2f25a7c4d Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Wed, 1 Jun 2022 14:09:49 -0700 Subject: [PATCH 130/321] userfaultfd: update documentation to describe /dev/userfaultfd Explain the different ways to create a new userfaultfd, and how access control works for each way. Link: https://lkml.kernel.org/r/20220601210951.3916598-5-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Cc: Al Viro Cc: Charan Teja Kalla Cc: Dave Hansen Cc: Dmitry V. 
Levin Cc: Gleb Fotengauer-Malinovskiy Cc: Hugh Dickins Cc: Jan Kara Cc: Jonathan Corbet Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Peter Xu Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zhang Yi Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/userfaultfd.rst | 40 ++++++++++++++++++-- Documentation/admin-guide/sysctl/vm.rst | 3 ++ 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst index 6528036093e1f..9bae1acd431fa 100644 --- a/Documentation/admin-guide/mm/userfaultfd.rst +++ b/Documentation/admin-guide/mm/userfaultfd.rst @@ -17,7 +17,10 @@ of the ``PROT_NONE+SIGSEGV`` trick. Design ====== -Userfaults are delivered and resolved through the ``userfaultfd`` syscall. +Userspace creates a new userfaultfd, initializes it, and registers one or more +regions of virtual memory with it. Then, any page faults which occur within the +region(s) result in a message being delivered to the userfaultfd, notifying +userspace of the fault. The ``userfaultfd`` (aside from registering and unregistering virtual memory ranges) provides two primary functionalities: @@ -34,12 +37,11 @@ The real advantage of userfaults if compared to regular virtual memory management of mremap/mprotect is that the userfaults in all their operations never involve heavyweight structures like vmas (in fact the ``userfaultfd`` runtime load never takes the mmap_lock for writing). - Vmas are not suitable for page- (or hugepage) granular fault tracking when dealing with virtual address spaces that could span Terabytes. Too many vmas would be needed for that. -The ``userfaultfd`` once opened by invoking the syscall, can also be +The ``userfaultfd``, once created, can also be passed using unix domain sockets to a manager process, so the same manager process could handle the userfaults of a multitude of different processes without them being aware about what is going on @@ -50,6 +52,38 @@ is a corner case that would currently return ``-EBUSY``). API === +Creating a userfaultfd +---------------------- + +There are two ways to create a new userfaultfd, each of which provide ways to +restrict access to this functionality (since historically userfaultfds which +handle kernel page faults have been a useful tool for exploiting the kernel). + +The first way, supported by older kernels, is the userfaultfd(2) syscall. +Access to this is controlled in several ways: + +- By default, the userfaultfd will be able to handle kernel page faults. This + can be disabled by passing in UFFD_USER_MODE_ONLY. + +- If vm.unprivileged_userfaultfd is 0, then the caller must *either* have + CAP_SYS_PTRACE, or pass in UFFD_USER_MODE_ONLY. + +- If vm.unprivileged_userfaultfd is 1, then no particular privilege is needed to + use this syscall, even if UFFD_USER_MODE_ONLY is *not* set. + +The second way, added to the kernel more recently, is by opening and issuing a +USERFAULTFD_IOC_NEW ioctl to /dev/userfaultfd. This method yields equivalent +userfaultfds to the userfaultfd(2) syscall; its benefit is in how access to +creating userfaultfds is controlled. + +Access to /dev/userfaultfd is controlled via normal filesystem permissions +(user/group/mode for example), which gives fine grained access to userfaultfd +specifically, without also granting other unrelated privileges at the same time +(as e.g. granting CAP_SYS_PTRACE would do). 
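For illustration, a minimal userspace sketch of the /dev/userfaultfd creation path described above, modelled on the selftest's __userfaultfd_open_dev(); the helper name and error handling here are illustrative only, and the returned descriptor still needs the usual UFFDIO_API handshake before use:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/*
 * Sketch: create a userfaultfd via /dev/userfaultfd rather than the
 * userfaultfd(2) syscall. Only read/write access to the device node is
 * required; no CAP_SYS_PTRACE and no sysctl change is needed for the
 * resulting userfaultfd to handle kernel faults.
 */
static int open_userfaultfd_dev(void)
{
	int devfd, uffd;

	devfd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
	if (devfd < 0)
		return -1;

	/* The ioctl argument takes the same flags as userfaultfd(2). */
	uffd = ioctl(devfd, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);
	close(devfd);

	return uffd;	/* a new userfaultfd, or negative on error */
}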
+ +Initializing up a userfaultfd +----------------------------- + When first opened the ``userfaultfd`` must be enabled invoking the ``UFFDIO_API`` ioctl specifying a ``uffdio_api.api`` value set to ``UFFD_API`` (or a later API version) which will specify the ``read/POLLIN`` protocol diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index f74f722ad7028..5a810ea6dc77f 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -927,6 +927,9 @@ calls without any restrictions. The default value is 0. +An alternative to this sysctl / the userfaultfd(2) syscall is to create +userfaultfds via /dev/userfaultfd. See +Documentation/admin-guide/mm/userfaultfd.rst. user_reserve_kbytes =================== From c7f7561075491355ad13ab972ba081de52a95aa4 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Wed, 1 Jun 2022 14:09:50 -0700 Subject: [PATCH 131/321] userfaultfd: selftests: make /dev/userfaultfd testing configurable Instead of always testing both userfaultfd(2) and /dev/userfaultfd, let the user choose which to test. As with other test features, change the behavior based on a new command line flag. Introduce the idea of "test mods", which are generic (not specific to a test type) modifications to the behavior of the test. This is sort of borrowed from this RFC patch series [1], but simplified a bit. The benefit is, in "typical" configurations this test is somewhat slow (say, 30sec or something). Testing both clearly doubles it, so it may not always be desirable, as users are likely to use one or the other, but never both, in the "real world". [1]: https://patchwork.kernel.org/project/linux-mm/patch/20201129004548.1619714-14-namit@vmware.com/ Link: https://lkml.kernel.org/r/20220601210951.3916598-6-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Cc: Al Viro Cc: Charan Teja Kalla Cc: Dave Hansen Cc: Dmitry V. Levin Cc: Gleb Fotengauer-Malinovskiy Cc: Hugh Dickins Cc: Jan Kara Cc: Jonathan Corbet Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Peter Xu Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zhang Yi Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/userfaultfd.c | 41 +++++++++++++++++------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 487d1d40cedf7..28b881523d159 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -128,6 +128,8 @@ struct uffd_stats { const char *examples = "# Run anonymous memory test on 100MiB region with 99999 bounces:\n" "./userfaultfd anon 100 99999\n\n" + "# Run the same anonymous memory test, but using /dev/userfaultfd:\n" + "./userfaultfd anon:dev 100 99999\n\n" "# Run share memory test on 1GiB region with 99 bounces:\n" "./userfaultfd shmem 1000 99\n\n" "# Run hugetlb memory test on 256MiB region with 50 bounces:\n" @@ -144,6 +146,13 @@ static void usage(void) "[hugetlbfs_file]\n\n"); fprintf(stderr, "Supported : anon, hugetlb, " "hugetlb_shared, shmem\n\n"); + fprintf(stderr, "'Test mods' can be joined to the test type string with a ':'. 
" + "Supported mods:\n"); + fprintf(stderr, "\tdev - Use /dev/userfaultfd instead of userfaultfd(2)\n"); + fprintf(stderr, "\nExample test mod usage:\n"); + fprintf(stderr, "# Run anonymous memory test with /dev/userfaultfd:\n"); + fprintf(stderr, "./userfaultfd anon:dev 100 99999\n\n"); + fprintf(stderr, "Examples:\n\n"); fprintf(stderr, "%s", examples); exit(1); @@ -1607,8 +1616,6 @@ unsigned long default_huge_page_size(void) static void set_test_type(const char *type) { - uint64_t features = UFFD_API_FEATURES; - if (!strcmp(type, "anon")) { test_type = TEST_ANON; uffd_test_ops = &anon_uffd_test_ops; @@ -1626,10 +1633,28 @@ static void set_test_type(const char *type) test_type = TEST_SHMEM; uffd_test_ops = &shmem_uffd_test_ops; test_uffdio_minor = true; - } else { - err("Unknown test type: %s", type); + } +} + +static void parse_test_type_arg(const char *raw_type) +{ + char *buf = strdup(raw_type); + uint64_t features = UFFD_API_FEATURES; + + while (buf) { + const char *token = strsep(&buf, ":"); + + if (!test_type) + set_test_type(token); + else if (!strcmp(token, "dev")) + test_dev_userfaultfd = true; + else + err("unrecognized test mod '%s'", token); } + if (!test_type) + err("failed to parse test type argument: '%s'", raw_type); + if (test_type == TEST_HUGETLB) page_size = default_huge_page_size(); else @@ -1676,7 +1701,7 @@ int main(int argc, char **argv) err("failed to arm SIGALRM"); alarm(ALARM_INTERVAL_SECS); - set_test_type(argv[1]); + parse_test_type_arg(argv[1]); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); nr_pages_per_cpu = atol(argv[2]) * 1024*1024 / page_size / @@ -1714,12 +1739,6 @@ int main(int argc, char **argv) } printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n", nr_pages, nr_pages_per_cpu); - - test_dev_userfaultfd = false; - if (userfaultfd_stress()) - return 1; - - test_dev_userfaultfd = true; return userfaultfd_stress(); } From d2cabd1a04db28767959e1ea05fd2ac95773cd56 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Wed, 1 Jun 2022 14:09:51 -0700 Subject: [PATCH 132/321] selftests: vm: add /dev/userfaultfd test cases to run_vmtests.sh This new mode was recently added to the userfaultfd selftest. We want to exercise both userfaultfd(2) as well as /dev/userfaultfd, so add both test cases to the script. Link: https://lkml.kernel.org/r/20220601210951.3916598-7-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Reviewed-by: Shuah Khan Cc: Al Viro Cc: Charan Teja Kalla Cc: Dave Hansen Cc: Dmitry V. Levin Cc: Gleb Fotengauer-Malinovskiy Cc: Hugh Dickins Cc: Jan Kara Cc: Jonathan Corbet Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Peter Xu Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zhang Yi Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/run_vmtests.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh index 9fb92fa39c474..d84fe0fa15e1f 100755 --- a/tools/testing/selftests/vm/run_vmtests.sh +++ b/tools/testing/selftests/vm/run_vmtests.sh @@ -121,12 +121,17 @@ run_test ./gup_test -a run_test ./gup_test -ct -F 0x1 0 19 0x1000 run_test ./userfaultfd anon 20 16 +run_test ./userfaultfd anon:dev 20 16 # Hugetlb tests require source and destination huge pages. Pass in half the # size ($half_ufd_size_MB), which is used for *each*. 
run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32 +run_test ./userfaultfd hugetlb:dev "$half_ufd_size_MB" 32 run_test ./userfaultfd hugetlb_shared "$half_ufd_size_MB" 32 "$mnt"/uffd-test rm -f "$mnt"/uffd-test +run_test ./userfaultfd hugetlb_shared:dev "$half_ufd_size_MB" 32 "$mnt"/uffd-test +rm -f "$mnt"/uffd-test run_test ./userfaultfd shmem 20 16 +run_test ./userfaultfd shmem:dev 20 16 #cleanup umount "$mnt" From 61b6159a2ebc4a4a3c8502a08dcedbe09190efe6 Mon Sep 17 00:00:00 2001 From: Liam Howlett Date: Wed, 15 Jun 2022 17:40:58 +0000 Subject: [PATCH 133/321] mm/mlock: drop dead code in count_mm_mlocked_page_nr() The check for mm being null has never been needed since the only caller has always passed in current->mm. Remove the check from count_mm_mlocked_page_nr(). Link: https://lkml.kernel.org/r/20220615174050.738523-1-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Suggested-by: Lukas Bulwahn Signed-off-by: Andrew Morton --- mm/mlock.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/mlock.c b/mm/mlock.c index 43d19a1f28eb3..7032f6dd0ce19 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -531,14 +531,12 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long end; VMA_ITERATOR(vmi, mm, start); - if (mm == NULL) - mm = current->mm; - /* Don't overflow past ULONG_MAX */ if (unlikely(ULONG_MAX - len < start)) end = ULONG_MAX; else end = start + len; + for_each_vma_range(vmi, vma, end) { if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) From 9492c2606b9a9787fb763191646e37ecb795ee5a Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 16 Jun 2022 10:48:34 -0700 Subject: [PATCH 134/321] mm: khugepaged: check THP flag in hugepage_vma_check() Patch series "Cleanup transhuge_xxx helpers", v5. This series is the follow-up of the discussion about cleaning up transhuge_xxx helpers at https://lore.kernel.org/linux-mm/627a71f8-e879-69a5-ceb3-fc8d29d2f7f1@suse.cz/. THP has a bunch of helpers that do VMA sanity check for different paths, they do the similar checks for the most callsites and have a lot duplicate codes. And it is confusing what helpers should be used at what conditions. This series reorganized and cleaned up the code so that we could consolidate all the checks into hugepage_vma_check(). The transhuge_vma_enabled(), transparent_hugepage_active() and __transparent_hugepage_enabled() are killed by this series. This patch (of 7): Currently the THP flag check in hugepage_vma_check() will fallthrough if the flag is NEVER and VM_HUGEPAGE is set. This is not a problem for now since all the callers have the flag checked before or can't be invoked if the flag is NEVER. However, the following patch will call hugepage_vma_check() in more places, for example, page fault, so this flag must be checked in hugepge_vma_check(). Link: https://lkml.kernel.org/r/20220616174840.1202070-1-shy828301@gmail.com Link: https://lkml.kernel.org/r/20220616174840.1202070-2-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Reviewed-by: Miaohe Lin Cc: Vlastimil Babka Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Signed-off-by: Andrew Morton --- mm/khugepaged.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 37dcf95640e6e..df5bd7c8697c1 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -451,6 +451,9 @@ bool hugepage_vma_check(struct vm_area_struct *vma, if (shmem_file(vma->vm_file)) return shmem_huge_enabled(vma); + if (!khugepaged_enabled()) + return false; + /* THP settings require madvise. */ if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) return false; From 4ffb90c5926e704a44a0154aca3ac1c7482e3708 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 16 Jun 2022 10:48:35 -0700 Subject: [PATCH 135/321] mm: thp: consolidate vma size check to transhuge_vma_suitable There are a couple of places that check whether the vma size is OK for THP or whether the address fits; they are open coded and duplicated. Use transhuge_vma_suitable() to do the job, passing in (vma->end - HPAGE_PMD_SIZE). Move the vma size check into hugepage_vma_check(). This makes khugepaged_enter() the same as khugepaged_enter_vma(). There is just one caller of khugepaged_enter(), so replace it with khugepaged_enter_vma() and remove khugepaged_enter(). Link: https://lkml.kernel.org/r/20220616174840.1202070-3-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 11 +++++++++++ include/linux/khugepaged.h | 14 -------------- mm/huge_memory.c | 2 +- mm/khugepaged.c | 19 ++++++------------- 4 files changed, 18 insertions(+), 28 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 648cb3ce7099b..8a5a8bfce0f54 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -116,6 +116,17 @@ extern struct kobj_attribute shmem_enabled_attr; extern unsigned long transparent_hugepage_flags; +/* + * Do the below checks: + * - For file vma, check if the linear page offset of vma is + * HPAGE_PMD_NR aligned within the file. The hugepage is + * guaranteed to be hugepage-aligned within the file, but we must + * check that the PMD-aligned addresses in the VMA map to + * PMD-aligned offsets within the file, else the hugepage will + * not be PMD-mappable. + * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE + * area.
+ */ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, unsigned long addr) { diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 392d34c3c59aa..31ca8a7f78f4f 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -51,16 +51,6 @@ static inline void khugepaged_exit(struct mm_struct *mm) if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) __khugepaged_exit(mm); } - -static inline void khugepaged_enter(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && - khugepaged_enabled()) { - if (hugepage_vma_check(vma, vm_flags)) - __khugepaged_enter(vma->vm_mm); - } -} #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) { @@ -68,10 +58,6 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm static inline void khugepaged_exit(struct mm_struct *mm) { } -static inline void khugepaged_enter(struct vm_area_struct *vma, - unsigned long vm_flags) -{ -} static inline void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e6598f1d5bcd8..5f505185ad232 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -726,7 +726,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) return VM_FAULT_FALLBACK; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - khugepaged_enter(vma, vma->vm_flags); + khugepaged_enter_vma(vma, vma->vm_flags); if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm) && diff --git a/mm/khugepaged.c b/mm/khugepaged.c index df5bd7c8697c1..a745060763fb5 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -443,8 +443,8 @@ bool hugepage_vma_check(struct vm_area_struct *vma, if (vma_is_dax(vma)) return false; - if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - - vma->vm_pgoff, HPAGE_PMD_NR)) + /* Check alignment for file vma and size for both file and anon vma */ + if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) return false; /* Enabled via shmem mount options or sysfs settings. 
*/ @@ -505,9 +505,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && - khugepaged_enabled() && - (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < - (vma->vm_end & HPAGE_PMD_MASK))) { + khugepaged_enabled()) { if (hugepage_vma_check(vma, vm_flags)) __khugepaged_enter(vma->vm_mm); } @@ -948,7 +946,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, struct vm_area_struct **vmap) { struct vm_area_struct *vma; - unsigned long hstart, hend; if (unlikely(khugepaged_test_exit(mm))) return SCAN_ANY_PROCESS; @@ -957,9 +954,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!vma) return SCAN_VMA_NULL; - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (address < hstart || address + HPAGE_PMD_SIZE > hend) + if (!transhuge_vma_suitable(vma, address)) return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags)) return SCAN_VMA_CHECK; @@ -2140,10 +2135,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, progress++; continue; } - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (hstart >= hend) - goto skip; + hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE); + hend = round_down(vma->vm_end, HPAGE_PMD_SIZE); if (khugepaged_scan.address > hend) goto skip; if (khugepaged_scan.address < hstart) From 0ce32ef82f7afe5585fcc8575b50ea8881628e7f Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 16 Jun 2022 10:48:36 -0700 Subject: [PATCH 136/321] mm: khugepaged: better comments for anon vma check in hugepage_vma_revalidate hugepage_vma_revalidate() needs to check whether the vma is still an anonymous vma, since the address may be unmapped and then remapped to a file before khugepaged reacquired the mmap_lock. The old comment is not very helpful; elaborate with a better comment. Link: https://lkml.kernel.org/r/20220616174840.1202070-4-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/khugepaged.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a745060763fb5..9a63c18b1cfb7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -958,7 +958,13 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags)) return SCAN_VMA_CHECK; - /* Anon VMA expected */ + /* + * Anon VMA expected, the address may be unmapped then + * remapped to file after khugepaged reacquired the mmap_lock. + * + * hugepage_vma_check may return true for qualified file + * vmas. + */ if (!vma->anon_vma || !vma_is_anonymous(vma)) return SCAN_VMA_CHECK; return 0; From 96d7237ce582fa0a84e9c421d1ee03bd33607f0c Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 16 Jun 2022 10:48:37 -0700 Subject: [PATCH 137/321] mm: thp: kill transparent_hugepage_active() transparent_hugepage_active() was introduced to show the THP eligibility bit in smaps in proc; smaps is the only user. But it actually does a similar check to hugepage_vma_check(), which is used by khugepaged. We definitely don't have to maintain two similar checks, so kill transparent_hugepage_active(). This patch also fixes the wrong behavior for VM_NO_KHUGEPAGED vmas.
Also move hugepage_vma_check() to huge_memory.c and huge_mm.h since it is not only for khugepaged anymore. Link: https://lkml.kernel.org/r/20220616174840.1202070-5-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 2 +- include/linux/huge_mm.h | 16 +++++++----- include/linux/khugepaged.h | 2 -- mm/huge_memory.c | 50 +++++++++++++++++++++++++++++++------- mm/khugepaged.c | 48 +++--------------------------------- 5 files changed, 56 insertions(+), 62 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 0f2bb17026552..00f4ff7435d3a 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -866,7 +866,7 @@ static int show_smap(struct seq_file *m, void *v) __show_smap(m, &mss, false); seq_printf(m, "THPeligible: %d\n", - transparent_hugepage_active(vma)); + hugepage_vma_check(vma, vma->vm_flags, true)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 8a5a8bfce0f54..64487bcd0c7ba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -202,7 +202,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); } -bool transparent_hugepage_active(struct vm_area_struct *vma); +bool hugepage_vma_check(struct vm_area_struct *vma, + unsigned long vm_flags, + bool smaps); #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -351,11 +353,6 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } -static inline bool transparent_hugepage_active(struct vm_area_struct *vma) -{ - return false; -} - static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, unsigned long addr) { @@ -368,6 +365,13 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, return false; } +static inline bool hugepage_vma_check(struct vm_area_struct *vma, + unsigned long vm_flags, + bool smaps) +{ + return false; +} + static inline void prep_transhuge_page(struct page *page) {} #define transparent_hugepage_flags 0UL diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 31ca8a7f78f4f..ea5fd4c398f77 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -10,8 +10,6 @@ extern struct attribute_group khugepaged_attr_group; extern int khugepaged_init(void); extern void khugepaged_destroy(void); extern int start_stop_khugepaged(void); -extern bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags); extern void __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern void khugepaged_enter_vma(struct vm_area_struct *vma, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5f505185ad232..9f9e6d7c4dff3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -69,21 +69,53 @@ static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; unsigned long huge_zero_pfn __read_mostly = ~0UL; -bool transparent_hugepage_active(struct vm_area_struct *vma) +bool hugepage_vma_check(struct vm_area_struct *vma, + unsigned long vm_flags, + bool smaps) { - /* The addr is used to check if the vma size fits */ - unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; + if (!transhuge_vma_enabled(vma, vm_flags)) + return false; + + if (vm_flags & VM_NO_KHUGEPAGED) + return false; + + 
/* Don't run khugepaged against DAX vma */ + if (vma_is_dax(vma)) + return false; - if (!transhuge_vma_suitable(vma, addr)) + /* Check alignment for file vma and size for both file and anon vma */ + if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) return false; - if (vma_is_anonymous(vma)) - return __transparent_hugepage_enabled(vma); - if (vma_is_shmem(vma)) + + /* Enabled via shmem mount options or sysfs settings. */ + if (shmem_file(vma->vm_file)) return shmem_huge_enabled(vma); - if (transhuge_vma_enabled(vma, vma->vm_flags) && file_thp_enabled(vma)) + + if (!khugepaged_enabled()) + return false; + + /* THP settings require madvise. */ + if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) + return false; + + /* Only regular file is valid */ + if (file_thp_enabled(vma)) return true; - return false; + if (!vma_is_anonymous(vma)) + return false; + + if (vma_is_temporary_stack(vma)) + return false; + + /* + * THPeligible bit of smaps should show 1 for proper VMAs even + * though anon_vma is not initialized yet. + */ + if (!vma->anon_vma) + return smaps; + + return true; } static bool get_huge_zero_page(void) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 9a63c18b1cfb7..a7a5b61aa7923 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -430,46 +430,6 @@ static inline int khugepaged_test_exit(struct mm_struct *mm) return atomic_read(&mm->mm_users) == 0; } -bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - if (!transhuge_vma_enabled(vma, vm_flags)) - return false; - - if (vm_flags & VM_NO_KHUGEPAGED) - return false; - - /* Don't run khugepaged against DAX vma */ - if (vma_is_dax(vma)) - return false; - - /* Check alignment for file vma and size for both file and anon vma */ - if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) - return false; - - /* Enabled via shmem mount options or sysfs settings. */ - if (shmem_file(vma->vm_file)) - return shmem_huge_enabled(vma); - - if (!khugepaged_enabled()) - return false; - - /* THP settings require madvise. */ - if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) - return false; - - /* Only regular file is valid */ - if (file_thp_enabled(vma)) - return true; - - if (!vma->anon_vma || !vma_is_anonymous(vma)) - return false; - if (vma_is_temporary_stack(vma)) - return false; - - return true; -} - void __khugepaged_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; @@ -506,7 +466,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && khugepaged_enabled()) { - if (hugepage_vma_check(vma, vm_flags)) + if (hugepage_vma_check(vma, vm_flags, false)) __khugepaged_enter(vma->vm_mm); } } @@ -956,7 +916,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!transhuge_vma_suitable(vma, address)) return SCAN_ADDRESS_RANGE; - if (!hugepage_vma_check(vma, vma->vm_flags)) + if (!hugepage_vma_check(vma, vma->vm_flags, false)) return SCAN_VMA_CHECK; /* * Anon VMA expected, the address may be unmapped then @@ -1441,7 +1401,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) * the valid THP. 
Add extra VM_HUGEPAGE so hugepage_vma_check() * will not fail the vma for missing VM_HUGEPAGE */ - if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) + if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false)) return; /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ @@ -2136,7 +2096,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, progress++; break; } - if (!hugepage_vma_check(vma, vma->vm_flags)) { + if (!hugepage_vma_check(vma, vma->vm_flags, false)) { skip: progress++; continue; From 5a4dd3fc00896fe755b807846a0f81e9e50d96da Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 21 Jun 2022 17:51:42 -0700 Subject: [PATCH 138/321] mm-thp-kill-transparent_hugepage_active-fix check vma->vm_mm, per Zach Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9f9e6d7c4dff3..a407539509ef3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -73,6 +73,9 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps) { + if (!vma->vm_mm) + return false; + if (!transhuge_vma_enabled(vma, vm_flags)) return false; From 1f7a1be1a21289261bb270a060d3c64562dc8fc3 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 22 Jun 2022 17:02:45 -0700 Subject: [PATCH 139/321] mm-thp-kill-transparent_hugepage_active-fix-fix add comment to vdso check Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a407539509ef3..f40b49b173df4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -73,7 +73,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps) { - if (!vma->vm_mm) + if (!vma->vm_mm) /* vdso */ return false; if (!transhuge_vma_enabled(vma, vm_flags)) From 866ffc7b77cca3c2ea48e25d2b4636aeb9bd1de7 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 16 Jun 2022 10:48:38 -0700 Subject: [PATCH 140/321] mm: thp: kill __transhuge_page_enabled() The page fault path checks THP eligibility with __transhuge_page_enabled() which does the similar thing as hugepage_vma_check(), so use hugepage_vma_check() instead. However page fault allows DAX and !anon_vma cases, so added a new flag, in_pf, to hugepage_vma_check() to make page fault work correctly. The in_pf flag is also used to skip shmem and file THP for page fault since shmem handles THP in its own shmem_fault() and file THP allocation on fault is not supported yet. Also remove hugepage_vma_enabled() since hugepage_vma_check() is the only caller now, it is not necessary to have a helper function. Link: https://lkml.kernel.org/r/20220616174840.1202070-6-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 2 +- include/linux/huge_mm.h | 57 ++--------------------------------------- mm/huge_memory.c | 51 ++++++++++++++++++++++++++++-------- mm/khugepaged.c | 8 +++--- mm/memory.c | 7 +++-- 5 files changed, 52 insertions(+), 73 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 00f4ff7435d3a..34d292cec79a6 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -866,7 +866,7 @@ static int show_smap(struct seq_file *m, void *v) __show_smap(m, &mss, false); seq_printf(m, "THPeligible: %d\n", - hugepage_vma_check(vma, vma->vm_flags, true)); + hugepage_vma_check(vma, vma->vm_flags, true, false)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 64487bcd0c7ba..cd8a6c5d9fe5a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -146,48 +146,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, return true; } -static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - /* Explicitly disabled through madvise. */ - if ((vm_flags & VM_NOHUGEPAGE) || - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) - return false; - return true; -} - -/* - * to be used on vmas which are known to support THP. - * Use transparent_hugepage_active otherwise - */ -static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) -{ - - /* - * If the hardware/firmware marked hugepage support disabled. - */ - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) - return false; - - if (!transhuge_vma_enabled(vma, vma->vm_flags)) - return false; - - if (vma_is_temporary_stack(vma)) - return false; - - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) - return true; - - if (vma_is_dax(vma)) - return true; - - if (transparent_hugepage_flags & - (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) - return !!(vma->vm_flags & VM_HUGEPAGE); - - return false; -} - static inline bool file_thp_enabled(struct vm_area_struct *vma) { struct inode *inode; @@ -204,7 +162,7 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps); + bool smaps, bool in_pf); #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -348,26 +306,15 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) return false; } -static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) -{ - return false; -} - static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, unsigned long addr) { return false; } -static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - return false; -} - static inline bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps) + bool smaps, bool in_pf) { return false; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f40b49b173df4..31fbfa89f6484 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -71,27 +71,53 @@ unsigned long huge_zero_pfn __read_mostly = ~0UL; bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps) + bool smaps, bool in_pf) { if (!vma->vm_mm) /* vdso */ return false; - if (!transhuge_vma_enabled(vma, vm_flags)) + /* + * Explicitly disabled through madvise or prctl, or some + * architectures may 
disable THP for some mappings, for + * example, s390 kvm. + * */ + if ((vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) return false; - - if (vm_flags & VM_NO_KHUGEPAGED) + /* + * If the hardware/firmware marked hugepage support disabled. + */ + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) return false; - /* Don't run khugepaged against DAX vma */ + /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ if (vma_is_dax(vma)) + return in_pf; + + /* + * Special VMA and hugetlb VMA. + * Must be checked after dax since some dax mappings may have + * VM_MIXEDMAP set. + */ + if (vm_flags & VM_NO_KHUGEPAGED) return false; - /* Check alignment for file vma and size for both file and anon vma */ - if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) + /* + * Check alignment for file vma and size for both file and anon vma. + * + * Skip the check for page fault. Huge fault does the check in fault + * handlers. And this check is not suitable for huge PUD fault. + */ + if (!in_pf && + !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) return false; - /* Enabled via shmem mount options or sysfs settings. */ - if (shmem_file(vma->vm_file)) + /* + * Enabled via shmem mount options or sysfs settings. + * Must be done before hugepage flags check since shmem has its + * own flags. + */ + if (!in_pf && shmem_file(vma->vm_file)) return shmem_huge_enabled(vma); if (!khugepaged_enabled()) @@ -102,7 +128,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, return false; /* Only regular file is valid */ - if (file_thp_enabled(vma)) + if (!in_pf && file_thp_enabled(vma)) return true; if (!vma_is_anonymous(vma)) @@ -114,9 +140,12 @@ bool hugepage_vma_check(struct vm_area_struct *vma, /* * THPeligible bit of smaps should show 1 for proper VMAs even * though anon_vma is not initialized yet. + * + * Allow page fault since anon_vma may be not initialized until + * the first page fault. */ if (!vma->anon_vma) - return smaps; + return (smaps || in_pf); return true; } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a7a5b61aa7923..51ab2487d94a4 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -466,7 +466,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && khugepaged_enabled()) { - if (hugepage_vma_check(vma, vm_flags, false)) + if (hugepage_vma_check(vma, vm_flags, false, false)) __khugepaged_enter(vma->vm_mm); } } @@ -916,7 +916,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!transhuge_vma_suitable(vma, address)) return SCAN_ADDRESS_RANGE; - if (!hugepage_vma_check(vma, vma->vm_flags, false)) + if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) return SCAN_VMA_CHECK; /* * Anon VMA expected, the address may be unmapped then @@ -1401,7 +1401,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) * the valid THP. 
Add extra VM_HUGEPAGE so hugepage_vma_check() * will not fail the vma for missing VM_HUGEPAGE */ - if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false)) + if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false)) return; /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ @@ -2096,7 +2096,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, progress++; break; } - if (!hugepage_vma_check(vma, vma->vm_flags, false)) { + if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) { skip: progress++; continue; diff --git a/mm/memory.c b/mm/memory.c index 5c14a9037b810..29e44dcc0179c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4987,6 +4987,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, .gfp_mask = __get_fault_gfp_mask(vma), }; struct mm_struct *mm = vma->vm_mm; + unsigned long vm_flags = vma->vm_flags; pgd_t *pgd; p4d_t *p4d; vm_fault_t ret; @@ -5000,7 +5001,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, if (!vmf.pud) return VM_FAULT_OOM; retry_pud: - if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { + if (pud_none(*vmf.pud) && + hugepage_vma_check(vma, vm_flags, false, true)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; @@ -5033,7 +5035,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, if (pud_trans_unstable(vmf.pud)) goto retry_pud; - if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { + if (pmd_none(*vmf.pmd) && + hugepage_vma_check(vma, vm_flags, false, true)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; From 47a5d55416e41f6571fdb103f552ade6a42dabfd Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 16 Jun 2022 10:48:39 -0700 Subject: [PATCH 141/321] mm: khugepaged: reorg some khugepaged helpers The khugepaged_{enabled|always|req_madv} are not khugepaged only anymore, move them to huge_mm.h and rename to hugepage_flags_xxx, and remove khugepaged_req_madv due to no users. Also move khugepaged_defrag to khugepaged.c since its only caller is in that file, it doesn't have to be in a header file. Link: https://lkml.kernel.org/r/20220616174840.1202070-7-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 8 ++++++++ include/linux/khugepaged.h | 14 -------------- mm/huge_memory.c | 4 ++-- mm/khugepaged.c | 18 +++++++++++------- 4 files changed, 21 insertions(+), 23 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index cd8a6c5d9fe5a..ae3d8e2fd9e2f 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -116,6 +116,14 @@ extern struct kobj_attribute shmem_enabled_attr; extern unsigned long transparent_hugepage_flags; +#define hugepage_flags_enabled() \ + (transparent_hugepage_flags & \ + ((1<flags)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 31fbfa89f6484..532ede06a10f7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -120,11 +120,11 @@ bool hugepage_vma_check(struct vm_area_struct *vma, if (!in_pf && shmem_file(vma->vm_file)) return shmem_huge_enabled(vma); - if (!khugepaged_enabled()) + if (!hugepage_flags_enabled()) return false; /* THP settings require madvise. 
*/ - if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) + if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always()) return false; /* Only regular file is valid */ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 51ab2487d94a4..cfe231c5958f7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -465,7 +465,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && - khugepaged_enabled()) { + hugepage_flags_enabled()) { if (hugepage_vma_check(vma, vm_flags, false, false)) __khugepaged_enter(vma->vm_mm); } @@ -761,6 +761,10 @@ static bool khugepaged_scan_abort(int nid) return false; } +#define khugepaged_defrag() \ + (transparent_hugepage_flags & \ + (1< Date: Thu, 16 Jun 2022 10:48:40 -0700 Subject: [PATCH 142/321] doc: proc: fix the description to THPeligible The THPeligible bit shows 1 if and only if the VMA is eligible for allocating THP and the THP is also PMD mappable. Some misaligned file VMAs may be eligible for allocating THP but the THP can't be mapped by PMD. Make this more explicitly to avoid ambiguity. Link: https://lkml.kernel.org/r/20220616174840.1202070-8-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: Zach O'Keefe Cc: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 0b5120ff506c8..e7aafc82be999 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -517,8 +517,10 @@ replaced by copy-on-write) part of the underlying shmem object out on swap. "SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this does not take into account swapped out page of underlying shmem objects. "Locked" indicates whether the mapping is locked in memory or not. + "THPeligible" indicates whether the mapping is eligible for allocating THP -pages - 1 if true, 0 otherwise. It just shows the current status. +pages as well as the THP is PMD mappable or not - 1 if true, 0 otherwise. +It just shows the current status. "VmFlags" field deserves a separate description. This member represents the kernel flags associated with the particular virtual memory area in two letter From a278d5f6723808f76a453985bdbbd8eaa5d5d87b Mon Sep 17 00:00:00 2001 From: Kuan-Ying Lee Date: Wed, 15 Jun 2022 14:22:18 +0800 Subject: [PATCH 143/321] kasan: separate double free case from invalid free Currently, KASAN describes all invalid-free/double-free bugs as "double-free or invalid-free". This is ambiguous. KASAN should report "double-free" when a double-free is a more likely cause (the address points to the start of an object) and report "invalid-free" otherwise [1]. 
[1] https://bugzilla.kernel.org/show_bug.cgi?id=212193 Link: https://lkml.kernel.org/r/20220615062219.22618-1-Kuan-Ying.Lee@mediatek.com Signed-off-by: Kuan-Ying Lee Reviewed-by: Dmitry Vyukov Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Vincenzo Frascino Cc: Matthias Brugger Cc: Chinwen Chang Cc: Yee Lee Cc: Andrew Yang Signed-off-by: Andrew Morton --- mm/kasan/common.c | 8 ++++---- mm/kasan/kasan.h | 3 ++- mm/kasan/report.c | 12 ++++++++---- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/mm/kasan/common.c b/mm/kasan/common.c index c40c0e7b3b5f1..707c3a527fcbd 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -343,7 +343,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object, if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) { - kasan_report_invalid_free(tagged_object, ip); + kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE); return true; } @@ -352,7 +352,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object, return false; if (!kasan_byte_accessible(tagged_object)) { - kasan_report_invalid_free(tagged_object, ip); + kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE); return true; } @@ -377,12 +377,12 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip) { if (ptr != page_address(virt_to_head_page(ptr))) { - kasan_report_invalid_free(ptr, ip); + kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE); return true; } if (!kasan_byte_accessible(ptr)) { - kasan_report_invalid_free(ptr, ip); + kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE); return true; } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 610d60d6e5b83..01c03e45acd42 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -125,6 +125,7 @@ static inline bool kasan_sync_fault_possible(void) enum kasan_report_type { KASAN_REPORT_ACCESS, KASAN_REPORT_INVALID_FREE, + KASAN_REPORT_DOUBLE_FREE, }; struct kasan_report_info { @@ -277,7 +278,7 @@ static inline void kasan_print_address_stack_frame(const void *addr) { } bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -void kasan_report_invalid_free(void *object, unsigned long ip); +void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type); struct page *kasan_addr_to_page(const void *addr); struct slab *kasan_addr_to_slab(const void *addr); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index b341a191651d3..fe3f606b3a986 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -176,8 +176,12 @@ static void end_report(unsigned long *flags, void *addr) static void print_error_description(struct kasan_report_info *info) { if (info->type == KASAN_REPORT_INVALID_FREE) { - pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", - (void *)info->ip); + pr_err("BUG: KASAN: invalid-free in %pS\n", (void *)info->ip); + return; + } + + if (info->type == KASAN_REPORT_DOUBLE_FREE) { + pr_err("BUG: KASAN: double-free in %pS\n", (void *)info->ip); return; } @@ -433,7 +437,7 @@ static void print_report(struct kasan_report_info *info) } } -void kasan_report_invalid_free(void *ptr, unsigned long ip) +void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_type type) { unsigned long flags; struct kasan_report_info info; @@ -448,7 +452,7 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip) start_report(&flags, true); - info.type 
= KASAN_REPORT_INVALID_FREE; + info.type = type; info.access_addr = ptr; info.first_bad_addr = kasan_reset_tag(ptr); info.access_size = 0; From c6746a759ba74a0d30af470fd06474f7e4ef1845 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 18 Jun 2022 16:20:27 +0800 Subject: [PATCH 144/321] mm/mmap.c: fix missing call to vm_unacct_memory in mmap_region Since the beginning, charged is set to 0 to avoid calling vm_unacct_memory twice because vm_unacct_memory will be called by above unmap_region. But since commit 4f74d2c8e827 ("vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces"), unmap_region doesn't call vm_unacct_memory anymore. So charged shouldn't be set to 0 now otherwise the calling to paired vm_unacct_memory will be missed and leads to imbalanced account. Link: https://lkml.kernel.org/r/20220618082027.43391-1-linmiaohe@huawei.com Fixes: 4f74d2c8e827 ("vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces") Signed-off-by: Miaohe Lin Signed-off-by: Andrew Morton --- mm/mmap.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/mmap.c b/mm/mmap.c index d71f990447c6e..8236834728ec3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2812,7 +2812,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, /* Undo any partial mapping done by a device driver. */ unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end); - charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); free_vma: From 46ca01f0a3f759b27625558ef68fe4421ef52d1e Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 21 Jun 2022 16:56:17 -0700 Subject: [PATCH 145/321] hugetlb: skip to end of PT page mapping when pte not present Patch series "hugetlb: speed up linear address scanning", v2. At unmap, fork and remap time hugetlb address ranges are linearly scanned. We can optimize these scans if the ranges are sparsely populated. Also, enable page table "Lazy copy" for hugetlb at fork. NOTE: Architectures not defining CONFIG_ARCH_WANT_GENERAL_HUGETLB need to add an arch specific version hugetlb_mask_last_page() to take advantage of sparse address scanning improvements. Baolin Wang added the routine for arm64. Other architectures which could be optimized are: ia64, mips, parisc, powerpc, s390, sh and sparc. This patch (of 4): HugeTLB address ranges are linearly scanned during fork, unmap and remap operations. If a non-present entry is encountered, the code currently continues to the next huge page aligned address. However, a non-present entry implies that the page table page for that entry is not present. Therefore, the linear scan can skip to the end of range mapped by the page table page. This can speed operations on large sparsely populated hugetlb mappings. Create a new routine hugetlb_mask_last_page() that will return an address mask. When the mask is ORed with an address, the result will be the address of the last huge page mapped by the associated page table page. Use this mask to update addresses in routines which linearly scan hugetlb address ranges when a non-present pte is encountered. hugetlb_mask_last_page is related to the implementation of huge_pte_offset as hugetlb_mask_last_page is called when huge_pte_offset returns NULL. This patch only provides a complete hugetlb_mask_last_page implementation when CONFIG_ARCH_WANT_GENERAL_HUGETLB is defined. Architectures which provide their own versions of huge_pte_offset can also provide their own version of hugetlb_mask_last_page. 
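As a concrete illustration of the mask arithmetic (a standalone sketch, not kernel code, assuming x86-64 style constants: 2MB PMD huge pages, each PMD page table page mapping 1GB; the sample address is made up):

  #include <stdio.h>

  int main(void)
  {
          unsigned long pmd_size = 1UL << 21;        /* 2MB huge page size (sz)            */
          unsigned long pud_size = 1UL << 30;        /* range mapped by one PMD page       */
          unsigned long mask = pud_size - pmd_size;  /* what hugetlb_mask_last_page() gives */
          unsigned long addr = 0x40200000UL;         /* huge_pte_offset() returned NULL here */

          addr |= mask;       /* 0x7fe00000: last huge page mapped by this PMD page          */
          addr += pmd_size;   /* loop increment: 0x80000000, first address of the next PMD page */
          printf("scan resumes at %#lx\n", addr);
          return 0;
  }

ORing in the mask skips the remaining, known-empty slots of the current page table page in one step instead of iterating over them one huge page at a time.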
Link: https://lkml.kernel.org/r/20220621235620.291305-1-mike.kravetz@oracle.com Link: https://lkml.kernel.org/r/20220621235620.291305-2-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Tested-by: Baolin Wang Reviewed-by: Baolin Wang Acked-by: Muchun Song Reported-by: kernel test robot Cc: Michal Hocko Cc: Peter Xu Cc: Naoya Horiguchi Cc: James Houghton Cc: Mina Almasry Cc: "Aneesh Kumar K.V" Cc: Anshuman Khandual Cc: Paul Walmsley Cc: Christian Borntraeger Cc: Catalin Marinas Cc: Will Deacon Cc: Rolf Eike Beer Cc: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 1 + mm/hugetlb.c | 56 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 52 insertions(+), 5 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index c6cccfaf8708b..ce30fad5fd139 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -194,6 +194,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); +unsigned long hugetlb_mask_last_page(struct hstate *h); int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long *addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index faf816b6c573d..c003b37308ab3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4727,6 +4727,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, unsigned long npages = pages_per_huge_page(h); struct address_space *mapping = src_vma->vm_file->f_mapping; struct mmu_notifier_range range; + unsigned long last_addr_mask; int ret = 0; if (cow) { @@ -4746,11 +4747,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, i_mmap_lock_read(mapping); } + last_addr_mask = hugetlb_mask_last_page(h); for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { spinlock_t *src_ptl, *dst_ptl; src_pte = huge_pte_offset(src, addr, sz); - if (!src_pte) + if (!src_pte) { + addr |= last_addr_mask; continue; + } dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); if (!dst_pte) { ret = -ENOMEM; @@ -4767,8 +4771,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, * after taking the lock below. 
*/ dst_entry = huge_ptep_get(dst_pte); - if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) + if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) { + addr |= last_addr_mask; continue; + } dst_ptl = huge_pte_lock(h, dst, dst_pte); src_ptl = huge_pte_lockptr(h, src, src_pte); @@ -4933,6 +4939,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, unsigned long sz = huge_page_size(h); struct mm_struct *mm = vma->vm_mm; unsigned long old_end = old_addr + len; + unsigned long last_addr_mask; unsigned long old_addr_copy; pte_t *src_pte, *dst_pte; struct mmu_notifier_range range; @@ -4948,12 +4955,16 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); + last_addr_mask = hugetlb_mask_last_page(h); /* Prevent race with file truncation */ i_mmap_lock_write(mapping); for (; old_addr < old_end; old_addr += sz, new_addr += sz) { src_pte = huge_pte_offset(mm, old_addr, sz); - if (!src_pte) + if (!src_pte) { + old_addr |= last_addr_mask; + new_addr |= last_addr_mask; continue; + } if (huge_pte_none(huge_ptep_get(src_pte))) continue; @@ -4998,6 +5009,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); struct mmu_notifier_range range; + unsigned long last_addr_mask; bool force_flush = false; WARN_ON(!is_vm_hugetlb_page(vma)); @@ -5018,11 +5030,14 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); mmu_notifier_invalidate_range_start(&range); + last_addr_mask = hugetlb_mask_last_page(h); address = start; for (; address < end; address += sz) { ptep = huge_pte_offset(mm, address, sz); - if (!ptep) + if (!ptep) { + address |= last_addr_mask; continue; + } ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, vma, &address, ptep)) { @@ -6290,6 +6305,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long pages = 0, psize = huge_page_size(h); bool shared_pmd = false; struct mmu_notifier_range range; + unsigned long last_addr_mask; bool uffd_wp = cp_flags & MM_CP_UFFD_WP; bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; @@ -6306,12 +6322,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); + last_addr_mask = hugetlb_mask_last_page(h); i_mmap_lock_write(vma->vm_file->f_mapping); for (; address < end; address += psize) { spinlock_t *ptl; ptep = huge_pte_offset(mm, address, psize); - if (!ptep) + if (!ptep) { + address |= last_addr_mask; continue; + } ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, vma, &address, ptep)) { /* @@ -6861,6 +6880,33 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return (pte_t *)pmd; } +/* + * Return a mask that can be used to update an address to the last huge + * page in a page table page mapping size. Used to skip non-present + * page table entries when linearly scanning address ranges. Architectures + * with unique huge page to page table relationships can define their own + * version of this routine. + */ +unsigned long hugetlb_mask_last_page(struct hstate *h) +{ + unsigned long hp_size = huge_page_size(h); + + if (hp_size == PUD_SIZE) + return P4D_SIZE - PUD_SIZE; + else if (hp_size == PMD_SIZE) + return PUD_SIZE - PMD_SIZE; + else + return 0UL; +} + +#else + +/* See description above. 
Architectures can provide their own version. */ +__weak unsigned long hugetlb_mask_last_page(struct hstate *h) +{ + return 0UL; +} + #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ /* From 191206dd740e343a2bb3623087eac4b255ccd682 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 21 Jun 2022 16:56:18 -0700 Subject: [PATCH 146/321] arm64/hugetlb: implement arm64 specific hugetlb_mask_last_page The HugeTLB address ranges are linearly scanned during fork, unmap and remap operations, and the linear scan can skip to the end of range mapped by the page table page if hitting a non-present entry, which can help to speed linear scanning of the HugeTLB address ranges. So hugetlb_mask_last_page() is introduced to help to update the address in the loop of HugeTLB linear scanning with getting the last huge page mapped by the associated page table page[1], when a non-present entry is encountered. Considering ARM64 specific cont-pte/pmd size HugeTLB, this patch implemented an ARM64 specific hugetlb_mask_last_page() to help this case. [1] https://lore.kernel.org/linux-mm/20220527225849.284839-1-mike.kravetz@oracle.com/ Link: https://lkml.kernel.org/r/20220621235620.291305-3-mike.kravetz@oracle.com Signed-off-by: Baolin Wang Signed-off-by: Mike Kravetz Acked-by: Muchun Song Cc: "Aneesh Kumar K.V" Cc: Anshuman Khandual Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David Hildenbrand Cc: James Houghton Cc: kernel test robot Cc: Michal Hocko Cc: Mina Almasry Cc: Naoya Horiguchi Cc: Paul Walmsley Cc: Peter Xu Cc: Rolf Eike Beer Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm64/mm/hugetlbpage.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 3be8f25aa5bea..58b89b9d13e07 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -368,6 +368,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return NULL; } +unsigned long hugetlb_mask_last_page(struct hstate *h) +{ + unsigned long hp_size = huge_page_size(h); + + switch (hp_size) { + case PUD_SIZE: + return PGDIR_SIZE - PUD_SIZE; + case CONT_PMD_SIZE: + return PUD_SIZE - CONT_PMD_SIZE; + case PMD_SIZE: + return PUD_SIZE - PMD_SIZE; + case CONT_PTE_SIZE: + return PMD_SIZE - CONT_PTE_SIZE; + default: + break; + } + + return 0UL; +} + pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { size_t pagesize = 1UL << shift; From e198c8e83858cfb7123aaebdc120456ba227528a Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 27 Jun 2022 13:55:37 +0800 Subject: [PATCH 147/321] arm64-hugetlb-implement-arm64-specific-hugetlb_mask_last_page-fix I missed the case that '__PAGETABLE_PMD_FOLDED', was defined, which PUD is same with PMD. 
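For illustration, a reduced, hypothetical example of the kind of failure this guards against (simplified names and values, not the actual robot report): with PMD folded, PUD_SIZE and PMD_SIZE evaluate to the same constant, so an unguarded switch contains duplicate case labels and fails to build.

  /* Hypothetical reduced example assuming a folded-PMD configuration. */
  #define EXAMPLE_PMD_SIZE (1UL << 21)
  #define EXAMPLE_PUD_SIZE EXAMPLE_PMD_SIZE          /* folded: same value as PMD */

  unsigned long example_mask(unsigned long hp_size)
  {
          switch (hp_size) {
          case EXAMPLE_PUD_SIZE:     /* duplicate case value ...                  */
                  return 0;
          case EXAMPLE_PMD_SIZE:     /* ... unless the PUD case is compiled out   */
                  return 0;
          default:
                  return 0;
          }
  }

The #ifndef added below compiles the PUD_SIZE case out in exactly that configuration.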
Link: https://lkml.kernel.org/r/a14e7b39-6a8a-4609-b4a1-84ac574f5c96@linux.alibaba.com Signed-off-by: Baolin Wang Reported-by: kernel test robot Signed-off-by: Andrew Morton --- arch/arm64/mm/hugetlbpage.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 58b89b9d13e07..7430060cb0d6d 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -373,8 +373,10 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) unsigned long hp_size = huge_page_size(h); switch (hp_size) { +#ifndef __PAGETABLE_PMD_FOLDED case PUD_SIZE: return PGDIR_SIZE - PUD_SIZE; +#endif case CONT_PMD_SIZE: return PUD_SIZE - CONT_PMD_SIZE; case PMD_SIZE: From 40dbd150855ef4098bcff9a4956bd76236174f67 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 21 Jun 2022 16:56:19 -0700 Subject: [PATCH 148/321] hugetlb: do not update address in huge_pmd_unshare As an optimization for loops sequentially processing hugetlb address ranges, huge_pmd_unshare would update a passed address if it unshared a pmd. Updating a loop control variable outside the loop like this is generally a bad idea. These loops are now using hugetlb_mask_last_page to optimize scanning when non-present ptes are discovered. The same can be done when huge_pmd_unshare returns 1 indicating a pmd was unshared. Remove address update from huge_pmd_unshare. Change the passed argument type and update all callers. In loops sequentially processing addresses use hugetlb_mask_last_page to update address if pmd is unshared. Link: https://lkml.kernel.org/r/20220621235620.291305-4-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Acked-by: Muchun Song Reviewed-by: Baolin Wang Cc: "Aneesh Kumar K.V" Cc: Anshuman Khandual Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David Hildenbrand Cc: James Houghton Cc: kernel test robot Cc: Michal Hocko Cc: Mina Almasry Cc: Naoya Horiguchi Cc: Paul Walmsley Cc: Peter Xu Cc: Rolf Eike Beer Cc: Will Deacon Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 46 +++++++++++++++++------------------------ mm/rmap.c | 4 ++-- 3 files changed, 23 insertions(+), 31 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ce30fad5fd139..75ee739d815b9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -196,7 +196,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); unsigned long hugetlb_mask_last_page(struct hstate *h); int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep); + unsigned long addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -243,7 +243,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write( static inline int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep) + unsigned long addr, pte_t *ptep) { return 0; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c003b37308ab3..e2e50714b4e47 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4940,7 +4940,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; unsigned long old_end = old_addr + len; unsigned long last_addr_mask; - unsigned long old_addr_copy; pte_t *src_pte, *dst_pte; struct mmu_notifier_range range; bool shared_pmd = false; @@ -4968,14 
+4967,10 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, if (huge_pte_none(huge_ptep_get(src_pte))) continue; - /* old_addr arg to huge_pmd_unshare() is a pointer and so the - * arg may be modified. Pass a copy instead to preserve the - * value in old_addr. - */ - old_addr_copy = old_addr; - - if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) { + if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { shared_pmd = true; + old_addr |= last_addr_mask; + new_addr |= last_addr_mask; continue; } @@ -5040,10 +5035,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct } ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, vma, &address, ptep)) { + if (huge_pmd_unshare(mm, vma, address, ptep)) { spin_unlock(ptl); tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); force_flush = true; + address |= last_addr_mask; continue; } @@ -6332,7 +6328,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, continue; } ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, vma, &address, ptep)) { + if (huge_pmd_unshare(mm, vma, address, ptep)) { /* * When uffd-wp is enabled on the vma, unshare * shouldn't happen at all. Warn about it if it @@ -6342,6 +6338,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, pages++; spin_unlock(ptl); shared_pmd = true; + address |= last_addr_mask; continue; } pte = huge_ptep_get(ptep); @@ -6764,11 +6761,11 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, * 0 the underlying pte page is not shared, or it is the last user */ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep) + unsigned long addr, pte_t *ptep) { - pgd_t *pgd = pgd_offset(mm, *addr); - p4d_t *p4d = p4d_offset(pgd, *addr); - pud_t *pud = pud_offset(p4d, *addr); + pgd_t *pgd = pgd_offset(mm, addr); + p4d_t *p4d = p4d_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); i_mmap_assert_write_locked(vma->vm_file->f_mapping); BUG_ON(page_count(virt_to_page(ptep)) == 0); @@ -6778,14 +6775,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, pud_clear(pud); put_page(virt_to_page(ptep)); mm_dec_nr_pmds(mm); - /* - * This update of passed address optimizes loops sequentially - * processing addresses in increments of huge page size (PMD_SIZE - * in this case). By clearing the pud, a PUD_SIZE area is unmapped. - * Update address to the 'last page' in the cleared area so that - * calling loop can move to first page past this area. - */ - *addr |= PUD_SIZE - PMD_SIZE; return 1; } @@ -6797,7 +6786,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, } int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep) + unsigned long addr, pte_t *ptep) { return 0; } @@ -6904,6 +6893,12 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) /* See description above. Architectures can provide their own version. 
*/ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) { + unsigned long hp_size = huge_page_size(h); + +#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE + if (hp_size == PMD_SIZE) + return PUD_SIZE - PMD_SIZE; +#endif return 0UL; } @@ -7130,14 +7125,11 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) mmu_notifier_invalidate_range_start(&range); i_mmap_lock_write(vma->vm_file->f_mapping); for (address = start; address < end; address += PUD_SIZE) { - unsigned long tmp = address; - ptep = huge_pte_offset(mm, address, sz); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); - /* We don't want 'address' to be changed */ - huge_pmd_unshare(mm, vma, &tmp, ptep); + huge_pmd_unshare(mm, vma, address, ptep); spin_unlock(ptl); } flush_hugetlb_tlb_range(vma, start, end); diff --git a/mm/rmap.c b/mm/rmap.c index 2a5f1d2d1247a..edc06c52bc82e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1559,7 +1559,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, * do this outside rmap routines. */ VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED)); - if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { + if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) { flush_tlb_range(vma, range.start, range.end); mmu_notifier_invalidate_range(mm, range.start, range.end); @@ -1935,7 +1935,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, * do this outside rmap routines. */ VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED)); - if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { + if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) { flush_tlb_range(vma, range.start, range.end); mmu_notifier_invalidate_range(mm, range.start, range.end); From 4d04f1a7305c1a78724e7a365e2f66b639055848 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 22 Jun 2022 17:04:46 +1000 Subject: [PATCH 149/321] hugetlb: fix an unused variable warning/error Link: https://lkml.kernel.org/r/20220622171117.70850960@canb.auug.org.au Signed-off-by: Stephen Rothwell Cc: Mike Kravetz Signed-off-by: Andrew Morton --- mm/hugetlb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e2e50714b4e47..08e88d04a2d6d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6893,10 +6893,8 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) /* See description above. Architectures can provide their own version. */ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) { - unsigned long hp_size = huge_page_size(h); - #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE - if (hp_size == PMD_SIZE) + if (huge_page_size(h) == PMD_SIZE) return PUD_SIZE - PMD_SIZE; #endif return 0UL; From e573d53e2aea1f1c59fc4076de68ed2feb7638a9 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 21 Jun 2022 16:56:20 -0700 Subject: [PATCH 150/321] hugetlb: lazy page table copies in fork() Lazy page table copying at fork time was introduced with commit d992895ba2b2 ("[PATCH] Lazy page table copies in fork()"). At the time, hugetlb was very new and did not support page faulting. As a result, it was excluded. When full page fault support was added for hugetlb, the exclusion was not removed. Simply remove the check that prevents lazy copying of hugetlb page tables at fork. Of course, like other mappings this only applies to shared mappings. 
Lazy page table copying at fork will be less advantageous for hugetlb mappings because: - There are fewer page table entries with hugetlb - hugetlb pmds can be shared instead of copied In any case, completely eliminating the copy at fork time should speed things up. Link: https://lkml.kernel.org/r/20220621235620.291305-5-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Acked-by: Muchun Song Acked-by: David Hildenbrand Cc: "Aneesh Kumar K.V" Cc: Anshuman Khandual Cc: Baolin Wang Cc: Catalin Marinas Cc: Christian Borntraeger Cc: James Houghton Cc: kernel test robot Cc: Michal Hocko Cc: Mina Almasry Cc: Naoya Horiguchi Cc: Paul Walmsley Cc: Peter Xu Cc: Rolf Eike Beer Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 29e44dcc0179c..f77e8b2a651ec 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1262,7 +1262,7 @@ vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) if (userfaultfd_wp(dst_vma)) return true; - if (src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) + if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return true; if (src_vma->anon_vma) From 8f3dcbde67437e8b7523f79e1671bb7086de70cc Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 24 Jun 2022 13:54:17 +0100 Subject: [PATCH 151/321] mm/page_alloc: add page->buddy_list and page->pcp_list Patch series "Drain remote per-cpu directly", v5. Some setups, notably NOHZ_FULL CPUs, may be running realtime or latency-sensitive applications that cannot tolerate interference due to per-cpu drain work queued by __drain_all_pages(). Introduce a new mechanism to remotely drain the per-cpu lists. It is made possible by remotely locking 'struct per_cpu_pages' new per-cpu spinlocks. This has two advantages, the time to drain is more predictable and other unrelated tasks are not interrupted. This series has the same intent as Nicolas' series "mm/page_alloc: Remote per-cpu lists drain support" -- avoid interference of a high priority task due to a workqueue item draining per-cpu page lists. While many workloads can tolerate a brief interruption, it may cause a real-time task running on a NOHZ_FULL CPU to miss a deadline and at minimum, the draining is non-deterministic. Currently an IRQ-safe local_lock protects the page allocator per-cpu lists. The local_lock on its own prevents migration and the IRQ disabling protects from corruption due to an interrupt arriving while a page allocation is in progress. This series adjusts the locking. A spinlock is added to struct per_cpu_pages to protect the list contents while local_lock_irq is ultimately replaced by just the spinlock in the final patch. This allows a remote CPU to safely. Follow-on work should allow the spin_lock_irqsave to be converted to spin_lock to avoid IRQs being disabled/enabled in most cases. The follow-on patch will be one kernel release later as it is relatively high risk and it'll make bisections more clear if there are any problems. Patch 1 is a cosmetic patch to clarify when page->lru is storing buddy pages and when it is storing per-cpu pages. Patch 2 shrinks per_cpu_pages to make room for a spin lock. Strictly speaking this is not necessary but it avoids per_cpu_pages consuming another cache line. Patch 3 is a preparation patch to avoid code duplication. Patch 4 is a minor correction. 
Patch 5 uses a spin_lock to protect the per_cpu_pages contents while still relying on local_lock to prevent migration, stabilise the pcp lookup and prevent IRQ reentrancy. Patch 6 remote drains per-cpu pages directly instead of using a workqueue. Patch 7 uses a normal spinlock instead of local_lock for remote draining This patch (of 7): The page allocator uses page->lru for storing pages on either buddy or PCP lists. Create page->buddy_list and page->pcp_list as a union with page->lru. This is simply to clarify what type of list a page is on in the page allocator. No functional change intended. [minchan@kernel.org: fix page lru fields in macros] Link: https://lkml.kernel.org/r/20220624125423.6126-2-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Minchan Kim Acked-by: Minchan Kim Reviewed-by: Nicolas Saenz Julienne Acked-by: Vlastimil Babka Tested-by: Yu Zhao Cc: Marcelo Tosatti Cc: Michal Hocko Cc: Hugh Dickins Cc: Marek Szyprowski Signed-off-by: Andrew Morton --- include/linux/mm_types.h | 5 +++++ mm/page_alloc.c | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5cc2cb396ca7f..acbd8d03e01ef 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -88,6 +88,7 @@ struct page { */ union { struct list_head lru; + /* Or, for the Unevictable "LRU list" slot */ struct { /* Always even, to negate PageTail */ @@ -95,6 +96,10 @@ struct page { /* Count page's or folio's mlocks */ unsigned int mlock_count; }; + + /* Or, free page */ + struct list_head buddy_list; + struct list_head pcp_list; }; /* See page-flags.h for PAGE_MAPPING_FLAGS */ struct address_space *mapping; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1b134f0c7e39a..6bbe08a3f84c3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -793,7 +793,7 @@ static inline bool set_page_guard(struct zone *zone, struct page *page, return false; __SetPageGuard(page); - INIT_LIST_HEAD(&page->lru); + INIT_LIST_HEAD(&page->buddy_list); set_page_private(page, order); /* Guard pages are not available for any usage */ __mod_zone_freepage_state(zone, -(1 << order), migratetype); @@ -936,7 +936,7 @@ static inline void add_to_free_list(struct page *page, struct zone *zone, { struct free_area *area = &zone->free_area[order]; - list_add(&page->lru, &area->free_list[migratetype]); + list_add(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; } @@ -946,7 +946,7 @@ static inline void add_to_free_list_tail(struct page *page, struct zone *zone, { struct free_area *area = &zone->free_area[order]; - list_add_tail(&page->lru, &area->free_list[migratetype]); + list_add_tail(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; } @@ -960,7 +960,7 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, { struct free_area *area = &zone->free_area[order]; - list_move_tail(&page->lru, &area->free_list[migratetype]); + list_move_tail(&page->buddy_list, &area->free_list[migratetype]); } static inline void del_page_from_free_list(struct page *page, struct zone *zone, @@ -970,7 +970,7 @@ static inline void del_page_from_free_list(struct page *page, struct zone *zone, if (page_reported(page)) __ClearPageReported(page); - list_del(&page->lru); + list_del(&page->buddy_list); __ClearPageBuddy(page); set_page_private(page, 0); zone->free_area[order].nr_free--; @@ -1508,11 +1508,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, do { int mt; - page = list_last_entry(list, 
struct page, lru); + page = list_last_entry(list, struct page, pcp_list); mt = get_pcppage_migratetype(page); /* must delete to avoid corrupting pcp list */ - list_del(&page->lru); + list_del(&page->pcp_list); count -= nr_pages; pcp->count -= nr_pages; @@ -3072,7 +3072,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, * for IO devices that can merge IO requests if the physical * pages are ordered properly. */ - list_add_tail(&page->lru, list); + list_add_tail(&page->pcp_list, list); allocated++; if (is_migrate_cma(get_pcppage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, @@ -3322,7 +3322,7 @@ void mark_free_pages(struct zone *zone) for_each_migratetype_order(order, t) { list_for_each_entry(page, - &zone->free_area[order].free_list[t], lru) { + &zone->free_area[order].free_list[t], buddy_list) { unsigned long i; pfn = page_to_pfn(page); @@ -3411,7 +3411,7 @@ static void free_unref_page_commit(struct page *page, int migratetype, __count_vm_event(PGFREE); pcp = this_cpu_ptr(zone->per_cpu_pageset); pindex = order_to_pindex(migratetype, order); - list_add(&page->lru, &pcp->lists[pindex]); + list_add(&page->pcp_list, &pcp->lists[pindex]); pcp->count += 1 << order; /* @@ -3674,8 +3674,8 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, return NULL; } - page = list_first_entry(list, struct page, lru); - list_del(&page->lru); + page = list_first_entry(list, struct page, pcp_list); + list_del(&page->pcp_list); pcp->count -= 1 << order; } while (check_new_pcp(page, order)); From 56857cdd0a927b351fa7625970df259efbe9fed2 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 24 Jun 2022 13:54:18 +0100 Subject: [PATCH 152/321] mm/page_alloc: use only one PCP list for THP-sized allocations The per_cpu_pages is cache-aligned on a standard x86-64 distribution configuration but a later patch will add a new field which would push the structure into the next cache line. Use only one list to store THP-sized pages on the per-cpu list. This assumes that the vast majority of THP-sized allocations are GFP_MOVABLE but even if it was another type, it would not contribute to serious fragmentation that potentially causes a later THP allocation failure. Align per_cpu_pages on the cacheline boundary to ensure there is no false cache sharing. After this patch, the structure sizing is; struct per_cpu_pages { int count; /* 0 4 */ int high; /* 4 4 */ int batch; /* 8 4 */ short int free_factor; /* 12 2 */ short int expire; /* 14 2 */ struct list_head lists[13]; /* 16 208 */ /* size: 256, cachelines: 4, members: 6 */ /* padding: 32 */ } __attribute__((__aligned__(64))); Link: https://lkml.kernel.org/r/20220624125423.6126-3-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Minchan Kim Acked-by: Minchan Kim Acked-by: Vlastimil Babka Tested-by: Yu Zhao Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Michal Hocko Cc: Nicolas Saenz Julienne Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 11 +++++++---- mm/page_alloc.c | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 735bf5b379491..ce15c27dddb55 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -355,15 +355,18 @@ enum zone_watermarks { }; /* - * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional - * for pageblock size for THP if configured. + * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list + * for THP which will usually be GFP_MOVABLE. 
Even if it is another type, + * it should not contribute to serious fragmentation causing THP allocation + * failures. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define NR_PCP_THP 1 #else #define NR_PCP_THP 0 #endif -#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP)) +#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) +#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) /* * Shift to encode migratetype and order in the same integer, with order @@ -389,7 +392,7 @@ struct per_cpu_pages { /* Lists of pages, one per migrate type stored on the pcp-lists */ struct list_head lists[NR_PCP_LISTS]; -}; +} ____cacheline_aligned_in_smp; struct per_cpu_zonestat { #ifdef CONFIG_SMP diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6bbe08a3f84c3..9ec039cad344a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -653,7 +653,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order) #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (order > PAGE_ALLOC_COSTLY_ORDER) { VM_BUG_ON(order != pageblock_order); - base = PAGE_ALLOC_COSTLY_ORDER + 1; + return NR_LOWORDER_PCP_LISTS; } #else VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); @@ -667,7 +667,7 @@ static inline int pindex_to_order(unsigned int pindex) int order = pindex / MIGRATE_PCPTYPES; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (order > PAGE_ALLOC_COSTLY_ORDER) + if (pindex == NR_LOWORDER_PCP_LISTS) order = pageblock_order; #else VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); From 76c61fed001a73236ef19d840420cf2e97166517 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 24 Jun 2022 13:54:19 +0100 Subject: [PATCH 153/321] mm/page_alloc: split out buddy removal code from rmqueue into separate helper This is a preparation page to allow the buddy removal code to be reused in a later patch. No functional change. Link: https://lkml.kernel.org/r/20220624125423.6126-4-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Minchan Kim Acked-by: Minchan Kim Reviewed-by: Nicolas Saenz Julienne Acked-by: Vlastimil Babka Tested-by: Yu Zhao Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/page_alloc.c | 81 ++++++++++++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 34 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9ec039cad344a..2db3b73fa5372 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3641,6 +3641,43 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, #endif } +static __always_inline +struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, + unsigned int order, unsigned int alloc_flags, + int migratetype) +{ + struct page *page; + unsigned long flags; + + do { + page = NULL; + spin_lock_irqsave(&zone->lock, flags); + /* + * order-0 request can reach here when the pcplist is skipped + * due to non-CMA allocation context. HIGHATOMIC area is + * reserved for high-order atomic allocation, so order-0 + * request should skip it. 
+ */ + if (order > 0 && alloc_flags & ALLOC_HARDER) + page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); + if (!page) { + page = __rmqueue(zone, order, migratetype, alloc_flags); + if (!page) { + spin_unlock_irqrestore(&zone->lock, flags); + return NULL; + } + } + __mod_zone_freepage_state(zone, -(1 << order), + get_pcppage_migratetype(page)); + spin_unlock_irqrestore(&zone->lock, flags); + } while (check_new_pages(page, order)); + + __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); + zone_statistics(preferred_zone, zone, 1); + + return page; +} + /* Remove page from the per-cpu list, caller must protect the list */ static inline struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, @@ -3721,9 +3758,14 @@ struct page *rmqueue(struct zone *preferred_zone, gfp_t gfp_flags, unsigned int alloc_flags, int migratetype) { - unsigned long flags; struct page *page; + /* + * We most definitely don't want callers attempting to + * allocate greater than order-1 page units with __GFP_NOFAIL. + */ + WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); + if (likely(pcp_allowed_order(order))) { /* * MIGRATE_MOVABLE pcplist could have the pages on CMA area and @@ -3737,35 +3779,10 @@ struct page *rmqueue(struct zone *preferred_zone, } } - /* - * We most definitely don't want callers attempting to - * allocate greater than order-1 page units with __GFP_NOFAIL. - */ - WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); - - do { - page = NULL; - spin_lock_irqsave(&zone->lock, flags); - /* - * order-0 request can reach here when the pcplist is skipped - * due to non-CMA allocation context. HIGHATOMIC area is - * reserved for high-order atomic allocation, so order-0 - * request should skip it. - */ - if (order > 0 && alloc_flags & ALLOC_HARDER) - page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); - if (!page) { - page = __rmqueue(zone, order, migratetype, alloc_flags); - if (!page) - goto failed; - } - __mod_zone_freepage_state(zone, -(1 << order), - get_pcppage_migratetype(page)); - spin_unlock_irqrestore(&zone->lock, flags); - } while (check_new_pages(page, order)); - - __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); - zone_statistics(preferred_zone, zone, 1); + page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, + migratetype); + if (unlikely(!page)) + return NULL; out: /* Separate test+clear to avoid unnecessary atomics */ @@ -3776,10 +3793,6 @@ struct page *rmqueue(struct zone *preferred_zone, VM_BUG_ON_PAGE(page && bad_range(zone, page), page); return page; - -failed: - spin_unlock_irqrestore(&zone->lock, flags); - return NULL; } #ifdef CONFIG_FAIL_PAGE_ALLOC From ed53e047a7833ec7dec50043985061d6b362484f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 24 Jun 2022 13:54:20 +0100 Subject: [PATCH 154/321] mm/page_alloc: remove mistaken page == NULL check in rmqueue If a page allocation fails, the ZONE_BOOSTER_WATERMARK should be tested, cleared and kswapd woken whether the allocation attempt was via the PCP or directly via the buddy list. Remove the page == NULL so the ZONE_BOOSTED_WATERMARK bit is checked unconditionally. As it is unlikely that ZONE_BOOSTED_WATERMARK is set, mark the branch accordingly. 
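For context on the "Separate test+clear to avoid unnecessary atomics" comment kept in the hunk below, a small sketch (kernel bitops; the comparison itself is not part of this patch) of why the split form is cheaper when the bit is rarely set:

  /* (a) test_and_clear_bit(): an atomic read-modify-write on every call,
   *     even in the common case where the bit is already clear.
   */
  if (test_and_clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))
          wakeup_kswapd(zone, 0, 0, zone_idx(zone));

  /* (b) Separate test + clear: the common path is a plain read; the
   *     atomic clear_bit() only runs in the unlikely boosted case.
   */
  if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
          clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
          wakeup_kswapd(zone, 0, 0, zone_idx(zone));
  }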
Link: https://lkml.kernel.org/r/20220624125423.6126-5-mgorman@techsingularity.net Signed-off-by: Mel Gorman Acked-by: Vlastimil Babka Tested-by: Yu Zhao Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Michal Hocko Cc: Minchan Kim Cc: Nicolas Saenz Julienne Signed-off-by: Andrew Morton --- mm/page_alloc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2db3b73fa5372..ad1822055fd6f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3781,12 +3781,10 @@ struct page *rmqueue(struct zone *preferred_zone, page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, migratetype); - if (unlikely(!page)) - return NULL; out: /* Separate test+clear to avoid unnecessary atomics */ - if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { + if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); wakeup_kswapd(zone, 0, 0, zone_idx(zone)); } From c40b26974413a605608373a9a559a3cdd9a70e4f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 24 Jun 2022 13:54:21 +0100 Subject: [PATCH 155/321] mm/page_alloc: protect PCP lists with a spinlock Currently the PCP lists are protected by using local_lock_irqsave to prevent migration and IRQ reentrancy but this is inconvenient. Remote draining of the lists is impossible and a workqueue is required and every task allocation/free must disable then enable interrupts which is expensive. As preparation for dealing with both of those problems, protect the lists with a spinlock. The IRQ-unsafe version of the lock is used because IRQs are already disabled by local_lock_irqsave. spin_trylock is used in combination with local_lock_irqsave() but later will be replaced with a spin_trylock_irqsave when the local_lock is removed. The per_cpu_pages still fits within the same number of cache lines after this patch relative to before the series. struct per_cpu_pages { spinlock_t lock; /* 0 4 */ int count; /* 4 4 */ int high; /* 8 4 */ int batch; /* 12 4 */ short int free_factor; /* 16 2 */ short int expire; /* 18 2 */ /* XXX 4 bytes hole, try to pack */ struct list_head lists[13]; /* 24 208 */ /* size: 256, cachelines: 4, members: 7 */ /* sum members: 228, holes: 1, sum holes: 4 */ /* padding: 24 */ } __attribute__((__aligned__(64))); There is overhead in the fast path due to acquiring the spinlock even though the spinlock is per-cpu and uncontended in the common case. Page Fault Test (PFT) running on a 1-socket reported the following results on a 1 socket machine. 5.19.0-rc3 5.19.0-rc3 vanilla mm-pcpspinirq-v5r16 Hmean faults/sec-1 869275.7381 ( 0.00%) 874597.5167 * 0.61%* Hmean faults/sec-3 2370266.6681 ( 0.00%) 2379802.0362 * 0.40%* Hmean faults/sec-5 2701099.7019 ( 0.00%) 2664889.7003 * -1.34%* Hmean faults/sec-7 3517170.9157 ( 0.00%) 3491122.8242 * -0.74%* Hmean faults/sec-8 3965729.6187 ( 0.00%) 3939727.0243 * -0.66%* There is a small hit in the number of faults per second but given that the results are more stable, it's borderline noise. 
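As a rough sketch of the new pattern (simplified from the hunks below,
using the names introduced by this series), the free path takes the
embedded pcp->lock opportunistically and falls back to the buddy
allocator when the trylock fails, for example against a parallel drain:

        local_lock_irqsave(&pagesets.lock, flags);
        pcp_trylock_prepare(UP_flags);
        pcp = this_cpu_ptr(zone->per_cpu_pageset);
        if (spin_trylock(&pcp->lock)) {
                free_unref_page_commit(zone, pcp, page, migratetype, order);
                spin_unlock(&pcp->lock);
        } else {
                /* pcp busy (e.g. being drained), free to the buddy lists */
                free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
        }
        pcp_trylock_finish(UP_flags);
        local_unlock_irqrestore(&pagesets.lock, flags);

On UP, pcp_trylock_prepare() disables IRQs because spin_trylock() always
succeeds there and must not be re-entered from interrupt context; on SMP
and PREEMPT_RT it is a no-op.
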
Link: https://lkml.kernel.org/r/20220624125423.6126-6-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Yu Zhao Reviewed-by: Nicolas Saenz Julienne Tested-by: Nicolas Saenz Julienne Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Michal Hocko Cc: Minchan Kim Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 1 + mm/page_alloc.c | 118 +++++++++++++++++++++++++++++++++-------- 2 files changed, 98 insertions(+), 21 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ce15c27dddb55..ed3ebe5e9e50a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -382,6 +382,7 @@ enum zone_watermarks { /* Fields and list protected by pagesets local_lock in page_alloc.c */ struct per_cpu_pages { + spinlock_t lock; /* Protects lists field */ int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ad1822055fd6f..a2bb6b4f449f9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -133,6 +133,20 @@ static DEFINE_PER_CPU(struct pagesets, pagesets) = { .lock = INIT_LOCAL_LOCK(lock), }; +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) +/* + * On SMP, spin_trylock is sufficient protection. + * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. + */ +#define pcp_trylock_prepare(flags) do { } while (0) +#define pcp_trylock_finish(flag) do { } while (0) +#else + +/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ +#define pcp_trylock_prepare(flags) local_irq_save(flags) +#define pcp_trylock_finish(flags) local_irq_restore(flags) +#endif + #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DEFINE_PER_CPU(int, numa_node); EXPORT_PER_CPU_SYMBOL(numa_node); @@ -3101,15 +3115,22 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, */ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { - unsigned long flags; int to_drain, batch; - local_lock_irqsave(&pagesets.lock, flags); batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); - if (to_drain > 0) + if (to_drain > 0) { + unsigned long flags; + + /* + * free_pcppages_bulk expects IRQs disabled for zone->lock + * so even though pcp->lock is not intended to be IRQ-safe, + * it's needed in this context. 
+ */ + spin_lock_irqsave(&pcp->lock, flags); free_pcppages_bulk(zone, to_drain, pcp, 0); - local_unlock_irqrestore(&pagesets.lock, flags); + spin_unlock_irqrestore(&pcp->lock, flags); + } } #endif @@ -3122,16 +3143,17 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) */ static void drain_pages_zone(unsigned int cpu, struct zone *zone) { - unsigned long flags; struct per_cpu_pages *pcp; - local_lock_irqsave(&pagesets.lock, flags); - pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); - if (pcp->count) - free_pcppages_bulk(zone, pcp->count, pcp, 0); + if (pcp->count) { + unsigned long flags; - local_unlock_irqrestore(&pagesets.lock, flags); + /* See drain_zone_pages on why this is disabling IRQs */ + spin_lock_irqsave(&pcp->lock, flags); + free_pcppages_bulk(zone, pcp->count, pcp, 0); + spin_unlock_irqrestore(&pcp->lock, flags); + } } /* @@ -3399,17 +3421,15 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, return min(READ_ONCE(pcp->batch) << 2, high); } -static void free_unref_page_commit(struct page *page, int migratetype, +static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, + struct page *page, int migratetype, unsigned int order) { - struct zone *zone = page_zone(page); - struct per_cpu_pages *pcp; int high; int pindex; bool free_high; __count_vm_event(PGFREE); - pcp = this_cpu_ptr(zone->per_cpu_pageset); pindex = order_to_pindex(migratetype, order); list_add(&page->pcp_list, &pcp->lists[pindex]); pcp->count += 1 << order; @@ -3436,6 +3456,9 @@ static void free_unref_page_commit(struct page *page, int migratetype, void free_unref_page(struct page *page, unsigned int order) { unsigned long flags; + unsigned long __maybe_unused UP_flags; + struct per_cpu_pages *pcp; + struct zone *zone; unsigned long pfn = page_to_pfn(page); int migratetype; @@ -3459,7 +3482,16 @@ void free_unref_page(struct page *page, unsigned int order) } local_lock_irqsave(&pagesets.lock, flags); - free_unref_page_commit(page, migratetype, order); + zone = page_zone(page); + pcp_trylock_prepare(UP_flags); + pcp = this_cpu_ptr(zone->per_cpu_pageset); + if (spin_trylock(&pcp->lock)) { + free_unref_page_commit(zone, pcp, page, migratetype, order); + spin_unlock(&pcp->lock); + } else { + free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); + } + pcp_trylock_finish(UP_flags); local_unlock_irqrestore(&pagesets.lock, flags); } @@ -3469,6 +3501,8 @@ void free_unref_page(struct page *page, unsigned int order) void free_unref_page_list(struct list_head *list) { struct page *page, *next; + struct per_cpu_pages *pcp = NULL; + struct zone *locked_zone = NULL; unsigned long flags; int batch_count = 0; int migratetype; @@ -3495,6 +3529,17 @@ void free_unref_page_list(struct list_head *list) local_lock_irqsave(&pagesets.lock, flags); list_for_each_entry_safe(page, next, list, lru) { + struct zone *zone = page_zone(page); + + /* Different zone, different pcp lock. */ + if (zone != locked_zone) { + if (pcp) + spin_unlock(&pcp->lock); + locked_zone = zone; + pcp = this_cpu_ptr(zone->per_cpu_pageset); + spin_lock(&pcp->lock); + } + /* * Non-isolated types over MIGRATE_PCPTYPES get added * to the MIGRATE_MOVABLE pcp list. @@ -3504,18 +3549,24 @@ void free_unref_page_list(struct list_head *list) migratetype = MIGRATE_MOVABLE; trace_mm_page_free_batched(page); - free_unref_page_commit(page, migratetype, 0); + free_unref_page_commit(zone, pcp, page, migratetype, 0); /* * Guard against excessive IRQ disabled times when we get * a large list of pages to free. 
*/ if (++batch_count == SWAP_CLUSTER_MAX) { + spin_unlock(&pcp->lock); local_unlock_irqrestore(&pagesets.lock, flags); batch_count = 0; local_lock_irqsave(&pagesets.lock, flags); + pcp = this_cpu_ptr(locked_zone->per_cpu_pageset); + spin_lock(&pcp->lock); } } + + if (pcp) + spin_unlock(&pcp->lock); local_unlock_irqrestore(&pagesets.lock, flags); } @@ -3729,18 +3780,31 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, struct list_head *list; struct page *page; unsigned long flags; + unsigned long __maybe_unused UP_flags; local_lock_irqsave(&pagesets.lock, flags); + /* + * spin_trylock may fail due to a parallel drain. In the future, the + * trylock will also protect against IRQ reentrancy. + */ + pcp = this_cpu_ptr(zone->per_cpu_pageset); + pcp_trylock_prepare(UP_flags); + if (!spin_trylock(&pcp->lock)) { + pcp_trylock_finish(UP_flags); + return NULL; + } + /* * On allocation, reduce the number of pages that are batch freed. * See nr_pcp_free() where free_factor is increased for subsequent * frees. */ - pcp = this_cpu_ptr(zone->per_cpu_pageset); pcp->free_factor >>= 1; list = &pcp->lists[order_to_pindex(migratetype, order)]; page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); + spin_unlock(&pcp->lock); + pcp_trylock_finish(UP_flags); local_unlock_irqrestore(&pagesets.lock, flags); if (page) { __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); @@ -3775,7 +3839,8 @@ struct page *rmqueue(struct zone *preferred_zone, migratetype != MIGRATE_MOVABLE) { page = rmqueue_pcplist(preferred_zone, zone, order, gfp_flags, migratetype, alloc_flags); - goto out; + if (likely(page)) + goto out; } } @@ -5252,6 +5317,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, { struct page *page; unsigned long flags; + unsigned long __maybe_unused UP_flags; struct zone *zone; struct zoneref *z; struct per_cpu_pages *pcp; @@ -5332,11 +5398,15 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, if (unlikely(!zone)) goto failed; - /* Attempt the batch allocation */ + /* Is a parallel drain in progress? 
*/ local_lock_irqsave(&pagesets.lock, flags); + pcp_trylock_prepare(UP_flags); pcp = this_cpu_ptr(zone->per_cpu_pageset); - pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; + if (!spin_trylock(&pcp->lock)) + goto failed_irq; + /* Attempt the batch allocation */ + pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; while (nr_populated < nr_pages) { /* Skip existing pages */ @@ -5349,8 +5419,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, pcp, pcp_list); if (unlikely(!page)) { /* Try and allocate at least one page */ - if (!nr_account) + if (!nr_account) { + spin_unlock(&pcp->lock); goto failed_irq; + } break; } nr_account++; @@ -5363,6 +5435,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, nr_populated++; } + spin_unlock(&pcp->lock); + pcp_trylock_finish(UP_flags); local_unlock_irqrestore(&pagesets.lock, flags); __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); @@ -5372,6 +5446,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, return nr_populated; failed_irq: + pcp_trylock_finish(UP_flags); local_unlock_irqrestore(&pagesets.lock, flags); failed: @@ -7017,6 +7092,7 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonesta memset(pcp, 0, sizeof(*pcp)); memset(pzstats, 0, sizeof(*pzstats)); + spin_lock_init(&pcp->lock); for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) INIT_LIST_HEAD(&pcp->lists[pindex]); From e41a10f7449e8244502cbe4d47203a58ccf20cd1 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 7 Jul 2022 13:06:35 -0700 Subject: [PATCH 156/321] mm-page_alloc-protect-pcp-lists-with-a-spinlock-fix add missing local_unlock_irqrestore() on contention path Reported-by: kernel test robot Reported-by: Dan Carpenter Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Nicolas Saenz Julienne Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/page_alloc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a2bb6b4f449f9..1f9d7f2e91eeb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3792,6 +3792,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, pcp_trylock_prepare(UP_flags); if (!spin_trylock(&pcp->lock)) { pcp_trylock_finish(UP_flags); + local_unlock_irqrestore(&pagesets.lock, flags); return NULL; } From 235a341d7762ec8c04f418ea48cae6c0f8e2ce73 Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Fri, 24 Jun 2022 13:54:22 +0100 Subject: [PATCH 157/321] mm/page_alloc: remotely drain per-cpu lists Some setups, notably NOHZ_FULL CPUs, are too busy to handle the per-cpu drain work queued by __drain_all_pages(). So introduce a new mechanism to remotely drain the per-cpu lists. It is made possible by remotely locking 'struct per_cpu_pages' new per-cpu spinlocks. A benefit of this new scheme is that drain operations are now migration safe. There was no observed performance degradation vs. the previous scheme. Both netperf and hackbench were run in parallel to triggering the __drain_all_pages(NULL, true) code path around ~100 times per second. The new scheme performs a bit better (~5%), although the important point here is there are no performance regressions vs. the previous mechanism. Per-cpu lists draining happens only in slow paths. Minchan Kim tested an earlier version and reported; My workload is not NOHZ CPUs but run apps under heavy memory pressure so they goes to direct reclaim and be stuck on drain_all_pages until work on workqueue run. 
unit: nanosecond max(dur) avg(dur) count(dur) 166713013 487511.77786438033 1283 From traces, system encountered the drain_all_pages 1283 times and worst case was 166ms and avg was 487us. The other problem was alloc_contig_range in CMA. The PCP draining takes several hundred millisecond sometimes though there is no memory pressure or a few of pages to be migrated out but CPU were fully booked. Your patch perfectly removed those wasted time. Link: https://lkml.kernel.org/r/20220624125423.6126-7-mgorman@techsingularity.net Signed-off-by: Nicolas Saenz Julienne Signed-off-by: Mel Gorman Tested-by: Yu Zhao Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Michal Hocko Cc: Minchan Kim Signed-off-by: Andrew Morton --- mm/page_alloc.c | 58 ++++--------------------------------------------- 1 file changed, 4 insertions(+), 54 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1f9d7f2e91eeb..1a8abd8ef1ded 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -165,13 +165,7 @@ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ EXPORT_PER_CPU_SYMBOL(_numa_mem_); #endif -/* work_structs for global per-cpu drains */ -struct pcpu_drain { - struct zone *zone; - struct work_struct work; -}; static DEFINE_MUTEX(pcpu_drain_mutex); -static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain); #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY volatile unsigned long latent_entropy __latent_entropy; @@ -3109,9 +3103,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, * Called from the vmstat counter updater to drain pagesets of this * currently executing processor on remote nodes after they have * expired. - * - * Note that this function must be called with the thread pinned to - * a single processor. */ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { @@ -3136,10 +3127,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) /* * Drain pcplists of the indicated processor and zone. - * - * The processor must either be the current processor and the - * thread pinned to the current processor or a processor that - * is not online. */ static void drain_pages_zone(unsigned int cpu, struct zone *zone) { @@ -3158,10 +3145,6 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) /* * Drain pcplists of all zones on the indicated processor. - * - * The processor must either be the current processor and the - * thread pinned to the current processor or a processor that - * is not online. */ static void drain_pages(unsigned int cpu) { @@ -3174,9 +3157,6 @@ static void drain_pages(unsigned int cpu) /* * Spill all of this CPU's per-cpu pages back into the buddy allocator. - * - * The CPU has to be pinned. When zone parameter is non-NULL, spill just - * the single zone's pages. */ void drain_local_pages(struct zone *zone) { @@ -3188,24 +3168,6 @@ void drain_local_pages(struct zone *zone) drain_pages(cpu); } -static void drain_local_pages_wq(struct work_struct *work) -{ - struct pcpu_drain *drain; - - drain = container_of(work, struct pcpu_drain, work); - - /* - * drain_all_pages doesn't use proper cpu hotplug protection so - * we can race with cpu offline when the WQ can move this from - * a cpu pinned worker to an unbound one. We can operate on a different - * cpu which is alright but we also have to make sure to not move to - * a different one. 
- */ - migrate_disable(); - drain_local_pages(drain->zone); - migrate_enable(); -} - /* * The implementation of drain_all_pages(), exposing an extra parameter to * drain on all cpus. @@ -3226,13 +3188,6 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus) */ static cpumask_t cpus_with_pcps; - /* - * Make sure nobody triggers this path before mm_percpu_wq is fully - * initialized. - */ - if (WARN_ON_ONCE(!mm_percpu_wq)) - return; - /* * Do not drain if one is already in progress unless it's specific to * a zone. Such callers are primarily CMA and memory hotplug and need @@ -3282,14 +3237,11 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus) } for_each_cpu(cpu, &cpus_with_pcps) { - struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); - - drain->zone = zone; - INIT_WORK(&drain->work, drain_local_pages_wq); - queue_work_on(cpu, mm_percpu_wq, &drain->work); + if (zone) + drain_pages_zone(cpu, zone); + else + drain_pages(cpu); } - for_each_cpu(cpu, &cpus_with_pcps) - flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); mutex_unlock(&pcpu_drain_mutex); } @@ -3298,8 +3250,6 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus) * Spill all the per-cpu pages from all CPUs back into the buddy allocator. * * When zone parameter is non-NULL, spill just the single zone's pages. - * - * Note that this can be extremely slow as the draining happens in a workqueue. */ void drain_all_pages(struct zone *zone) { From bcd90bb8c4f10e1bfaed8799be074f3d43ef962f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 24 Jun 2022 13:54:23 +0100 Subject: [PATCH 158/321] mm/page_alloc: replace local_lock with normal spinlock struct per_cpu_pages is no longer strictly local as PCP lists can be drained remotely using a lock for protection. While the use of local_lock works, it goes against the intent of local_lock which is for "pure CPU local concurrency control mechanisms and not suited for inter-CPU concurrency control" (Documentation/locking/locktypes.rst) local_lock protects against migration between when the percpu pointer is accessed and the pcp->lock acquired. The lock acquisition is a preemption point so in the worst case, a task could migrate to another NUMA node and accidentally allocate remote memory. The main requirement is to pin the task to a CPU that is suitable for PREEMPT_RT and !PREEMPT_RT. Replace local_lock with helpers that pin a task to a CPU, lookup the per-cpu structure and acquire the embedded lock. It's similar to local_lock without breaking the intent behind the API. It is not a complete API as only the parts needed for PCP-alloc are implemented but in theory, the generic helpers could be promoted to a general API if there was demand for an embedded lock within a per-cpu struct with a guarantee that the per-cpu structure locked matches the running CPU and cannot use get_cpu_var due to RT concerns. PCP requires these semantics to avoid accidentally allocating remote memory. 
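A condensed sketch of the approach (the full helpers are in the diff
below): pin the task, look up the per-cpu structure, then take the
embedded lock, so the pcp that ends up locked is guaranteed to belong to
the CPU the task is running on.

        #ifndef CONFIG_PREEMPT_RT
        #define pcpu_task_pin()         preempt_disable()
        #define pcpu_task_unpin()       preempt_enable()
        #else
        #define pcpu_task_pin()         migrate_disable()
        #define pcpu_task_unpin()       migrate_enable()
        #endif

        /* e.g. in rmqueue_pcplist(), via pcp_spin_trylock_irqsave(): */
        pcpu_task_pin();
        pcp = this_cpu_ptr(zone->per_cpu_pageset);
        if (!spin_trylock_irqsave(&pcp->lock, flags)) {
                pcpu_task_unpin();
                pcp = NULL;
        }
        ...
        spin_unlock_irqrestore(&pcp->lock, flags);
        pcpu_task_unpin();
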
Link: https://lkml.kernel.org/r/20220624125423.6126-8-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Yu Zhao Reviewed-by: Nicolas Saenz Julienne Tested-by: Nicolas Saenz Julienne Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Marcelo Tosatti Cc: Marek Szyprowski Cc: Michal Hocko Cc: Minchan Kim Signed-off-by: Andrew Morton --- mm/page_alloc.c | 140 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 95 insertions(+), 45 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1a8abd8ef1ded..3a8725c8bb1b9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -126,13 +126,6 @@ typedef int __bitwise fpi_t; static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) -struct pagesets { - local_lock_t lock; -}; -static DEFINE_PER_CPU(struct pagesets, pagesets) = { - .lock = INIT_LOCAL_LOCK(lock), -}; - #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) /* * On SMP, spin_trylock is sufficient protection. @@ -147,6 +140,83 @@ static DEFINE_PER_CPU(struct pagesets, pagesets) = { #define pcp_trylock_finish(flags) local_irq_restore(flags) #endif +/* + * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid + * a migration causing the wrong PCP to be locked and remote memory being + * potentially allocated, pin the task to the CPU for the lookup+lock. + * preempt_disable is used on !RT because it is faster than migrate_disable. + * migrate_disable is used on RT because otherwise RT spinlock usage is + * interfered with and a high priority task cannot preempt the allocator. + */ +#ifndef CONFIG_PREEMPT_RT +#define pcpu_task_pin() preempt_disable() +#define pcpu_task_unpin() preempt_enable() +#else +#define pcpu_task_pin() migrate_disable() +#define pcpu_task_unpin() migrate_enable() +#endif + +/* + * Generic helper to lookup and a per-cpu variable with an embedded spinlock. + * Return value should be used with equivalent unlock helper. + */ +#define pcpu_spin_lock(type, member, ptr) \ +({ \ + type *_ret; \ + pcpu_task_pin(); \ + _ret = this_cpu_ptr(ptr); \ + spin_lock(&_ret->member); \ + _ret; \ +}) + +#define pcpu_spin_lock_irqsave(type, member, ptr, flags) \ +({ \ + type *_ret; \ + pcpu_task_pin(); \ + _ret = this_cpu_ptr(ptr); \ + spin_lock_irqsave(&_ret->member, flags); \ + _ret; \ +}) + +#define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \ +({ \ + type *_ret; \ + pcpu_task_pin(); \ + _ret = this_cpu_ptr(ptr); \ + if (!spin_trylock_irqsave(&_ret->member, flags)) { \ + pcpu_task_unpin(); \ + _ret = NULL; \ + } \ + _ret; \ +}) + +#define pcpu_spin_unlock(member, ptr) \ +({ \ + spin_unlock(&ptr->member); \ + pcpu_task_unpin(); \ +}) + +#define pcpu_spin_unlock_irqrestore(member, ptr, flags) \ +({ \ + spin_unlock_irqrestore(&ptr->member, flags); \ + pcpu_task_unpin(); \ +}) + +/* struct per_cpu_pages specific helpers. 
*/ +#define pcp_spin_lock(ptr) \ + pcpu_spin_lock(struct per_cpu_pages, lock, ptr) + +#define pcp_spin_lock_irqsave(ptr, flags) \ + pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags) + +#define pcp_spin_trylock_irqsave(ptr, flags) \ + pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags) + +#define pcp_spin_unlock(ptr) \ + pcpu_spin_unlock(lock, ptr) + +#define pcp_spin_unlock_irqrestore(ptr, flags) \ + pcpu_spin_unlock_irqrestore(lock, ptr, flags) #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DEFINE_PER_CPU(int, numa_node); EXPORT_PER_CPU_SYMBOL(numa_node); @@ -1485,10 +1555,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* Ensure requested pindex is drained first. */ pindex = pindex - 1; - /* - * local_lock_irq held so equivalent to spin_lock_irqsave for - * both PREEMPT_RT and non-PREEMPT_RT configurations. - */ + /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */ spin_lock(&zone->lock); isolated_pageblocks = has_isolate_pageblock(zone); @@ -3056,10 +3123,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, { int i, allocated = 0; - /* - * local_lock_irq held so equivalent to spin_lock_irqsave for - * both PREEMPT_RT and non-PREEMPT_RT configurations. - */ + /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */ spin_lock(&zone->lock); for (i = 0; i < count; ++i) { struct page *page = __rmqueue(zone, order, migratetype, @@ -3431,18 +3495,16 @@ void free_unref_page(struct page *page, unsigned int order) migratetype = MIGRATE_MOVABLE; } - local_lock_irqsave(&pagesets.lock, flags); zone = page_zone(page); pcp_trylock_prepare(UP_flags); - pcp = this_cpu_ptr(zone->per_cpu_pageset); - if (spin_trylock(&pcp->lock)) { + pcp = pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, zone->per_cpu_pageset, flags); + if (pcp) { free_unref_page_commit(zone, pcp, page, migratetype, order); - spin_unlock(&pcp->lock); + pcp_spin_unlock_irqrestore(pcp, flags); } else { free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); } pcp_trylock_finish(UP_flags); - local_unlock_irqrestore(&pagesets.lock, flags); } /* @@ -3477,17 +3539,16 @@ void free_unref_page_list(struct list_head *list) } } - local_lock_irqsave(&pagesets.lock, flags); list_for_each_entry_safe(page, next, list, lru) { struct zone *zone = page_zone(page); /* Different zone, different pcp lock. */ if (zone != locked_zone) { if (pcp) - spin_unlock(&pcp->lock); + pcp_spin_unlock_irqrestore(pcp, flags); + locked_zone = zone; - pcp = this_cpu_ptr(zone->per_cpu_pageset); - spin_lock(&pcp->lock); + pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags); } /* @@ -3506,18 +3567,14 @@ void free_unref_page_list(struct list_head *list) * a large list of pages to free. */ if (++batch_count == SWAP_CLUSTER_MAX) { - spin_unlock(&pcp->lock); - local_unlock_irqrestore(&pagesets.lock, flags); + pcp_spin_unlock_irqrestore(pcp, flags); batch_count = 0; - local_lock_irqsave(&pagesets.lock, flags); - pcp = this_cpu_ptr(locked_zone->per_cpu_pageset); - spin_lock(&pcp->lock); + pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags); } } if (pcp) - spin_unlock(&pcp->lock); - local_unlock_irqrestore(&pagesets.lock, flags); + pcp_spin_unlock_irqrestore(pcp, flags); } /* @@ -3732,17 +3789,14 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, unsigned long flags; unsigned long __maybe_unused UP_flags; - local_lock_irqsave(&pagesets.lock, flags); - /* * spin_trylock may fail due to a parallel drain. 
In the future, the * trylock will also protect against IRQ reentrancy. */ - pcp = this_cpu_ptr(zone->per_cpu_pageset); pcp_trylock_prepare(UP_flags); - if (!spin_trylock(&pcp->lock)) { + pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); + if (!pcp) { pcp_trylock_finish(UP_flags); - local_unlock_irqrestore(&pagesets.lock, flags); return NULL; } @@ -3754,9 +3808,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, pcp->free_factor >>= 1; list = &pcp->lists[order_to_pindex(migratetype, order)]; page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); - spin_unlock(&pcp->lock); + pcp_spin_unlock_irqrestore(pcp, flags); pcp_trylock_finish(UP_flags); - local_unlock_irqrestore(&pagesets.lock, flags); if (page) { __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); zone_statistics(preferred_zone, zone, 1); @@ -5350,10 +5403,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, goto failed; /* Is a parallel drain in progress? */ - local_lock_irqsave(&pagesets.lock, flags); pcp_trylock_prepare(UP_flags); - pcp = this_cpu_ptr(zone->per_cpu_pageset); - if (!spin_trylock(&pcp->lock)) + pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); + if (!pcp) goto failed_irq; /* Attempt the batch allocation */ @@ -5371,7 +5423,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, if (unlikely(!page)) { /* Try and allocate at least one page */ if (!nr_account) { - spin_unlock(&pcp->lock); + pcp_spin_unlock_irqrestore(pcp, flags); goto failed_irq; } break; @@ -5386,9 +5438,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, nr_populated++; } - spin_unlock(&pcp->lock); + pcp_spin_unlock_irqrestore(pcp, flags); pcp_trylock_finish(UP_flags); - local_unlock_irqrestore(&pagesets.lock, flags); __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); @@ -5398,7 +5449,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, failed_irq: pcp_trylock_finish(UP_flags); - local_unlock_irqrestore(&pagesets.lock, flags); failed: page = __alloc_pages(gfp, 0, preferred_nid, nodemask); From c879896b7f22ac8c250afbfc4332731b7067305c Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 27 Jun 2022 09:46:45 +0100 Subject: [PATCH 159/321] mm/page_alloc: replace local_lock with normal spinlock -fix As noted by Yu Zhao, use pcp_spin_trylock_irqsave instead of pcpu_spin_trylock_irqsave. 
This is a fix to the mm-unstable patch mm-page_alloc-replace-local_lock-with-normal-spinlock.patch Link: https://lkml.kernel.org/r/20220627084645.GA27531@techsingularity.net Signed-off-by: Mel Gorman Reported-by: Yu Zhao Tested-by: Yu Zhao Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3a8725c8bb1b9..c5c78170c49ad 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3497,7 +3497,7 @@ void free_unref_page(struct page *page, unsigned int order) zone = page_zone(page); pcp_trylock_prepare(UP_flags); - pcp = pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, zone->per_cpu_pageset, flags); + pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); if (pcp) { free_unref_page_commit(zone, pcp, page, migratetype, order); pcp_spin_unlock_irqrestore(pcp, flags); From e7471b87987b9a0f698139825c4d9d69d22db5fe Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Thu, 23 Jun 2022 15:06:06 -0700 Subject: [PATCH 160/321] procfs: add 'size' to /proc//fdinfo/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "procfs: Add file path and size to /proc//fdinfo", v2. Processes can pin shared memory by keeping a handle to it through a file descriptor; for instance dmabufs, memfd, and ashmem (in Android). In the case of a memory leak, to identify the process pinning the memory, userspace needs to: - Iterate the /proc//fd/* for each process - Do a readlink on each entry to identify the type of memory from the file path. - stat() each entry to get the size of the memory. The file permissions on /proc//fd/* only allows for the owner or root to perform the operations above; and so is not suitable for capturing the system-wide state in a production environment. This issue was addressed for dmabufs by making /proc/*/fdinfo/* accessible to a process with PTRACE_MODE_READ_FSCREDS credentials[1] To allow the same kind of tracking for other types of shared memory, add the following fields to /proc//fdinfo/: path - This allows identifying the type of memory based on common prefixes: e.g. "/memfd...", "/dmabuf...", "/dev/ashmem..." This was not an issued when dmabuf tracking was introduced because the exp_name field of dmabuf fdinfo could be used to distinguish dmabuf fds from other types. size - To track the amount of memory that is being pinned. dmabufs expose size as an additional field in fdinfo. Remove this and make it a common field for all fds. Access to /proc//fdinfo is governed by PTRACE_MODE_READ_FSCREDS -- the same as for /proc//maps which also exposes the path and size for mapped memory regions. This allows for a system process with PTRACE_MODE_READ_FSCREDS to account the pinned per-process memory via fdinfo. This patch (of 2): To be able to account the amount of memory a process is keeping pinned by open file descriptors add a 'size' field to fdinfo output. dmabufs fds already expose a 'size' field for this reason, remove this and make it a common field for all fds. This allows tracking of other types of memory (e.g. memfd and ashmem in Android). Link: https://lkml.kernel.org/r/20220623220613.3014268-1-kaleshsingh@google.com Link: https://lkml.kernel.org/r/20220623220613.3014268-2-kaleshsingh@google.com Signed-off-by: Kalesh Singh Reviewed-by: Christian König Cc: Al Viro Cc: Christoph Hellwig Cc: Stephen Brennan Cc: David Laight Cc: Ioannis Ilkos Cc: T.J. 
Mercier Cc: Suren Baghdasaryan Cc: Jonathan Corbet Cc: Sumit Semwal Cc: Johannes Weiner Cc: Christoph Anton Mitterer Cc: Colin Cross Cc: Paul Gortmaker Cc: Randy Dunlap Cc: Alexey Dobriyan Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 12 ++++++++++-- drivers/dma-buf/dma-buf.c | 1 - fs/proc/fd.c | 9 +++++---- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index e7aafc82be999..640fe47586e3e 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -1891,13 +1891,14 @@ if precise results are needed. 3.8 /proc//fdinfo/ - Information about opened file --------------------------------------------------------------- This file provides information associated with an opened file. The regular -files have at least four fields -- 'pos', 'flags', 'mnt_id' and 'ino'. +files have at least five fields -- 'pos', 'flags', 'mnt_id', 'ino', and 'size'. + The 'pos' represents the current offset of the opened file in decimal form [see lseek(2) for details], 'flags' denotes the octal O_xxx mask the file has been created with [see open(2) for details] and 'mnt_id' represents mount ID of the file system containing the opened file [see 3.5 /proc//mountinfo for details]. 'ino' represents the inode number of -the file. +the file, and 'size' represents the size of the file in bytes. A typical output is:: @@ -1905,6 +1906,7 @@ A typical output is:: flags: 0100002 mnt_id: 19 ino: 63107 + size: 0 All locks associated with a file descriptor are shown in its fdinfo too:: @@ -1922,6 +1924,7 @@ Eventfd files flags: 04002 mnt_id: 9 ino: 63107 + size: 0 eventfd-count: 5a where 'eventfd-count' is hex value of a counter. @@ -1935,6 +1938,7 @@ Signalfd files flags: 04002 mnt_id: 9 ino: 63107 + size: 0 sigmask: 0000000000000200 where 'sigmask' is hex value of the signal mask associated @@ -1949,6 +1953,7 @@ Epoll files flags: 02 mnt_id: 9 ino: 63107 + size: 0 tfd: 5 events: 1d data: ffffffffffffffff pos:0 ino:61af sdev:7 where 'tfd' is a target file descriptor number in decimal form, @@ -1967,6 +1972,7 @@ For inotify files the format is the following:: flags: 02000000 mnt_id: 9 ino: 63107 + size: 0 inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:7e9e0000640d1b6d where 'wd' is a watch descriptor in decimal form, i.e. 
a target file @@ -1990,6 +1996,7 @@ For fanotify files the format is:: flags: 02 mnt_id: 9 ino: 63107 + size: 0 fanotify flags:10 event-flags:0 fanotify mnt_id:12 mflags:40 mask:38 ignored_mask:40000003 fanotify ino:4f969 sdev:800013 mflags:0 mask:3b ignored_mask:40000000 fhandle-bytes:8 fhandle-type:1 f_handle:69f90400c275b5b4 @@ -2015,6 +2022,7 @@ Timerfd files flags: 02 mnt_id: 9 ino: 63107 + size: 0 clockid: 0 ticks: 0 settime flags: 01 diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 32f55640890ce..5f2ae38c960fd 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -378,7 +378,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) { struct dma_buf *dmabuf = file->private_data; - seq_printf(m, "size:\t%zu\n", dmabuf->size); /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 913bef0d2a36c..464bc3f557596 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -54,10 +54,11 @@ static int seq_show(struct seq_file *m, void *v) if (ret) return ret; - seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\nino:\t%lu\n", - (long long)file->f_pos, f_flags, - real_mount(file->f_path.mnt)->mnt_id, - file_inode(file)->i_ino); + seq_printf(m, "pos:\t%lli\n", (long long)file->f_pos); + seq_printf(m, "flags:\t0%o\n", f_flags); + seq_printf(m, "mnt_id:\t%i\n", real_mount(file->f_path.mnt)->mnt_id); + seq_printf(m, "ino:\t%lu\n", file_inode(file)->i_ino); + seq_printf(m, "size:\t%lli\n", (long long)file_inode(file)->i_size); /* show_fd_locks() never deferences files so a stale value is safe */ show_fd_locks(m, file, files); From 02d80a37ffe0f57914c4b37b861aee5843e558fb Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Thu, 23 Jun 2022 15:06:07 -0700 Subject: [PATCH 161/321] procfs: add 'path' to /proc//fdinfo/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to identify the type of memory a process has pinned through its open fds, add the file path to fdinfo output. This allows identifying memory types based on common prefixes: e.g. "/memfd...", "/dmabuf...", "/dev/ashmem...". To be cautious, only expose the paths for anonymous inodes, and this also avoids printing path names with strange characters. Access to /proc//fdinfo is governed by PTRACE_MODE_READ_FSCREDS the same as /proc//maps which also exposes the file path of mappings; so the security permissions for accessing path is consistent with that of /proc//maps. Link: https://lkml.kernel.org/r/20220623220613.3014268-3-kaleshsingh@google.com Signed-off-by: Kalesh Singh Cc: Alexey Dobriyan Cc: Al Viro Cc: Christian König Cc: Christoph Anton Mitterer Cc: Christoph Hellwig Cc: Colin Cross Cc: David Laight Cc: Ioannis Ilkos Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Paul Gortmaker Cc: Randy Dunlap Cc: Stephen Brennan Cc: Sumit Semwal Cc: Suren Baghdasaryan Cc: T.J. 
Mercier Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 10 ++++++++++ fs/libfs.c | 9 +++++++++ fs/proc/fd.c | 13 +++++++++++-- include/linux/fs.h | 1 + 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 640fe47586e3e..47e95dbc820d5 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -1912,6 +1912,9 @@ All locks associated with a file descriptor are shown in its fdinfo too:: lock: 1: FLOCK ADVISORY WRITE 359 00:13:11691 0 EOF +Files with anonymous inodes have an additional 'path' field which represents +the anonymous file path. + The files such as eventfd, fsnotify, signalfd, epoll among the regular pos/flags pair provide additional information particular to the objects they represent. @@ -1925,6 +1928,7 @@ Eventfd files mnt_id: 9 ino: 63107 size: 0 + path: anon_inode:[eventfd] eventfd-count: 5a where 'eventfd-count' is hex value of a counter. @@ -1939,6 +1943,7 @@ Signalfd files mnt_id: 9 ino: 63107 size: 0 + path: anon_inode:[signalfd] sigmask: 0000000000000200 where 'sigmask' is hex value of the signal mask associated @@ -1954,6 +1959,7 @@ Epoll files mnt_id: 9 ino: 63107 size: 0 + path: anon_inode:[eventpoll] tfd: 5 events: 1d data: ffffffffffffffff pos:0 ino:61af sdev:7 where 'tfd' is a target file descriptor number in decimal form, @@ -1973,6 +1979,7 @@ For inotify files the format is the following:: mnt_id: 9 ino: 63107 size: 0 + path: anon_inode:inotify inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:7e9e0000640d1b6d where 'wd' is a watch descriptor in decimal form, i.e. a target file @@ -1997,6 +2004,7 @@ For fanotify files the format is:: mnt_id: 9 ino: 63107 size: 0 + path: anon_inode:[fanotify] fanotify flags:10 event-flags:0 fanotify mnt_id:12 mflags:40 mask:38 ignored_mask:40000003 fanotify ino:4f969 sdev:800013 mflags:0 mask:3b ignored_mask:40000000 fhandle-bytes:8 fhandle-type:1 f_handle:69f90400c275b5b4 @@ -2023,6 +2031,7 @@ Timerfd files mnt_id: 9 ino: 63107 size: 0 + path: anon_inode:[timerfd] clockid: 0 ticks: 0 settime flags: 01 @@ -2047,6 +2056,7 @@ DMA Buffer files mnt_id: 9 ino: 63107 size: 32768 + path: /dmabuf: count: 2 exp_name: system-heap diff --git a/fs/libfs.c b/fs/libfs.c index 31b0ddf01c31d..6911749b4da79 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1217,6 +1217,15 @@ void kfree_link(void *p) } EXPORT_SYMBOL(kfree_link); +static const struct address_space_operations anon_aops = { + .dirty_folio = noop_dirty_folio, +}; + +bool is_anon_inode(struct inode *inode) +{ + return inode->i_mapping->a_ops == &anon_aops; +} + struct inode *alloc_anon_inode(struct super_block *s) { static const struct address_space_operations anon_aops = { diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 464bc3f557596..5bac79a2fa515 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -23,6 +23,7 @@ static int seq_show(struct seq_file *m, void *v) struct files_struct *files = NULL; int f_flags = 0, ret = -ENOENT; struct file *file = NULL; + struct inode *inode = NULL; struct task_struct *task; task = get_proc_task(m->private); @@ -54,11 +55,19 @@ static int seq_show(struct seq_file *m, void *v) if (ret) return ret; + inode = file_inode(file); + seq_printf(m, "pos:\t%lli\n", (long long)file->f_pos); seq_printf(m, "flags:\t0%o\n", f_flags); seq_printf(m, "mnt_id:\t%i\n", real_mount(file->f_path.mnt)->mnt_id); - seq_printf(m, "ino:\t%lu\n", file_inode(file)->i_ino); - seq_printf(m, 
"size:\t%lli\n", (long long)file_inode(file)->i_size); + seq_printf(m, "ino:\t%lu\n", inode->i_ino); + seq_printf(m, "size:\t%lli\n", (long long)inode->i_size); + + if (is_anon_inode(inode)) { + seq_puts(m, "path:\t"); + seq_file_path(m, file, "\n"); + seq_putc(m, '\n'); + } /* show_fd_locks() never deferences files so a stale value is safe */ show_fd_locks(m, file, files); diff --git a/include/linux/fs.h b/include/linux/fs.h index 134e9d7ad5d68..7132c6f955c19 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3115,6 +3115,7 @@ extern void page_put_link(void *); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); +extern bool is_anon_inode(struct inode *inode); void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); From 5f602723560e3bb7aa8098f7ee2cc35ad48eb5e0 Mon Sep 17 00:00:00 2001 From: Xiang Yang Date: Sat, 25 Jun 2022 14:18:44 +0800 Subject: [PATCH 162/321] mm/memcontrol.c: replace cgroup_memory_nokmem with mem_cgroup_kmem_disabled() mem_cgroup_kmem_disabled() checks whether the kmem accounting is off. Therefore, replace cgroup_memory_nokmem with mem_cgroup_kmem_disabled(), which is the same work in percpu.c and slab_common.c. Link: https://lkml.kernel.org/r/20220625061844.226764-1-xiangyang3@huawei.com Signed-off-by: Xiang Yang Reviewed-by: Muchun Song Acked-by: Roman Gushchin Acked-by: Souptick Joarder (HPE) Cc: Johannes Weiner Cc: Michal Hocko Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b56d1f654ff98..0bd5ed209d9e5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3652,7 +3652,7 @@ static int memcg_online_kmem(struct mem_cgroup *memcg) { struct obj_cgroup *objcg; - if (cgroup_memory_nokmem) + if (mem_cgroup_kmem_disabled()) return 0; if (unlikely(mem_cgroup_is_root(memcg))) @@ -3676,7 +3676,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) { struct mem_cgroup *parent; - if (cgroup_memory_nokmem) + if (mem_cgroup_kmem_disabled()) return; if (unlikely(mem_cgroup_is_root(memcg))) From 3f1dba19b6af5a48202fc2f06b7dd77e35c4aa7e Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Mon, 27 Jun 2022 05:11:26 +0300 Subject: [PATCH 163/321] memcg: notify about global mem_cgroup_id space depletion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the host owner is not informed about the exhaustion of the global mem_cgroup_id space. When this happens, systemd cannot start a new service and receives a unique -ENOSPC error code. However, this can happen inside this container, persist in the log file of the local container, and may not be noticed by the host owner if he did not try to start any new services. 
Link: https://lkml.kernel.org/r/97bed1fd-f230-c2ea-1cb6-8230825a9a64@openvz.org Signed-off-by: Vasily Averin Cc: Shakeel Butt Cc: Roman Gushchin Cc: Michal Koutný Cc: Michal Hocko Cc: Vlastimil Babka Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0bd5ed209d9e5..d839d4b5340c0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5174,6 +5174,8 @@ static struct mem_cgroup *mem_cgroup_alloc(void) 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL); if (memcg->id.id < 0) { error = memcg->id.id; + if (error == -ENOSPC) + pr_notice_ratelimited("mem_cgroup_id space is exhausted\n"); goto fail; } From 52e59cf273cdbd6e92c75b5191aad8ca680b412a Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 27 Jun 2022 21:23:51 +0800 Subject: [PATCH 164/321] filemap: minor cleanup for filemap_write_and_wait_range Restructure the logic in filemap_write_and_wait_range to simplify the code and make it more consistent with file_write_and_wait_range. No functional change intended. Link: https://lkml.kernel.org/r/20220627132351.55680-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/filemap.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index ffdfbc8b0e3ca..cd59f055e29d5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -667,7 +667,7 @@ EXPORT_SYMBOL_GPL(filemap_range_has_writeback); int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend) { - int err = 0; + int err = 0, err2; if (mapping_needs_writeback(mapping)) { err = __filemap_fdatawrite_range(mapping, lstart, lend, @@ -678,18 +678,12 @@ int filemap_write_and_wait_range(struct address_space *mapping, * But the -EIO is special case, it may indicate the worst * thing (e.g. bug) happened, so we avoid waiting for it. */ - if (err != -EIO) { - int err2 = filemap_fdatawait_range(mapping, - lstart, lend); - if (!err) - err = err2; - } else { - /* Clear any previously stored errors */ - filemap_check_errors(mapping); - } - } else { - err = filemap_check_errors(mapping); + if (err != -EIO) + __filemap_fdatawait_range(mapping, lstart, lend); } + err2 = filemap_check_errors(mapping); + if (!err) + err = err2; return err; } EXPORT_SYMBOL(filemap_write_and_wait_range); From cc1e2bf2cdab121fb101929ba4b09aee52285173 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 30 Jun 2022 10:41:20 +0200 Subject: [PATCH 165/321] lib/test_free_pages.c: pass a pointer to virt_to_page() In a recent change to the Arm architecture with the end goal of removing highmem we need to convert virt_to_phys() and virt_to_pfn() to static inline functions. This will make them strongly typed. However since virt_to_* is always implemented as macros they have become polymorphic and accept both (void *) and e.g. unsigned long as arguments. Other functions such as virt_to_page() simply wrap virt_to_pfn() and get affected indirectly. To be able to proceed, patch mm to use (void *) as argument to affected functions in all instances. This patch (of 5): A pointer into virtual memory is represented by a (void *) not an u32, so the compiler warns: lib/test_free_pages.c:20:50: warning: passing argument 1 of 'virt_to_pfn' makes pointer from integer without a cast [-Wint-conversion] Fix this with an explicit cast. 
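The underlying assumption of this series is that virt_to_pfn() (and with
it virt_to_page()) eventually becomes a strongly typed static inline
taking a pointer rather than a macro; the exact prototype below is only
illustrative, not part of this patch:

        /* assumed arch-side direction, for illustration only */
        static inline unsigned long virt_to_pfn(const void *kaddr);

        unsigned long addr = __get_free_pages(gfp, 3);
        struct page *page = virt_to_page((void *)addr); /* explicit cast */
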
Link: https://lkml.kernel.org/r/20220630084124.691207-1-linus.walleij@linaro.org Link: https://lkml.kernel.org/r/20220630084124.691207-2-linus.walleij@linaro.org Signed-off-by: Linus Walleij Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Jason Gunthorpe Cc: Marco Elver Signed-off-by: Andrew Morton --- lib/test_free_pages.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c index 25ae1ac2624ae..9ebf6f5549f3c 100644 --- a/lib/test_free_pages.c +++ b/lib/test_free_pages.c @@ -17,7 +17,7 @@ static void test_free_pages(gfp_t gfp) for (i = 0; i < 1000 * 1000; i++) { unsigned long addr = __get_free_pages(gfp, 3); - struct page *page = virt_to_page(addr); + struct page *page = virt_to_page((void *)addr); /* Simulate page cache getting a speculative reference */ get_page(page); From 0658407e9259d06cbfa170a4d67bbc19d8e98335 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 30 Jun 2022 10:41:21 +0200 Subject: [PATCH 166/321] mm/highmem: pass a pointer to virt_to_page() Functions that work on a pointer to virtual memory such as virt_to_pfn() and users of that function such as virt_to_page() are supposed to pass a pointer to virtual memory, ideally a (void *) or other pointer. However since many architectures implement virt_to_pfn() as a macro, this function becomes polymorphic and accepts both a (unsigned long) and a (void *). If we instead implement a proper virt_to_pfn(void *addr) function the following happens (occurred on arch/arm): mm/highmem.c:153:29: warning: passing argument 1 of 'virt_to_pfn' makes pointer from integer without a cast [-Wint-conversion] We already have a proper void * pointer in the scope of this function named "vaddr" so pass that instead. Link: https://lkml.kernel.org/r/20220630084124.691207-3-linus.walleij@linaro.org Signed-off-by: Linus Walleij Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Jason Gunthorpe Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/highmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/highmem.c b/mm/highmem.c index 1a692997fac4c..e92a7ceb30e8d 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -150,7 +150,7 @@ struct page *__kmap_to_page(void *vaddr) return pte_page(pkmap_page_table[i]); } - return virt_to_page(addr); + return virt_to_page(vaddr); } EXPORT_SYMBOL(__kmap_to_page); From b90689956bf0dcd4fb4412f60d6a10eb1e471a94 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 30 Jun 2022 10:41:22 +0200 Subject: [PATCH 167/321] mm: kfence: pass a pointer to virt_to_page() Functions that work on a pointer to virtual memory such as virt_to_pfn() and users of that function such as virt_to_page() are supposed to pass a pointer to virtual memory, ideally a (void *) or other pointer. However since many architectures implement virt_to_pfn() as a macro, this function becomes polymorphic and accepts both a (unsigned long) and a (void *). If we instead implement a proper virt_to_pfn(void *addr) function the following happens (occurred on arch/arm): mm/kfence/core.c:558:30: warning: passing argument 1 of 'virt_to_pfn' makes pointer from integer without a cast [-Wint-conversion] In one case we can refer to __kfence_pool directly (and that is a proper (char *) pointer) and in the other call site we use an explicit cast. 
Link: https://lkml.kernel.org/r/20220630084124.691207-4-linus.walleij@linaro.org Signed-off-by: Linus Walleij Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Jason Gunthorpe Signed-off-by: Andrew Morton --- mm/kfence/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 6aff49f6b79ec..c252081b11dfe 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -546,7 +546,7 @@ static unsigned long kfence_init_pool(void) if (!arch_kfence_init_pool()) return addr; - pages = virt_to_page(addr); + pages = virt_to_page(__kfence_pool); /* * Set up object pages: they must have PG_slab set, to avoid freeing @@ -660,7 +660,7 @@ static bool kfence_init_pool_late(void) /* Same as above. */ free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); #ifdef CONFIG_CONTIG_ALLOC - free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE); + free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PAGE_SIZE); #else free_pages_exact((void *)addr, free_size); #endif From a9f0748b74cc357b00a97e3d1ff93622ba95a958 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 30 Jun 2022 10:41:23 +0200 Subject: [PATCH 168/321] mm: gup: pass a pointer to virt_to_page() Functions that work on a pointer to virtual memory such as virt_to_pfn() and users of that function such as virt_to_page() are supposed to pass a pointer to virtual memory, ideally a (void *) or other pointer. However since many architectures implement virt_to_pfn() as a macro, this function becomes polymorphic and accepts both a (unsigned long) and a (void *). If we instead implement a proper virt_to_pfn(void *addr) function the following happens (occurred on arch/arm): mm/gup.c: In function '__get_user_pages_locked': mm/gup.c:1599:49: warning: passing argument 1 of 'virt_to_pfn' makes pointer from integer without a cast [-Wint-conversion] pages[i] = virt_to_page(start); Fix this with an explicit cast. Link: https://lkml.kernel.org/r/20220630084124.691207-5-linus.walleij@linaro.org Signed-off-by: Linus Walleij Reviewed-by: Jason Gunthorpe Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/gup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/gup.c b/mm/gup.c index e3aec764e3b59..0fa09ac3cf2f5 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1707,7 +1707,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, goto finish_or_fault; if (pages) { - pages[i] = virt_to_page(start); + pages[i] = virt_to_page((void *)start); if (pages[i]) get_page(pages[i]); } From 2ed010e55850c70730ecf848cc3ee2242048a5e5 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 30 Jun 2022 10:41:24 +0200 Subject: [PATCH 169/321] mm: nommu: pass a pointer to virt_to_page() Functions that work on a pointer to virtual memory such as virt_to_pfn() and users of that function such as virt_to_page() are supposed to pass a pointer to virtual memory, ideally a (void *) or other pointer. However since many architectures implement virt_to_pfn() as a macro, this function becomes polymorphic and accepts both a (unsigned long) and a (void *). If we instead implement a proper virt_to_pfn(void *addr) function the following happens (occurred on arch/arm): mm/nommu.c: In function 'free_page_series': mm/nommu.c:501:50: warning: passing argument 1 of 'virt_to_pfn' makes pointer from integer without a cast [-Wint-conversion] struct page *page = virt_to_page(from); Fix this with an explicit cast. 
Link: https://lkml.kernel.org/r/20220630084124.691207-6-linus.walleij@linaro.org Signed-off-by: Linus Walleij Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Jason Gunthorpe Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/nommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/nommu.c b/mm/nommu.c index 7b3fad6118953..6c611a689ec08 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -499,7 +499,7 @@ static void delete_nommu_region(struct vm_region *region) static void free_page_series(unsigned long from, unsigned long to) { for (; from < to; from += PAGE_SIZE) { - struct page *page = virt_to_page(from); + struct page *page = virt_to_page((void *)from); atomic_long_dec(&mmap_pages_allocated); put_page(page); From 5121b6c742e974ff7965f2fc76e109d87f4c880f Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:05 +0530 Subject: [PATCH 170/321] mm/mmap: build protect protection_map[] with __P000 Patch series "mm/mmap: Drop __SXXX/__PXXX macros from across platforms", v6. __SXXX/__PXXX macros are an unnecessary abstraction layer in creating the generic protection_map[] array which is used for vm_get_page_prot(). This abstraction layer can be avoided, if the platforms just define the array protection_map[] for all possible vm_flags access permission combinations and also export vm_get_page_prot() implementation. This series drops __SXXX/__PXXX macros from across platforms in the tree. First it build protects generic protection_map[] array with '#ifdef __P000' and moves it inside platforms which enable ARCH_HAS_VM_GET_PAGE_PROT. Later this build protects same array with '#ifdef ARCH_HAS_VM_GET_PAGE_PROT' and moves inside remaining platforms while enabling ARCH_HAS_VM_GET_PAGE_PROT. This adds a new macro DECLARE_VM_GET_PAGE_PROT defining the current generic vm_get_page_prot(), in order for it to be reused on platforms that do not require custom implementation. Finally, ARCH_HAS_VM_GET_PAGE_PROT can just be dropped, as all platforms now define and export vm_get_page_prot(), via looking up a private and static protection_map[] array. protection_map[] data type has been changed as 'static const' on all platforms that do not change it during boot. This patch (of 26): Build protect generic protection_map[] array with __P000, so that it can be moved inside all the platforms one after the other. Otherwise there will be build failures during this process. CONFIG_ARCH_HAS_VM_GET_PAGE_PROT cannot be used for this purpose as only certain platforms enable this config now. Link: https://lkml.kernel.org/r/20220630051630.1718927-1-anshuman.khandual@arm.com Link: https://lkml.kernel.org/r/20220630051630.1718927-2-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christoph Hellwig Reviewed-by: Christophe Leroy Suggested-by: Christophe Leroy Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 ++ mm/mmap.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index d112fea0625a9..f03c5c1208fdd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -425,7 +425,9 @@ extern unsigned int kobjsize(const void *objp); * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. 
*/ +#ifdef __P000 extern pgprot_t protection_map[16]; +#endif /* * The default fault flags that should be used by most of the diff --git a/mm/mmap.c b/mm/mmap.c index 8236834728ec3..2d67fb07fa3ac 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -100,6 +100,7 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, * w: (no) no * x: (yes) yes */ +#ifdef __P000 pgprot_t protection_map[16] __ro_after_init = { [VM_NONE] = __P000, [VM_READ] = __P001, @@ -118,6 +119,7 @@ pgprot_t protection_map[16] __ro_after_init = { [VM_SHARED | VM_EXEC | VM_WRITE] = __S110, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111 }; +#endif #ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT pgprot_t vm_get_page_prot(unsigned long vm_flags) From 4b9785bd51d79b023d9d886c22f45add431fed3e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:06 +0530 Subject: [PATCH 171/321] mm/mmap: define DECLARE_VM_GET_PAGE_PROT This just converts the generic vm_get_page_prot() implementation into a new macro i.e DECLARE_VM_GET_PAGE_PROT which later can be used across platforms when enabling them with ARCH_HAS_VM_GET_PAGE_PROT. This does not create any functional change. Link: https://lkml.kernel.org/r/20220630051630.1718927-3-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christophe Leroy Suggested-by: Christoph Hellwig Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 28 ++++++++++++++++++++++++++++ mm/mmap.c | 26 +------------------------- 2 files changed, 29 insertions(+), 25 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 3cdc16cfd8673..014ee8f0fbaab 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1689,4 +1689,32 @@ typedef unsigned int pgtbl_mod_mask; #define MAX_PTRS_PER_P4D PTRS_PER_P4D #endif +/* description of effects of mapping type and prot in current implementation. + * this is due to the limited x86 page protection hardware. The expected + * behavior is in parens: + * + * map_type prot + * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC + * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (yes) yes w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (copy) copy w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and + * MAP_PRIVATE (with Enhanced PAN supported): + * r: (no) no + * w: (no) no + * x: (yes) yes + */ +#define DECLARE_VM_GET_PAGE_PROT \ +pgprot_t vm_get_page_prot(unsigned long vm_flags) \ +{ \ + return protection_map[vm_flags & \ + (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \ +} \ +EXPORT_SYMBOL(vm_get_page_prot); + #endif /* _LINUX_PGTABLE_H */ diff --git a/mm/mmap.c b/mm/mmap.c index 2d67fb07fa3ac..d329e54ab2069 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -80,26 +80,6 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, struct vm_area_struct *next, unsigned long start, unsigned long end); -/* description of effects of mapping type and prot in current implementation. - * this is due to the limited x86 page protection hardware. 
The expected - * behavior is in parens: - * - * map_type prot - * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC - * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes - * w: (no) no w: (no) no w: (yes) yes w: (no) no - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes - * w: (no) no w: (no) no w: (copy) copy w: (no) no - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and - * MAP_PRIVATE (with Enhanced PAN supported): - * r: (no) no - * w: (no) no - * x: (yes) yes - */ #ifdef __P000 pgprot_t protection_map[16] __ro_after_init = { [VM_NONE] = __P000, @@ -122,11 +102,7 @@ pgprot_t protection_map[16] __ro_after_init = { #endif #ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT -pgprot_t vm_get_page_prot(unsigned long vm_flags) -{ - return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; -} -EXPORT_SYMBOL(vm_get_page_prot); +DECLARE_VM_GET_PAGE_PROT #endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */ static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) From b3761a69d0768fdf3bb2fcdb02c68de22e43b58d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:07 +0530 Subject: [PATCH 172/321] powerpc/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and while here, also enable ARCH_HAS_VM_GET_PAGE_PROT on 32 bit and nohash 64 (aka book3e/64) platforms via DECLARE_VM_GET_PAGE_PROT. Link: https://lkml.kernel.org/r/20220630051630.1718927-4-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Michael Ellerman Cc: Paul Mackerras Cc: Nicholas Piggin Reviewed-by: Christophe Leroy Signed-off-by: Andrew Morton --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/pgtable.h | 20 +------------------- arch/powerpc/mm/pgtable.c | 24 ++++++++++++++++++++++++ 3 files changed, 26 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c2ce2e60c8f0f..1035d172c7dd5 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -140,7 +140,7 @@ config PPC select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64 + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index d564d0ecd4cd7..33f4bf8d22b01 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -20,25 +20,6 @@ struct mm_struct; #include #endif /* !CONFIG_PPC_BOOK3S */ -/* Note due to the way vm flags are laid out, the bits are XWR */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_X -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY_X -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_X -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED_X -#define __S111 PAGE_SHARED_X - #ifndef __ASSEMBLY__ #ifndef MAX_PTRS_PER_PGD @@ -79,6 +60,7 @@ extern void paging_init(void); void poking_init(void); extern unsigned long ioremap_bot; +extern const pgprot_t protection_map[16]; /* * kern_addr_valid is intended to indicate whether an address is a valid 
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index e6166b71d36d4..cb2dcdb18f8ec 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -472,3 +472,27 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, return ret_pte; } EXPORT_SYMBOL_GPL(__find_linux_pte); + +/* Note due to the way vm flags are laid out, the bits are XWR */ +const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY_X, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY_X, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; + +#ifndef CONFIG_PPC_BOOK3S_64 +DECLARE_VM_GET_PAGE_PROT +#endif From c084b13116416e8ca8aa34c6fec55e19422de922 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:08 +0530 Subject: [PATCH 173/321] sparc/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and while here, also enable ARCH_HAS_VM_GET_PAGE_PROT on 32 bit platforms via DECLARE_VM_GET_PAGE_PROT. Link: https://lkml.kernel.org/r/20220630051630.1718927-5-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: "David S. Miller" Reviewed-by: Sam Ravnborg Signed-off-by: Andrew Morton --- arch/sparc/Kconfig | 2 +- arch/sparc/include/asm/pgtable_32.h | 19 ------------------- arch/sparc/include/asm/pgtable_64.h | 19 ------------------- arch/sparc/mm/init_32.c | 20 ++++++++++++++++++++ arch/sparc/mm/init_64.c | 3 +++ 5 files changed, 24 insertions(+), 39 deletions(-) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index ba449c47effd8..09f868613a4d2 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -13,6 +13,7 @@ config 64BIT config SPARC bool default y + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO select DMA_OPS @@ -84,7 +85,6 @@ config SPARC64 select PERF_USE_VMALLOC select ARCH_HAVE_NMI_SAFE_CMPXCHG select HAVE_C_RECORDMCOUNT - select ARCH_HAS_VM_GET_PAGE_PROT select HAVE_ARCH_AUDITSYSCALL select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 4866625da3140..8ff549004fac4 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -64,25 +64,6 @@ void paging_init(void); extern unsigned long ptr_in_current_pgd; -/* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED - /* First physical page can be anywhere, the following is needed so that * va-->pa and vice versa conversions work properly without performance * hit for all __pa()/__va() operations. 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 4679e45c83483..a779418ceba9c 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -187,25 +187,6 @@ bool kern_addr_valid(unsigned long addr); #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V -/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ -#define __P000 __pgprot(0) -#define __P001 __pgprot(0) -#define __P010 __pgprot(0) -#define __P011 __pgprot(0) -#define __P100 __pgprot(0) -#define __P101 __pgprot(0) -#define __P110 __pgprot(0) -#define __P111 __pgprot(0) - -#define __S000 __pgprot(0) -#define __S001 __pgprot(0) -#define __S010 __pgprot(0) -#define __S011 __pgprot(0) -#define __S100 __pgprot(0) -#define __S101 __pgprot(0) -#define __S110 __pgprot(0) -#define __S111 __pgprot(0) - #ifndef __ASSEMBLY__ pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 1e9f577f084da..d88e774c8eb49 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -302,3 +302,23 @@ void sparc_flush_page_to_ram(struct page *page) __flush_page_to_ram(vaddr); } EXPORT_SYMBOL(sparc_flush_page_to_ram); + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f6174df2d5af0..d6faee23c77dd 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2634,6 +2634,9 @@ void vmemmap_free(unsigned long start, unsigned long end, } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ +/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ +static pgprot_t protection_map[16] __ro_after_init; + static void prot_init_common(unsigned long page_none, unsigned long page_shared, unsigned long page_copy, From efb1202067e3b521788f5a38c5a51c0d5cc12172 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:09 +0530 Subject: [PATCH 174/321] arm64/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and makes it a static. 
Link: https://lkml.kernel.org/r/20220630051630.1718927-6-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Catalin Marinas Cc: Will Deacon Reviewed-by: Catalin Marinas Signed-off-by: Andrew Morton --- arch/arm64/include/asm/pgtable-prot.h | 18 ------------------ arch/arm64/mm/mmap.c | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 62e0ebeed7200..9b165117a4545 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -89,24 +89,6 @@ extern bool arm64_use_ng_mappings; #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) #define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_READONLY -#define __P011 PAGE_READONLY -#define __P100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */ -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_READONLY_EXEC -#define __P111 PAGE_READONLY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */ -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - #endif /* __ASSEMBLY__ */ #endif /* __ASM_PGTABLE_PROT_H */ diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 78e9490f748d8..8f5b7ce857ed4 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -13,6 +13,27 @@ #include #include +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_READONLY, + [VM_WRITE | VM_READ] = PAGE_READONLY, + /* PAGE_EXECONLY if Enhanced PAN */ + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + /* PAGE_EXECONLY if Enhanced PAN */ + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; + /* * You really shouldn't be using read() or write() on /dev/mem. This might go * away in the future. From a84819a47c99619f193c4896d446665b29a1a2e0 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:10 +0530 Subject: [PATCH 175/321] x86/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and makes it a static. This also defines a helper function add_encrypt_protection_map() that can update the protection_map[] array with pgprot_encrypted(). 
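Since protection_map[] becomes static to arch/x86/mm/pgprot.c, the SME setup code can no longer walk the array directly, hence the helper. A simplified sketch of the intended flow (condensed from the hunks below, not a drop-in replacement):

	/* arch/x86/mm/pgprot.c: table is now file-local */
	static pgprot_t protection_map[16] __ro_after_init = { /* ... */ };

	void add_encrypt_protection_map(void)
	{
		unsigned int i;

		/* Fold the memory encryption mask into every entry. */
		for (i = 0; i < ARRAY_SIZE(protection_map); i++)
			protection_map[i] = pgprot_encrypted(protection_map[i]);
	}

sme_early_init() then calls add_encrypt_protection_map() instead of open-coding the loop over protection_map[].
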
Link: https://lkml.kernel.org/r/20220630051630.1718927-7-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Thomas Gleixner Cc: Ingo Molnar Reviewed-by: Christoph Hellwig Signed-off-by: Andrew Morton --- arch/x86/include/asm/pgtable_types.h | 19 ------------------- arch/x86/mm/mem_encrypt_amd.c | 7 +++---- arch/x86/mm/pgprot.c | 27 +++++++++++++++++++++++++++ 3 files changed, 30 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index bdaf8391e2e03..aa174fed3a71c 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -230,25 +230,6 @@ enum page_cache_mode { #endif /* __ASSEMBLY__ */ -/* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - /* * early identity mapping pte attrib macros. */ diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index f6d038e2cd8e8..4b3ec87e8c7d0 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -484,10 +484,10 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo enc_dec_hypercall(vaddr, npages, enc); } +void add_encrypt_protection_map(void); + void __init sme_early_init(void) { - unsigned int i; - if (!sme_me_mask) return; @@ -496,8 +496,7 @@ void __init sme_early_init(void) __supported_pte_mask = __sme_set(__supported_pte_mask); /* Update the protection map with memory encryption mask */ - for (i = 0; i < ARRAY_SIZE(protection_map); i++) - protection_map[i] = pgprot_encrypted(protection_map[i]); + add_encrypt_protection_map(); x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare; x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish; diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c index 7637427822866..b867839b16aa8 100644 --- a/arch/x86/mm/pgprot.c +++ b/arch/x86/mm/pgprot.c @@ -4,6 +4,33 @@ #include #include +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; + +void add_encrypt_protection_map(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(protection_map); i++) + protection_map[i] = pgprot_encrypted(protection_map[i]); +} + pgprot_t vm_get_page_prot(unsigned long vm_flags) { unsigned long val = pgprot_val(protection_map[vm_flags & From 11d29a377c722c54e8c1d4edfa1b71b8ff5e7ed5 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 2 Jul 2022 13:22:52 -0700 Subject: [PATCH 176/321] 
x86-mm-move-protection_map-inside-the-platform-fix fix "no previous prototype for add_encrypt_protection_map" Reported-by: kernel test robot Cc: Anshuman Khandual Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Christoph Hellwig Signed-off-by: Andrew Morton --- arch/x86/include/asm/mem_encrypt.h | 2 ++ arch/x86/mm/mem_encrypt_amd.c | 3 +-- arch/x86/mm/pgprot.c | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 88ceaf3648b32..72ca90552b6a4 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -89,6 +89,8 @@ static inline void mem_encrypt_free_decrypted_mem(void) { } /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); +void add_encrypt_protection_map(void); + /* * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when * writing to or comparing values from the cr3 register. Having the diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 4b3ec87e8c7d0..5c3c3ed46f5a5 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -484,8 +485,6 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo enc_dec_hypercall(vaddr, npages, enc); } -void add_encrypt_protection_map(void); - void __init sme_early_init(void) { if (!sme_me_mask) diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c index b867839b16aa8..c84bd9540b163 100644 --- a/arch/x86/mm/pgprot.c +++ b/arch/x86/mm/pgprot.c @@ -3,6 +3,7 @@ #include #include #include +#include static pgprot_t protection_map[16] __ro_after_init = { [VM_NONE] = PAGE_NONE, From 55abaa0eefdb3e1d083008a83480a4cb74f1c441 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:11 +0530 Subject: [PATCH 177/321] mm/mmap: build protect protection_map[] with ARCH_HAS_VM_GET_PAGE_PROT Now that protection_map[] has been moved inside those platforms that enable ARCH_HAS_VM_GET_PAGE_PROT. Hence generic protection_map[] array now can be protected with CONFIG_ARCH_HAS_VM_GET_PAGE_PROT intead of __P000. Link: https://lkml.kernel.org/r/20220630051630.1718927-8-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christophe Leroy Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 +- mm/mmap.c | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index f03c5c1208fdd..10b11e146eb15 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -425,7 +425,7 @@ extern unsigned int kobjsize(const void *objp); * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. 
*/ -#ifdef __P000 +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT extern pgprot_t protection_map[16]; #endif diff --git a/mm/mmap.c b/mm/mmap.c index d329e54ab2069..83ee882cf2c61 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -80,7 +80,7 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, struct vm_area_struct *next, unsigned long start, unsigned long end); -#ifdef __P000 +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT pgprot_t protection_map[16] __ro_after_init = { [VM_NONE] = __P000, [VM_READ] = __P001, @@ -99,9 +99,6 @@ pgprot_t protection_map[16] __ro_after_init = { [VM_SHARED | VM_EXEC | VM_WRITE] = __S110, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111 }; -#endif - -#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT DECLARE_VM_GET_PAGE_PROT #endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */ From b7267cae6436bab16f68d82c530e9b1785d3f569 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:12 +0530 Subject: [PATCH 178/321] microblaze/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-9-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Michal Simek Signed-off-by: Andrew Morton --- arch/microblaze/Kconfig | 1 + arch/microblaze/include/asm/pgtable.h | 17 ----------------- arch/microblaze/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 8cf429ad1c84f..15f91ba8a0c4c 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -7,6 +7,7 @@ config MICROBLAZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_TABLE_SORT diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 0c72646370e1a..ba348e997dbb4 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -204,23 +204,6 @@ extern pte_t *va_to_pte(unsigned long address); * We consider execute permission the same as read. * Also, write permissions imply read permissions. 
*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED_X #ifndef __ASSEMBLY__ /* diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index f4e503461d244..353fabdfcbc54 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -285,3 +285,23 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) return p; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY_X, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; +DECLARE_VM_GET_PAGE_PROT From d3526b01f3bf7870c6299d9087ab9857c1e9530e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:13 +0530 Subject: [PATCH 179/321] loongarch/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220630051630.1718927-10-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Huacai Chen Cc: WANG Xuerui Signed-off-by: Andrew Morton --- arch/loongarch/Kconfig | 1 + arch/loongarch/include/asm/pgtable-bits.h | 19 ---------- arch/loongarch/mm/cache.c | 46 +++++++++++++++++++++++ 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index db2838cf8c02f..adf8cf6ec5d5a 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -9,6 +9,7 @@ config LOONGARCH select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 3badd112d9ab2..9ca147a29bab8 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -83,25 +83,6 @@ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC) #define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) - -#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ) -#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) - -#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ) -#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE) -#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE) -#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE) -#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE) - #ifndef __ASSEMBLY__ #define pgprot_noncached pgprot_noncached diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c index 9e5ce5aa73f74..e8c68dcf6ab20 100644 --- a/arch/loongarch/mm/cache.c +++ b/arch/loongarch/mm/cache.c @@ -139,3 +139,49 @@ void cpu_cache_init(void) shm_align_mask = PAGE_SIZE - 1; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER | + _PAGE_PROTNONE | _PAGE_NO_EXEC | + _PAGE_NO_READ), + [VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID | + 
_PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER | + _PAGE_PROTNONE | _PAGE_NO_EXEC | + _PAGE_NO_READ), + [VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC | _PAGE_WRITE), + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC | _PAGE_WRITE), + [VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_WRITE), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_WRITE) +}; +DECLARE_VM_GET_PAGE_PROT From 34140f35b56a7785732d464dc7722f534b0cf057 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:14 +0530 Subject: [PATCH 180/321] openrisc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-11-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Stafford Horne Cc: Jonas Bonn Signed-off-by: Andrew Morton --- arch/openrisc/Kconfig | 1 + arch/openrisc/include/asm/pgtable.h | 18 ------------------ arch/openrisc/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e814df4c483ce..fe0dfb50eb867 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -10,6 +10,7 @@ config OPENRISC select ARCH_HAS_DMA_SET_UNCACHED select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select COMMON_CLK select OF select OF_EARLY_FLATTREE diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index c3abbf71e09fc..dcae8aea132fd 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -176,24 +176,6 @@ extern void paging_init(void); __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED_X - /* zero page used for uninitialized stuff */ extern unsigned long empty_zero_page[2048]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 
3a021ab6f1aef..d531ab82be128 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -208,3 +208,23 @@ void __init mem_init(void) mem_init_done = 1; return; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY_X, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; +DECLARE_VM_GET_PAGE_PROT From 1642d00134c952092eeb667c72306062c50c4c3c Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:15 +0530 Subject: [PATCH 181/321] xtensa/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-12-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Chris Zankel Cc: Guo Ren Signed-off-by: Andrew Morton --- arch/xtensa/Kconfig | 1 + arch/xtensa/include/asm/pgtable.h | 18 ------------------ arch/xtensa/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 0b0f0172cced5..4c0d83520ff15 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -11,6 +11,7 @@ config XTENSA select ARCH_HAS_DMA_SET_UNCACHED if MMU select ARCH_HAS_STRNCPY_FROM_USER if !KASAN select ARCH_HAS_STRNLEN_USER + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 0a91376131c5d..e0d5531ae00d0 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -200,24 +200,6 @@ * What follows is the closest we can get by reasonable means.. * See linux/mm/mmap.c for protection_map[] array that uses these definitions. 
*/ -#define __P000 PAGE_NONE /* private --- */ -#define __P001 PAGE_READONLY /* private --r */ -#define __P010 PAGE_COPY /* private -w- */ -#define __P011 PAGE_COPY /* private -wr */ -#define __P100 PAGE_READONLY_EXEC /* private x-- */ -#define __P101 PAGE_READONLY_EXEC /* private x-r */ -#define __P110 PAGE_COPY_EXEC /* private xw- */ -#define __P111 PAGE_COPY_EXEC /* private xwr */ - -#define __S000 PAGE_NONE /* shared --- */ -#define __S001 PAGE_READONLY /* shared --r */ -#define __S010 PAGE_SHARED /* shared -w- */ -#define __S011 PAGE_SHARED /* shared -wr */ -#define __S100 PAGE_READONLY_EXEC /* shared x-- */ -#define __S101 PAGE_READONLY_EXEC /* shared x-r */ -#define __S110 PAGE_SHARED_EXEC /* shared xw- */ -#define __S111 PAGE_SHARED_EXEC /* shared xwr */ - #ifndef __ASSEMBLY__ #define pte_ERROR(e) \ diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 6a32b2cf27185..fb830ec8183cd 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -216,3 +216,23 @@ static int __init parse_memmap_opt(char *str) return 0; } early_param("memmap", parse_memmap_opt); + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; +DECLARE_VM_GET_PAGE_PROT From e77f810d21974252c333d4126dddcf210975e142 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 5 Jul 2022 15:14:11 -0700 Subject: [PATCH 182/321] xtensa: noMMU: fix vm_get_page_prot definition Don't define protection_map and don't use DECLARE_VM_GET_PAGE_PROT in noMMU configs, because in that case the definition for vm_get_page_prot is provided by the include/linux/mm.h Link: https://lkml.kernel.org/r/20220705221411.3381797-1-jcmvbkbc@gmail.com Fixes: 61ab8053710f ("xtensa/mm: enable ARCH_HAS_VM_GET_PAGE_PROT") Signed-off-by: Max Filippov Cc: Anshuman Khandual Cc: Chris Zankel Cc: Guo Ren Signed-off-by: Andrew Morton --- arch/xtensa/mm/init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index fb830ec8183cd..b2587a1a7c462 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -217,6 +217,7 @@ static int __init parse_memmap_opt(char *str) } early_param("memmap", parse_memmap_opt); +#ifdef CONFIG_MMU static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READONLY, @@ -236,3 +237,4 @@ static const pgprot_t protection_map[16] = { [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC }; DECLARE_VM_GET_PAGE_PROT +#endif From 2d16fae8c17fb016431f4cb7a18114fb8f8c09aa Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:16 +0530 Subject: [PATCH 183/321] hexagon/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. 
Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-13-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Brian Cain Signed-off-by: Andrew Morton --- arch/hexagon/Kconfig | 1 + arch/hexagon/include/asm/pgtable.h | 27 ------------------- arch/hexagon/mm/init.c | 42 ++++++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 27 deletions(-) diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 54eadf2651786..bc4ceecd0588c 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -6,6 +6,7 @@ config HEXAGON def_bool y select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select DMA_GLOBAL_POOL # Other pending projects/to-do items. diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index 0610724d6a280..f7048c18b6f97 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -126,33 +126,6 @@ extern unsigned long _dflt_cache_att; */ #define CACHEDEF (CACHE_DEFAULT << 6) -/* Private (copy-on-write) page protections. */ -#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF) -#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF) -#define __P010 __P000 /* Write-only copy-on-write */ -#define __P011 __P001 /* Read/Write copy-on-write */ -#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_EXECUTE | CACHEDEF) -#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \ - _PAGE_READ | CACHEDEF) -#define __P110 __P100 /* Write/execute copy-on-write */ -#define __P111 __P101 /* Read/Write/Execute, copy-on-write */ - -/* Shared page protections. */ -#define __S000 __P000 -#define __S001 __P001 -#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_WRITE | CACHEDEF) -#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ - _PAGE_WRITE | CACHEDEF) -#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_EXECUTE | CACHEDEF) -#define __S101 __P101 -#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF) -#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ - _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF) - extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */ /* HUGETLB not working currently */ diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 3167a3b5c97b0..146115c9de617 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -234,3 +234,45 @@ void __init setup_arch_memory(void) * which is called by start_kernel() later on in the process */ } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + CACHEDEF), + [VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | CACHEDEF), + [VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + CACHEDEF), + [VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | CACHEDEF), + [VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | CACHEDEF), + [VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_READ | + CACHEDEF), + [VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | CACHEDEF), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_READ | + CACHEDEF), + [VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + CACHEDEF), + [VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT 
| _PAGE_USER | + _PAGE_READ | CACHEDEF), + [VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_WRITE | CACHEDEF), + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | _PAGE_WRITE | + CACHEDEF), + [VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | CACHEDEF), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_READ | + CACHEDEF), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_WRITE | + CACHEDEF), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | _PAGE_EXECUTE | + _PAGE_WRITE | CACHEDEF) +}; +DECLARE_VM_GET_PAGE_PROT From 7e00b20327ea53bc35322442263029e764ed82e9 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:17 +0530 Subject: [PATCH 184/321] parisc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-14-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: "James E.J. Bottomley" Signed-off-by: Andrew Morton --- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/pgtable.h | 18 ------------------ arch/parisc/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index fa400055b2d50..891d823939576 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,6 +12,7 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_HUGETLBFS if PA20 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 69765a6dbe89d..6a1899a9b4200 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -271,24 +271,6 @@ extern void __update_cache(pte_t pte); */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 __P000 /* copy on write */ -#define __P011 __P001 /* copy on write */ -#define __P100 PAGE_EXECREAD -#define __P101 PAGE_EXECREAD -#define __P110 __P100 /* copy on write */ -#define __P111 __P101 /* copy on write */ - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_WRITEONLY -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXECREAD -#define __S101 PAGE_EXECREAD -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX - extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 0a81499dd35e2..f03e0961fa256 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -871,3 +871,23 @@ void flush_tlb_all(void) spin_unlock(&sid_lock); } #endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_NONE, + [VM_WRITE | VM_READ] = PAGE_READONLY, + [VM_EXEC] = PAGE_EXECREAD, + [VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, + [VM_SHARED | 
VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT From 3a103d45bb5af398e3beb8e2a58f9c7a3724cbdc Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:18 +0530 Subject: [PATCH 185/321] alpha/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-15-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Richard Henderson Signed-off-by: Andrew Morton --- arch/alpha/Kconfig | 1 + arch/alpha/include/asm/pgtable.h | 17 ----------------- arch/alpha/mm/init.c | 22 ++++++++++++++++++++++ 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 7d0d26b5b3f52..db1c8b3294613 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -2,6 +2,7 @@ config ALPHA bool default y + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_32BIT_USTAT_F_TINODE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 170451fde043f..3ea9661c09ffc 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -116,23 +116,6 @@ struct vm_area_struct; * arch/alpha/mm/fault.c) */ /* xwr */ -#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) -#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW) -#define __P010 _PAGE_P(_PAGE_FOE) -#define __P011 _PAGE_P(_PAGE_FOE) -#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR) -#define __P101 _PAGE_P(_PAGE_FOW) -#define __P110 _PAGE_P(0) -#define __P111 _PAGE_P(0) - -#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) -#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW) -#define __S010 _PAGE_S(_PAGE_FOE) -#define __S011 _PAGE_S(_PAGE_FOE) -#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR) -#define __S101 _PAGE_S(_PAGE_FOW) -#define __S110 _PAGE_S(0) -#define __S111 _PAGE_S(0) /* * pgprot_noncached() is only for infiniband pci support, and a real diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 7511723b76693..a155180d7a837 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -280,3 +280,25 @@ mem_init(void) high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), + [VM_WRITE] = _PAGE_P(_PAGE_FOE), + [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), + [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), + [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), + [VM_EXEC | VM_WRITE] = _PAGE_P(0), + [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0), + [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), + [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), + [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), + [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) +}; +DECLARE_VM_GET_PAGE_PROT 
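The per-platform patches above and below all follow the same shape: select ARCH_HAS_VM_GET_PAGE_PROT, provide a file-local protection_map[16], and emit the exported helper via DECLARE_VM_GET_PAGE_PROT. For reference, that macro, added earlier in the series, expands to a plain table lookup on the low vm_flags bits (sketch mirroring include/linux/pgtable.h):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* Only the rwx and shared bits select an entry. */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);

So, for example, a MAP_SHARED PROT_READ|PROT_WRITE mapping indexes the [VM_SHARED | VM_WRITE | VM_READ] slot, which most of the platforms here map to their PAGE_SHARED equivalent.
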
From 7c7fc68124d2df2c526185f2486420370aea3a03 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:19 +0530 Subject: [PATCH 186/321] nios2/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-16-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Dinh Nguyen Signed-off-by: Andrew Morton --- arch/nios2/Kconfig | 1 + arch/nios2/include/asm/pgtable.h | 16 ---------------- arch/nios2/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 4167f1eb4cd83..e0459dffd2189 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -6,6 +6,7 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_DMA_SET_UNCACHED + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_SWAP select COMMON_CLK select TIMER_OF diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 262d0609268cd..470516d4555e1 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -40,24 +40,8 @@ struct mm_struct; */ /* Remove W bit on private pages for COW support */ -#define __P000 MKP(0, 0, 0) -#define __P001 MKP(0, 0, 1) -#define __P010 MKP(0, 0, 0) /* COW */ -#define __P011 MKP(0, 0, 1) /* COW */ -#define __P100 MKP(1, 0, 0) -#define __P101 MKP(1, 0, 1) -#define __P110 MKP(1, 0, 0) /* COW */ -#define __P111 MKP(1, 0, 1) /* COW */ /* Shared pages can have exact HW mapping */ -#define __S000 MKP(0, 0, 0) -#define __S001 MKP(0, 0, 1) -#define __S010 MKP(0, 1, 0) -#define __S011 MKP(0, 1, 1) -#define __S100 MKP(1, 0, 0) -#define __S101 MKP(1, 0, 1) -#define __S110 MKP(1, 1, 0) -#define __S111 MKP(1, 1, 1) /* Used all over the kernel */ #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \ diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 613fcaa5988a9..ae24687d12ada 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -124,3 +124,23 @@ const char *arch_vma_name(struct vm_area_struct *vma) { return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = MKP(0, 0, 0), + [VM_READ] = MKP(0, 0, 1), + [VM_WRITE] = MKP(0, 0, 0), + [VM_WRITE | VM_READ] = MKP(0, 0, 1), + [VM_EXEC] = MKP(1, 0, 0), + [VM_EXEC | VM_READ] = MKP(1, 0, 1), + [VM_EXEC | VM_WRITE] = MKP(1, 0, 0), + [VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 0, 1), + [VM_SHARED] = MKP(0, 0, 0), + [VM_SHARED | VM_READ] = MKP(0, 0, 1), + [VM_SHARED | VM_WRITE] = MKP(0, 1, 0), + [VM_SHARED | VM_WRITE | VM_READ] = MKP(0, 1, 1), + [VM_SHARED | VM_EXEC] = MKP(1, 0, 0), + [VM_SHARED | VM_EXEC | VM_READ] = MKP(1, 0, 1), + [VM_SHARED | VM_EXEC | VM_WRITE] = MKP(1, 1, 0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1) +}; +DECLARE_VM_GET_PAGE_PROT From d9d6199fbd6e47c560bbcda6324723a714833ff2 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:20 +0530 Subject: [PATCH 187/321] riscv/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. 
Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-17-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Paul Walmsley Cc: Palmer Dabbelt Signed-off-by: Andrew Morton --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/pgtable.h | 20 -------------------- arch/riscv/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index fcbb81feb7ad8..7fba25fc00490 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,6 +32,7 @@ config RISCV select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 1d1be9d9419c1..23e643db65753 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -186,26 +186,6 @@ extern struct pt_alloc_ops pt_ops __initdata; extern pgd_t swapper_pg_dir[]; -/* MAP_PRIVATE permissions: xwr (copy-on-write) */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READ -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_EXEC -#define __P101 PAGE_READ_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_READ_EXEC - -/* MAP_SHARED permissions: xwr */ -#define __S000 PAGE_NONE -#define __S001 PAGE_READ -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXEC -#define __S101 PAGE_READ_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_present(pmd_t pmd) { diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index d466ec670e1fa..a88b7dc31a68c 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -288,6 +288,26 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG #define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) #endif /* CONFIG_XIP_KERNEL */ +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READ, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_EXEC, + [VM_EXEC | VM_READ] = PAGE_READ_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READ, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; +DECLARE_VM_GET_PAGE_PROT + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { unsigned long addr = __fix_to_virt(idx); From 5d69bc88f19b2dbaa54084c6c99d28c3c4ce85b6 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:21 +0530 Subject: [PATCH 188/321] csky/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220630051630.1718927-18-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Guo Ren Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton --- arch/csky/Kconfig | 1 + arch/csky/include/asm/pgtable.h | 18 ------------------ arch/csky/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 21d72b078eefc..588b8a9c68ede 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -6,6 +6,7 @@ config CSKY select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index bbe245117777a..229a5f4ad7fca 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -77,24 +77,6 @@ #define MAX_SWAPFILES_CHECK() \ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5) -#define __P000 PAGE_NONE -#define __P001 PAGE_READ -#define __P010 PAGE_READ -#define __P011 PAGE_READ -#define __P100 PAGE_READ -#define __P101 PAGE_READ -#define __P110 PAGE_READ -#define __P111 PAGE_READ - -#define __S000 PAGE_NONE -#define __S001 PAGE_READ -#define __S010 PAGE_WRITE -#define __S011 PAGE_WRITE -#define __S100 PAGE_READ -#define __S101 PAGE_READ -#define __S110 PAGE_WRITE -#define __S111 PAGE_WRITE - extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index bf2004aa811a0..bde7cabd23df4 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -197,3 +197,23 @@ void __init fixaddr_init(void) vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READ, + [VM_WRITE] = PAGE_READ, + [VM_WRITE | VM_READ] = PAGE_READ, + [VM_EXEC] = PAGE_READ, + [VM_EXEC | VM_READ] = PAGE_READ, + [VM_EXEC | VM_WRITE] = PAGE_READ, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READ, + [VM_SHARED | VM_WRITE] = PAGE_WRITE, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE, + [VM_SHARED | VM_EXEC] = PAGE_READ, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE +}; +DECLARE_VM_GET_PAGE_PROT From 90aec993c5f1e8c342b8a5a378a8e3ff384e74ad Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:22 +0530 Subject: [PATCH 189/321] s390/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220630051630.1718927-19-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Heiko Carstens Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- arch/s390/Kconfig | 1 + arch/s390/include/asm/pgtable.h | 17 ----------------- arch/s390/mm/mmap.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 91c0b80a8bf04..c4481377ca830 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -81,6 +81,7 @@ config S390 select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index a397b072a5800..c63a05b5368ad 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr) * implies read permission. */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_RO -#define __P010 PAGE_RO -#define __P011 PAGE_RO -#define __P100 PAGE_RX -#define __P101 PAGE_RX -#define __P110 PAGE_RX -#define __P111 PAGE_RX - -#define __S000 PAGE_NONE -#define __S001 PAGE_RO -#define __S010 PAGE_RW -#define __S011 PAGE_RW -#define __S100 PAGE_RX -#define __S101 PAGE_RX -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX /* * Segment entry (large page) protection definitions. diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index d545f5c39f7e4..5980ce3488325 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -188,3 +188,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_RO, + [VM_WRITE] = PAGE_RO, + [VM_WRITE | VM_READ] = PAGE_RO, + [VM_EXEC] = PAGE_RX, + [VM_EXEC | VM_READ] = PAGE_RX, + [VM_EXEC | VM_WRITE] = PAGE_RX, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_RO, + [VM_SHARED | VM_WRITE] = PAGE_RW, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW, + [VM_SHARED | VM_EXEC] = PAGE_RX, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT From cff96372c7cfc904895ef19ddbf18f4c2a37375c Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:23 +0530 Subject: [PATCH 190/321] ia64/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
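One ia64-specific detail: the execute entries have no named PAGE_* helper in the ia64 headers, so both the new table and the gate VMA, which previously used __P101 directly, spell the protection out, as the diff below shows:

        /* formerly __P101, i.e. private, readable and executable */
        gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);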
Link: https://lkml.kernel.org/r/20220630051630.1718927-20-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton --- arch/ia64/Kconfig | 1 + arch/ia64/include/asm/pgtable.h | 18 ------------------ arch/ia64/mm/init.c | 28 +++++++++++++++++++++++++++- 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index cb93769a9f2ad..0510a57377116 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -12,6 +12,7 @@ config IA64 select ARCH_HAS_DMA_MARK_CLEAN select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ACPI diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 7aa8f2330fb19..6925e28ae61d1 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -161,24 +161,6 @@ * attempts to write to the page. */ /* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */ -#define __P011 PAGE_READONLY /* ditto */ -#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) -#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */ -#define __S011 PAGE_SHARED -#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) -#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) -#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) -#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) - #define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) #if CONFIG_PGTABLE_LEVELS == 4 #define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 855d949d81dfe..fc4e4217e87ff 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -273,7 +273,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX); return 0; } @@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) __remove_pages(start_pfn, nr_pages, altmap); } #endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_READONLY, + [VM_WRITE | VM_READ] = PAGE_READONLY, + [VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_X_RX), + [VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RX), + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_X_RX), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RX), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RWX), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RWX) +}; 
+DECLARE_VM_GET_PAGE_PROT From 5874b0b7111bfd67f54d9247b73b43d72d444249 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:24 +0530 Subject: [PATCH 191/321] mips/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-21-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Thomas Bogendoerfer Signed-off-by: Andrew Morton --- arch/mips/Kconfig | 1 + arch/mips/include/asm/pgtable.h | 22 ---------------------- arch/mips/mm/cache.c | 3 +++ 3 files changed, 4 insertions(+), 22 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index db09d45d59ec7..d0b7eb11ec81b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -14,6 +14,7 @@ config MIPS select ARCH_HAS_STRNLEN_USER select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_KEEP_MEMBLOCK select ARCH_SUPPORTS_UPROBES diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 374c6322775d8..6caec386ad2f6 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -41,28 +41,6 @@ struct vm_area_struct; * by reasonable means.. */ -/* - * Dummy values to fill the table in mmap.c - * The real values will be generated at runtime - */ -#define __P000 __pgprot(0) -#define __P001 __pgprot(0) -#define __P010 __pgprot(0) -#define __P011 __pgprot(0) -#define __P100 __pgprot(0) -#define __P101 __pgprot(0) -#define __P110 __pgprot(0) -#define __P111 __pgprot(0) - -#define __S000 __pgprot(0) -#define __S001 __pgprot(0) -#define __S010 __pgprot(0) -#define __S011 __pgprot(0) -#define __S100 __pgprot(0) -#define __S101 __pgprot(0) -#define __S110 __pgprot(0) -#define __S111 __pgprot(0) - extern unsigned long _page_cachable_default; extern void __update_cache(unsigned long address, pte_t pte); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 7be7240f77031..11b3e7ddafd5c 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -159,6 +159,9 @@ EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) +static pgprot_t protection_map[16] __ro_after_init; +DECLARE_VM_GET_PAGE_PROT + static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); From db12b53f94fce8921a7bafabd3c8254a87524cee Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:25 +0530 Subject: [PATCH 192/321] m68k/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
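Not every architecture can make the table const. Where the final protections are only known at boot, the table stays ordinary __ro_after_init data and is filled or adjusted during init, and DECLARE_VM_GET_PAGE_PROT works on top of it unchanged. The mips patch above is one example, since its entries include the runtime-detected cache attributes via PM(); the Motorola m68k and arm tables further down are likewise __ro_after_init rather than const. A condensed sketch of the mips arrangement:

        /* table populated at boot rather than at compile time (sketch) */
        static pgprot_t protection_map[16] __ro_after_init;
        DECLARE_VM_GET_PAGE_PROT

        static inline void setup_protection_map(void)
        {
                protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                /* ... the remaining fifteen entries are filled the same way ... */
        }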
Link: https://lkml.kernel.org/r/20220630051630.1718927-22-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Thomas Bogendoerfer Signed-off-by: Andrew Morton --- arch/m68k/Kconfig | 1 + arch/m68k/include/asm/mcf_pgtable.h | 54 ----------------------- arch/m68k/include/asm/motorola_pgtable.h | 22 ---------- arch/m68k/include/asm/sun3_pgtable.h | 17 -------- arch/m68k/mm/mcfmmu.c | 55 ++++++++++++++++++++++++ arch/m68k/mm/motorola.c | 20 +++++++++ arch/m68k/mm/sun3mmu.c | 20 +++++++++ 7 files changed, 96 insertions(+), 93 deletions(-) diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 936cce42ae9aa..49aa0cf13e96a 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -7,6 +7,7 @@ config M68K select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_PREEMPT if !COLDFIRE diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h index 94f38d76e2780..0e9c1b28dcabf 100644 --- a/arch/m68k/include/asm/mcf_pgtable.h +++ b/arch/m68k/include/asm/mcf_pgtable.h @@ -91,60 +91,6 @@ * for use. In general, the bit positions are xwr, and P-items are * private, the S-items are shared. */ -#define __P000 PAGE_NONE -#define __P001 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE) -#define __P010 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_WRITABLE) -#define __P011 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_WRITABLE) -#define __P100 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_EXEC) -#define __P101 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_EXEC) -#define __P110 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_WRITABLE \ - | CF_PAGE_EXEC) -#define __P111 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_WRITABLE \ - | CF_PAGE_EXEC) - -#define __S000 PAGE_NONE -#define __S001 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE) -#define __S010 PAGE_SHARED -#define __S011 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_SHARED \ - | CF_PAGE_READABLE) -#define __S100 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_EXEC) -#define __S101 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_EXEC) -#define __S110 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_SHARED \ - | CF_PAGE_EXEC) -#define __S111 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_SHARED \ - | CF_PAGE_READABLE \ - | CF_PAGE_EXEC) - #define PTE_MASK PAGE_MASK #define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY) diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h index 7c9b56e2a7509..63aaece0722f3 100644 --- a/arch/m68k/include/asm/motorola_pgtable.h +++ b/arch/m68k/include/asm/motorola_pgtable.h @@ -83,28 +83,6 @@ extern unsigned long mm_cachebits; #define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) #define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) -/* - * The m68k can't do page protection for execute, and considers that the same are read. - * Also, write permissions imply read permissions. This is the closest we can get.. 
- */ -#define __P000 PAGE_NONE_C -#define __P001 PAGE_READONLY_C -#define __P010 PAGE_COPY_C -#define __P011 PAGE_COPY_C -#define __P100 PAGE_READONLY_C -#define __P101 PAGE_READONLY_C -#define __P110 PAGE_COPY_C -#define __P111 PAGE_COPY_C - -#define __S000 PAGE_NONE_C -#define __S001 PAGE_READONLY_C -#define __S010 PAGE_SHARED_C -#define __S011 PAGE_SHARED_C -#define __S100 PAGE_READONLY_C -#define __S101 PAGE_READONLY_C -#define __S110 PAGE_SHARED_C -#define __S111 PAGE_SHARED_C - #define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) /* diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h index 5e4e753f0d246..9d919491765be 100644 --- a/arch/m68k/include/asm/sun3_pgtable.h +++ b/arch/m68k/include/asm/sun3_pgtable.h @@ -71,23 +71,6 @@ * protection settings, valid (implying read and execute) and writeable. These * are as close as we can get... */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED /* Use these fake page-protections on PMDs. */ #define SUN3_PMD_VALID (0x00000001) diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 6f1f251252944..70aa0979e0271 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -234,3 +234,58 @@ void steal_context(void) destroy_context(mm); } +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE), + [VM_WRITE] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_WRITABLE), + [VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_WRITABLE), + [VM_EXEC] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_EXEC), + [VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_EXEC), + [VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_WRITABLE | + CF_PAGE_EXEC), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_WRITABLE | + CF_PAGE_EXEC), + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE), + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_SHARED), + [VM_SHARED | VM_EXEC] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_EXEC), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_EXEC), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_SHARED | + CF_PAGE_EXEC), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_SHARED | + CF_PAGE_EXEC) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index df7f797c908a2..31fba64bc5306 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -382,6 +382,26 @@ static void __init map_node(int node) #endif } +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE_C, + 
[VM_READ] = PAGE_READONLY_C, + [VM_WRITE] = PAGE_COPY_C, + [VM_WRITE | VM_READ] = PAGE_COPY_C, + [VM_EXEC] = PAGE_READONLY_C, + [VM_EXEC | VM_READ] = PAGE_READONLY_C, + [VM_EXEC | VM_WRITE] = PAGE_COPY_C, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_C, + [VM_SHARED] = PAGE_NONE_C, + [VM_SHARED | VM_READ] = PAGE_READONLY_C, + [VM_SHARED | VM_WRITE] = PAGE_SHARED_C, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_C, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_C, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_C, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_C, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_C +}; +DECLARE_VM_GET_PAGE_PROT + /* * paging_init() continues the virtual memory environment setup which * was begun by the code in arch/head.S. diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c index dad4942244972..b619d0d4319cc 100644 --- a/arch/m68k/mm/sun3mmu.c +++ b/arch/m68k/mm/sun3mmu.c @@ -95,3 +95,23 @@ void __init paging_init(void) } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED +}; +DECLARE_VM_GET_PAGE_PROT From 1c5c2bf23bc522a25cfff8316310b0f84337ae59 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:26 +0530 Subject: [PATCH 193/321] arc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
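For readers translating the old names into the new indices: the three digits in __Pxwr/__Sxwr stand for execute, write and read, so every old macro corresponds to one fixed slot of protection_map[], for example:

        /*
         *  old name   new index expression
         *  __P011     [VM_WRITE | VM_READ]
         *  __P101     [VM_EXEC | VM_READ]
         *  __S011     [VM_SHARED | VM_WRITE | VM_READ]
         *  __S100     [VM_SHARED | VM_EXEC]
         */

The full sixteen-entry correspondence can be read off the generic table that patch 198 later removes from mm/mmap.c.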
Link: https://lkml.kernel.org/r/20220630051630.1718927-23-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Vineet Gupta Signed-off-by: Andrew Morton --- arch/arc/Kconfig | 1 + arch/arc/include/asm/pgtable-bits-arcv2.h | 18 ------------------ arch/arc/mm/mmap.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 9e3653253ef20..8be56a5d8a9bb 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -13,6 +13,7 @@ config ARC select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_32BIT_OFF_T select BUILDTIME_TABLE_SORT diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h index 183d23bc1e004..b23be557403e3 100644 --- a/arch/arc/include/asm/pgtable-bits-arcv2.h +++ b/arch/arc/include/asm/pgtable-bits-arcv2.h @@ -72,24 +72,6 @@ * This is to enable COW mechanism */ /* xwr */ -#define __P000 PAGE_U_NONE -#define __P001 PAGE_U_R -#define __P010 PAGE_U_R /* Pvt-W => !W */ -#define __P011 PAGE_U_R /* Pvt-W => !W */ -#define __P100 PAGE_U_X_R /* X => R */ -#define __P101 PAGE_U_X_R -#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */ -#define __P111 PAGE_U_X_R /* Pvt-W => !W */ - -#define __S000 PAGE_U_NONE -#define __S001 PAGE_U_R -#define __S010 PAGE_U_W_R /* W => R */ -#define __S011 PAGE_U_W_R -#define __S100 PAGE_U_X_R /* X => R */ -#define __S101 PAGE_U_X_R -#define __S110 PAGE_U_X_W_R /* X => R */ -#define __S111 PAGE_U_X_W_R - #ifndef __ASSEMBLY__ #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 722d26b943072..fce5fa2b4f521 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -74,3 +74,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.align_offset = pgoff << PAGE_SHIFT; return vm_unmapped_area(&info); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_U_NONE, + [VM_READ] = PAGE_U_R, + [VM_WRITE] = PAGE_U_R, + [VM_WRITE | VM_READ] = PAGE_U_R, + [VM_EXEC] = PAGE_U_X_R, + [VM_EXEC | VM_READ] = PAGE_U_X_R, + [VM_EXEC | VM_WRITE] = PAGE_U_X_R, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R, + [VM_SHARED] = PAGE_U_NONE, + [VM_SHARED | VM_READ] = PAGE_U_R, + [VM_SHARED | VM_WRITE] = PAGE_U_W_R, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R, + [VM_SHARED | VM_EXEC] = PAGE_U_X_R, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R +}; +DECLARE_VM_GET_PAGE_PROT From a51ff6caaeccd1b0777ec41f7e1fd476ce989ba5 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:27 +0530 Subject: [PATCH 194/321] arm/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220630051630.1718927-24-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Russell King Cc: Arnd Bergmann Signed-off-by: Andrew Morton --- arch/arm/Kconfig | 1 + arch/arm/include/asm/pgtable.h | 17 ----------------- arch/arm/lib/uaccess_with_memcpy.c | 2 +- arch/arm/mm/mmu.c | 20 ++++++++++++++++++++ 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 7630ba9cb6ccc..e153b6d4fc5be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -24,6 +24,7 @@ config ARM select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index cd1f84bb40aea..78a532068fec2 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -137,23 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, * 2) If we could do execute protection, then read is implied * 3) write implies read permissions */ -#define __P000 __PAGE_NONE -#define __P001 __PAGE_READONLY -#define __P010 __PAGE_COPY -#define __P011 __PAGE_COPY -#define __P100 __PAGE_READONLY_EXEC -#define __P101 __PAGE_READONLY_EXEC -#define __P110 __PAGE_COPY_EXEC -#define __P111 __PAGE_COPY_EXEC - -#define __S000 __PAGE_NONE -#define __S001 __PAGE_READONLY -#define __S010 __PAGE_SHARED -#define __S011 __PAGE_SHARED -#define __S100 __PAGE_READONLY_EXEC -#define __S101 __PAGE_READONLY_EXEC -#define __S110 __PAGE_SHARED_EXEC -#define __S111 __PAGE_SHARED_EXEC #ifndef __ASSEMBLY__ /* diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index c30b689bec2e9..14eecaaf295fa 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -237,7 +237,7 @@ static int __init test_size_treshold(void) if (!dst_page) goto no_dst; kernel_ptr = page_address(src_page); - user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010)); + user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY)); if (!user_ptr) goto no_vmap; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5e2be37a198e2..2722abddd7259 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -405,6 +405,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); } +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = __PAGE_NONE, + [VM_READ] = __PAGE_READONLY, + [VM_WRITE] = __PAGE_COPY, + [VM_WRITE | VM_READ] = __PAGE_COPY, + [VM_EXEC] = __PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC, + [VM_SHARED] = __PAGE_NONE, + [VM_SHARED | VM_READ] = __PAGE_READONLY, + [VM_SHARED | VM_WRITE] = __PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED, + [VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC +}; +DECLARE_VM_GET_PAGE_PROT + /* * Adjust the PMD section entries according to the CPU in use. 
*/ From 5fc44a22c74e81376c5907e81035a07081ed59b8 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:28 +0530 Subject: [PATCH 195/321] um/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-25-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Jeff Dike Signed-off-by: Andrew Morton --- arch/um/Kconfig | 1 + arch/um/include/asm/pgtable.h | 17 ----------------- arch/um/kernel/mem.c | 20 ++++++++++++++++++++ arch/x86/um/mem_32.c | 2 +- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 4ec22e156a2e3..7fb43654e5b5e 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -10,6 +10,7 @@ config UML select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index 167e236d9bb8b..66bc3f99d9bef 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -68,23 +68,6 @@ extern unsigned long end_iomem; * Also, write permissions imply read permissions. This is the closest we can * get.. */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED /* * ZERO_PAGE is a global shared page that is always zero: used diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 15295c3237a00..5b259f0a1f94f 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -197,3 +197,23 @@ void *uml_kmalloc(int size, int flags) { return kmalloc(size, flags); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c index 19c5dbd46770b..cafd01f730dab 100644 --- a/arch/x86/um/mem_32.c +++ b/arch/x86/um/mem_32.c @@ -17,7 +17,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = PAGE_READONLY; return 0; } From 501c0034cada789124902f0d0b91b74f0caee870 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 
Jun 2022 10:46:29 +0530 Subject: [PATCH 196/321] sh/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220630051630.1718927-26-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Yoshinori Sato Cc: Rich Felker Signed-off-by: Andrew Morton --- arch/sh/Kconfig | 1 + arch/sh/include/asm/pgtable.h | 17 ----------------- arch/sh/mm/mmap.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5f220e903e5ab..91f3ea325388f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -12,6 +12,7 @@ config SUPERH select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HIBERNATION_POSSIBLE if MMU select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index d7ddb1ec86a03..6fb9ec54cf9b4 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -89,23 +89,6 @@ static inline unsigned long phys_addr_mask(void) * completely separate permission bits for user and kernel space. */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_EXECREAD -#define __P101 PAGE_EXECREAD -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_WRITEONLY -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXECREAD -#define __S101 PAGE_EXECREAD -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX typedef pte_t *pte_addr_t; diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6a1a1297baaee..46c1183493aff 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -162,3 +162,23 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return 1; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_EXECREAD, + [VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT From 13ef20881309cbfdcfee3f4787adf3d5434d9a48 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 6 Jul 2022 11:10:02 +0530 Subject: [PATCH 197/321] sh: noMMU: fix vm_get_page_prot definition Don't define protection_map and don't use DECLARE_VM_GET_PAGE_PROT in noMMU configs, because in that case the definition for vm_get_page_prot is provided by the include/linux/mm.h Link: https://lkml.kernel.org/r/20220706054002.1936820-1-anshuman.khandual@arm.com Fixes: 03a92d45899c ("sh/mm: enable ARCH_HAS_VM_GET_PAGE_PROT") Signed-off-by: Anshuman Khandual Cc: Yoshinori Sato Cc: Rich Felker Signed-off-by: Andrew Morton --- arch/sh/mm/mmap.c 
| 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 46c1183493aff..b82199878b459 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -19,6 +19,26 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); #ifdef CONFIG_MMU +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_EXECREAD, + [VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT + /* * To avoid cache aliases, we map the shared page with same color. */ @@ -162,23 +182,3 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return 1; } - -static const pgprot_t protection_map[16] = { - [VM_NONE] = PAGE_NONE, - [VM_READ] = PAGE_READONLY, - [VM_WRITE] = PAGE_COPY, - [VM_WRITE | VM_READ] = PAGE_COPY, - [VM_EXEC] = PAGE_EXECREAD, - [VM_EXEC | VM_READ] = PAGE_EXECREAD, - [VM_EXEC | VM_WRITE] = PAGE_COPY, - [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, - [VM_SHARED] = PAGE_NONE, - [VM_SHARED | VM_READ] = PAGE_READONLY, - [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, - [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, - [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, - [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, - [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, - [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX -}; -DECLARE_VM_GET_PAGE_PROT From d42f72e72c85caa45773ce476ef817cf04f66aa4 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 30 Jun 2022 10:46:30 +0530 Subject: [PATCH 198/321] mm/mmap: drop ARCH_HAS_VM_GET_PAGE_PROT Now all the platforms enable ARCH_HAS_GET_PAGE_PROT. They define and export own vm_get_page_prot() whether custom or standard DECLARE_VM_GET_PAGE_PROT. Hence there is no need for default generic fallback for vm_get_page_prot(). Just drop this fallback and also ARCH_HAS_GET_PAGE_PROT mechanism. 
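In practice this means an architecture port no longer selects anything in Kconfig for vm_get_page_prot(): an MMU configuration simply provides the table plus the macro, and !MMU configurations keep the stub from include/linux/mm.h. A hypothetical minimal port would therefore carry only the following (entry values below are placeholders, not a real architecture):

        /* hypothetical new-port boilerplate; protections are placeholders */
        static const pgprot_t protection_map[16] = {
                [VM_NONE]                                  = PAGE_NONE,
                [VM_READ]                                  = PAGE_READONLY,
                /* ... */
                [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC,
        };
        DECLARE_VM_GET_PAGE_PROT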
Link: https://lkml.kernel.org/r/20220630051630.1718927-27-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christoph Hellwig Reviewed-by: Christophe Leroy Reviewed-by: Geert Uytterhoeven Acked-by: Geert Uytterhoeven Signed-off-by: Andrew Morton --- arch/alpha/Kconfig | 1 - arch/arc/Kconfig | 1 - arch/arm/Kconfig | 1 - arch/arm64/Kconfig | 1 - arch/csky/Kconfig | 1 - arch/hexagon/Kconfig | 1 - arch/ia64/Kconfig | 1 - arch/loongarch/Kconfig | 1 - arch/m68k/Kconfig | 1 - arch/microblaze/Kconfig | 1 - arch/mips/Kconfig | 1 - arch/nios2/Kconfig | 1 - arch/openrisc/Kconfig | 1 - arch/parisc/Kconfig | 1 - arch/powerpc/Kconfig | 1 - arch/riscv/Kconfig | 1 - arch/s390/Kconfig | 1 - arch/sh/Kconfig | 1 - arch/sparc/Kconfig | 1 - arch/um/Kconfig | 1 - arch/x86/Kconfig | 1 - arch/xtensa/Kconfig | 1 - include/linux/mm.h | 3 --- mm/Kconfig | 3 --- mm/mmap.c | 22 ---------------------- 25 files changed, 50 deletions(-) diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index db1c8b3294613..7d0d26b5b3f52 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -2,7 +2,6 @@ config ALPHA bool default y - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_32BIT_USTAT_F_TINODE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 8be56a5d8a9bb..9e3653253ef20 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -13,7 +13,6 @@ config ARC select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_32BIT_OFF_T select BUILDTIME_TABLE_SORT diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e153b6d4fc5be..7630ba9cb6ccc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -24,7 +24,6 @@ config ARM select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1652a9800ebee..7030bf3f8d6f6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -45,7 +45,6 @@ config ARM64 select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_ELF_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 588b8a9c68ede..21d72b078eefc 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -6,7 +6,6 @@ config CSKY select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index bc4ceecd0588c..54eadf2651786 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -6,7 +6,6 @@ config HEXAGON def_bool y select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select DMA_GLOBAL_POOL # Other pending projects/to-do items. 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 0510a57377116..cb93769a9f2ad 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -12,7 +12,6 @@ config IA64 select ARCH_HAS_DMA_MARK_CLEAN select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ACPI diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index adf8cf6ec5d5a..db2838cf8c02f 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -9,7 +9,6 @@ config LOONGARCH select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PTE_SPECIAL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 49aa0cf13e96a..936cce42ae9aa 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -7,7 +7,6 @@ config M68K select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_PREEMPT if !COLDFIRE diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 15f91ba8a0c4c..8cf429ad1c84f 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -7,7 +7,6 @@ config MICROBLAZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_TABLE_SORT diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d0b7eb11ec81b..db09d45d59ec7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -14,7 +14,6 @@ config MIPS select ARCH_HAS_STRNLEN_USER select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_KEEP_MEMBLOCK select ARCH_SUPPORTS_UPROBES diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index e0459dffd2189..4167f1eb4cd83 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -6,7 +6,6 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_DMA_SET_UNCACHED - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_SWAP select COMMON_CLK select TIMER_OF diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index fe0dfb50eb867..e814df4c483ce 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -10,7 +10,6 @@ config OPENRISC select ARCH_HAS_DMA_SET_UNCACHED select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select COMMON_CLK select OF select OF_EARLY_FLATTREE diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 891d823939576..fa400055b2d50 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,7 +12,6 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_HUGETLBFS if PA20 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1035d172c7dd5..250b8658b2d4b 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -140,7 +140,6 @@ config PPC select 
ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 7fba25fc00490..fcbb81feb7ad8 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,7 +32,6 @@ config RISCV select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c4481377ca830..91c0b80a8bf04 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -81,7 +81,6 @@ config S390 select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 91f3ea325388f..5f220e903e5ab 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -12,7 +12,6 @@ config SUPERH select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HIBERNATION_POSSIBLE if MMU select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 09f868613a4d2..9c1cce74953a9 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -13,7 +13,6 @@ config 64BIT config SPARC bool default y - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO select DMA_OPS diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 7fb43654e5b5e..4ec22e156a2e3 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -10,7 +10,6 @@ config UML select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index be0b95e51df66..841e4843d0c48 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -94,7 +94,6 @@ config X86 select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_DEBUG_WX select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 4c0d83520ff15..0b0f0172cced5 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -11,7 +11,6 @@ config XTENSA select ARCH_HAS_DMA_SET_UNCACHED if MMU select ARCH_HAS_STRNCPY_FROM_USER if !KASAN select ARCH_HAS_STRNLEN_USER - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS diff --git a/include/linux/mm.h b/include/linux/mm.h index 10b11e146eb15..e9f7c6ffe55bd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -425,9 +425,6 @@ extern unsigned int kobjsize(const void *objp); * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. 
*/ -#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT -extern pgprot_t protection_map[16]; -#endif /* * The default fault flags that should be used by most of the diff --git a/mm/Kconfig b/mm/Kconfig index c1fa4993a56fe..56ca0e7c6f9a0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -951,9 +951,6 @@ config ARCH_HAS_CURRENT_STACK_POINTER register alias named "current_stack_pointer", this config can be selected. -config ARCH_HAS_VM_GET_PAGE_PROT - bool - config ARCH_HAS_PTE_DEVMAP bool diff --git a/mm/mmap.c b/mm/mmap.c index 83ee882cf2c61..03c9ca35e0f49 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -80,28 +80,6 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, struct vm_area_struct *next, unsigned long start, unsigned long end); -#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT -pgprot_t protection_map[16] __ro_after_init = { - [VM_NONE] = __P000, - [VM_READ] = __P001, - [VM_WRITE] = __P010, - [VM_WRITE | VM_READ] = __P011, - [VM_EXEC] = __P100, - [VM_EXEC | VM_READ] = __P101, - [VM_EXEC | VM_WRITE] = __P110, - [VM_EXEC | VM_WRITE | VM_READ] = __P111, - [VM_SHARED] = __S000, - [VM_SHARED | VM_READ] = __S001, - [VM_SHARED | VM_WRITE] = __S010, - [VM_SHARED | VM_WRITE | VM_READ] = __S011, - [VM_SHARED | VM_EXEC] = __S100, - [VM_SHARED | VM_EXEC | VM_READ] = __S101, - [VM_SHARED | VM_EXEC | VM_WRITE] = __S110, - [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111 -}; -DECLARE_VM_GET_PAGE_PROT -#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */ - static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); From 633941ee4b33b9decc112d61367a8ad4e23985c7 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 1 Jul 2022 20:35:21 -0700 Subject: [PATCH 199/321] mm: memcontrol: do not miss MEMCG_MAX events for enforced allocations Yafang Shao reported an issue related to the accounting of bpf memory: if a bpf map is charged indirectly for memory consumed from an interrupt context and allocations are enforced, MEMCG_MAX events are not raised. It's not/less of an issue in a generic case because consequent allocations from a process context will trigger the direct reclaim and MEMCG_MAX events will be raised. However a bpf map can belong to a dying/abandoned memory cgroup, so there will be no allocations from a process context and no MEMCG_MAX events will be triggered. 
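In terms of control flow the fix is small: remember whether the MEMCG_MAX event has already been counted on the reclaim path, and if the charge ends up being enforced anyway, count it at the force label. A condensed sketch of try_charge_memcg() after this change (the full hunks follow below):

        bool raised_max_event = false;
        ...
        /* over the limit: count the event, then try reclaim and retry */
        memcg_memory_event(mem_over_limit, MEMCG_MAX);
        raised_max_event = true;
        ...
        force:
        /* the charge is being enforced; do not lose the event */
        if (!raised_max_event)
                memcg_memory_event(mem_over_limit, MEMCG_MAX);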
Link: https://lkml.kernel.org/r/20220702033521.64630-1-roman.gushchin@linux.dev Signed-off-by: Roman Gushchin Reported-by: Yafang Shao Acked-by: Shakeel Butt Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d839d4b5340c0..e2a53cd469fe3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2576,6 +2576,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, bool passed_oom = false; bool may_swap = true; bool drained = false; + bool raised_max_event = false; unsigned long pflags; retry: @@ -2615,6 +2616,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, goto nomem; memcg_memory_event(mem_over_limit, MEMCG_MAX); + raised_max_event = true; psi_memstall_enter(&pflags); nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, @@ -2681,6 +2683,13 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) return -ENOMEM; force: + /* + * If the allocation has to be enforced, don't forget to raise + * a MEMCG_MAX event. + */ + if (!raised_max_event) + memcg_memory_event(mem_over_limit, MEMCG_MAX); + /* * The allocation either can't fail or will lead to more memory * being freed very soon. Allow memory usage go over the limit From 600b4d50bcc5b227bf7d4044fc2ae8c5d25ce212 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:50 +0300 Subject: [PATCH 200/321] csky: drop definition of PTE_ORDER Patch series "arch: make PxD_ORDER generically available", v2. The question what does PxD_ORDER define raises from time to time and there is still a conflict between MIPS and DAX definitions. Some time ago Matthew Wilcox suggested to use PMD_TABLE_ORDER to define the order of page table allocation: [1] https://lore.kernel.org/linux-arch/YPCJftSTUBEnq2lI@casper.infradead.org/ The parisc patch made it in, but mips didn't. Now mips defines from asm/include/pgtable.h were copied to loongarch which made it worse. Let's deal with it once and for all and rename PxD_ORDER defines to PxD_TABLE_ORDER or just drop them when the only possible order of page table is 0. This patch (of 15): This is the order of the page table allocation, not the order of a PTE. Since its always hardwired to 0, simply drop it. 
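Since shifting by a zero order is a no-op the two spellings are equivalent; for instance, assuming 4 KiB pages and 4-byte PTEs on csky:

        PTRS_PER_PTE == (PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)
                     == (4096 << 0) / 4
                     == 1024        /* PTEs per page-table page */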
Link: https://lkml.kernel.org/r/20220705154708.181258-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20220703141203.147893-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20220703141203.147893-2-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: Matthew Wilcox Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/csky/include/asm/pgtable.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 229a5f4ad7fca..349e03dfb9bae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -19,11 +19,10 @@ * C-SKY is two-level paging structure: */ #define PGD_ORDER 0 -#define PTE_ORDER 0 #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) #define PTRS_PER_PMD 1 -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) From ab94bb7c93bfeba0df1aa594dbba09e924147a9a Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:51 +0300 Subject: [PATCH 201/321] csky: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since its always hardwired to 0, simply drop it. Link: https://lkml.kernel.org/r/20220703141203.147893-3-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/csky/include/asm/pgalloc.h | 2 +- arch/csky/include/asm/pgtable.h | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index bbbd0698b3972..7d57e5da09146 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_t *ret; pgd_t *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_page(GFP_KERNEL); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long *)ret); diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 349e03dfb9bae..c3d9b92cbe61c 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -18,9 +18,8 @@ /* * C-SKY is two-level paging structure: */ -#define PGD_ORDER 0 -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) #define PTRS_PER_PMD 1 #define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) From 46c8749c21445bbaa74d37bc13e04b42786cf370 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 3 Jul 2022 17:11:52 +0300 Subject: [PATCH 202/321] mips: rename PMD_ORDER to PMD_TABLE_ORDER This is the order of the page table allocation, not the order of a PMD. While at it remove unused defintion of _PMD_ORDER in asm-offsets. 
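The "order" here is the buddy-allocator order of the page-table page itself, meaning the table occupies 2^order contiguous pages; the rename makes that explicit instead of letting it read like a property of a PMD entry. The two places the constant feeds into, both visible in the diff below, are roughly:

        /* allocating the PMD table: 2^PMD_TABLE_ORDER contiguous pages */
        pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_TABLE_ORDER);

        /* the number of entries follows from the table size */
        PTRS_PER_PMD == (PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)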
Link: https://lkml.kernel.org/r/20220703141203.147893-4-rppt@kernel.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgalloc.h | 4 ++-- arch/mips/include/asm/pgtable-32.h | 2 +- arch/mips/include/asm/pgtable-64.h | 18 +++++++++--------- arch/mips/kernel/asm-offsets.c | 3 --- 4 files changed, 12 insertions(+), 15 deletions(-) diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 867e9c3db76e9..0ef245cfcae96 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -67,12 +67,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_t *pmd; struct page *pg; - pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER); + pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_TABLE_ORDER); if (!pg) return NULL; if (!pgtable_pmd_page_ctor(pg)) { - __free_pages(pg, PMD_ORDER); + __free_pages(pg, PMD_TABLE_ORDER); return NULL; } diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 95df9c293d8d9..8d57bd5b0b94c 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -82,7 +82,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER aieeee_attempt_to_allocate_pmd +#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #define PTE_ORDER 0 #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 41921acdc9d86..ae0d5a09064da 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -51,12 +51,12 @@ #define PMD_MASK (~(PMD_SIZE-1)) # ifdef __PAGETABLE_PUD_FOLDED -# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3)) # endif #endif #ifndef __PAGETABLE_PUD_FOLDED -#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) @@ -91,13 +91,13 @@ # define PGD_ORDER 1 # define PUD_ORDER aieeee_attempt_to_allocate_pud # endif -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_16KB @@ -107,22 +107,22 @@ #define PGD_ORDER 0 #endif #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud #ifdef CONFIG_MIPS_VA_BITS_48 -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #else -#define PMD_ORDER aieeee_attempt_to_allocate_pmd +#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #endif #define PTE_ORDER 0 #endif @@ -132,7 +132,7 @@ #define PTRS_PER_PUD ((PAGE_SIZE << 
PUD_ORDER) / sizeof(pud_t)) #endif #ifndef __PAGETABLE_PMD_FOLDED -#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t)) +#define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)) #endif #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 04ca75278f023..ca7c5af7697d1 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -197,9 +197,6 @@ void output_mm_defines(void) DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); -#ifndef __PAGETABLE_PMD_FOLDED - DEFINE(_PMD_ORDER, PMD_ORDER); -#endif DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); From f9510397d1093efa79d15193ed8568f23b8d0037 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:53 +0300 Subject: [PATCH 203/321] mips: rename PUD_ORDER to PUD_TABLE_ORDER This is the order of the page table allocation, not the order of a PUD. Link: https://lkml.kernel.org/r/20220703141203.147893-5-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgalloc.h | 2 +- arch/mips/include/asm/pgtable-32.h | 2 +- arch/mips/include/asm/pgtable-64.h | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 0ef245cfcae96..1ef8e86ae5655 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -91,7 +91,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { pud_t *pud; - pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER); + pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_TABLE_ORDER); if (pud) pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); return pud; diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 8d57bd5b0b94c..d9ae244a4fce6 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -81,7 +81,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #endif #define PGD_ORDER (__PGD_ORDER >= 0 ? 
__PGD_ORDER : 0) -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #define PTE_ORDER 0 diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index ae0d5a09064da..7daf9a6509d8a 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -59,7 +59,7 @@ #define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_TABLE_ORDER - 3)) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -86,17 +86,17 @@ #ifdef CONFIG_PAGE_SIZE_4KB # ifdef CONFIG_MIPS_VA_BITS_48 # define PGD_ORDER 0 -# define PUD_ORDER 0 +# define PUD_TABLE_ORDER 0 # else # define PGD_ORDER 1 -# define PUD_ORDER aieeee_attempt_to_allocate_pud +# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud # endif #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PGD_ORDER 0 -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif @@ -106,19 +106,19 @@ #else #define PGD_ORDER 0 #endif -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PGD_ORDER 0 -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #ifdef CONFIG_MIPS_VA_BITS_48 #define PMD_TABLE_ORDER 0 #else @@ -129,7 +129,7 @@ #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) #ifndef __PAGETABLE_PUD_FOLDED -#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t)) +#define PTRS_PER_PUD ((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t)) #endif #ifndef __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)) From d43dae95986bebbfabcf438bd5e2804495dc2878 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:54 +0300 Subject: [PATCH 204/321] mips: drop definitions of PTE_ORDER This is the order of the page table allocation, not the order of a PTE. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-6-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgtable-32.h | 9 ++++----- arch/mips/include/asm/pgtable-64.h | 15 +++++---------- arch/mips/kernel/asm-offsets.c | 1 - arch/mips/mm/tlbex.c | 2 +- 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index d9ae244a4fce6..35bd519a1078f 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -62,9 +62,9 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, /* PGDIR_SHIFT determines what a third-level page table entry can map */ #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) -# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1) +# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2 - 1) #else -# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2) +# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -83,13 +83,12 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd -#define PTE_ORDER 0 #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) -# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2) +# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t) / 2) #else -# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #endif #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 7daf9a6509d8a..dbf7e461d360f 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -42,11 +42,11 @@ /* PGDIR_SHIFT determines what a third-level page table entry can map */ #ifdef __PAGETABLE_PMD_FOLDED -#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3) +#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) #else /* PMD_SHIFT determines the size of the area a second-level page table can map */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) @@ -86,19 +86,17 @@ #ifdef CONFIG_PAGE_SIZE_4KB # ifdef CONFIG_MIPS_VA_BITS_48 # define PGD_ORDER 0 -# define PUD_TABLE_ORDER 0 +# define PUD_TABLE_ORDER 0 # else # define PGD_ORDER 1 -# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud +# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud # endif #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PGD_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_16KB #ifdef CONFIG_MIPS_VA_BITS_48 @@ -108,13 +106,11 @@ #endif #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PGD_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define 
PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 @@ -124,7 +120,6 @@ #else #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #endif -#define PTE_ORDER 0 #endif #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) @@ -134,7 +129,7 @@ #ifndef __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)) #endif -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index ca7c5af7697d1..0c97f755e256f 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -197,7 +197,6 @@ void output_mm_defines(void) DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); - DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 8dbbd99fc7e85..6e8e71f12fab1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -2065,7 +2065,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, UASM_i_MFC0(p, wr.r1, C0_BADVADDR); UASM_i_LW(p, wr.r2, 0, wr.r2); - UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); + UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2); uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); From 1084cb91bc30e432edb6aad39714c8a3695a6aee Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:55 +0300 Subject: [PATCH 205/321] mips: rename PGD_ORDER to PGD_TABLE_ORDER This is the order of the page table allocation, not the order of a PGD. While at it remove unused defintion of _PGD_ORDER in asm-offsets. Link: https://lkml.kernel.org/r/20220703141203.147893-7-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgalloc.h | 2 +- arch/mips/include/asm/pgtable-32.h | 6 +++--- arch/mips/include/asm/pgtable-64.h | 16 ++++++++-------- arch/mips/kernel/asm-offsets.c | 1 - arch/mips/kvm/mmu.c | 2 +- arch/mips/mm/pgtable.c | 2 +- arch/mips/mm/tlbex.c | 12 ++++++------ 7 files changed, 20 insertions(+), 21 deletions(-) diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 1ef8e86ae5655..796035784c733 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -51,7 +51,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm); static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - free_pages((unsigned long)pgd, PGD_ORDER); + free_pages((unsigned long)pgd, PGD_TABLE_ORDER); } #define __pte_free_tlb(tlb,pte,address) \ diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 35bd519a1078f..495c603c1a30f 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -75,12 +75,12 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, * we don't really have any PUD/PMD directory physically. 
*/ #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) -# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1) +# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1) #else -# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) +# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) #endif -#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) +#define PGD_TABLE_ORDER (__PGD_TABLE_ORDER >= 0 ? __PGD_TABLE_ORDER : 0) #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index dbf7e461d360f..a259ca4d1272a 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -85,35 +85,35 @@ */ #ifdef CONFIG_PAGE_SIZE_4KB # ifdef CONFIG_MIPS_VA_BITS_48 -# define PGD_ORDER 0 +# define PGD_TABLE_ORDER 0 # define PUD_TABLE_ORDER 0 # else -# define PGD_ORDER 1 +# define PGD_TABLE_ORDER 1 # define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud # endif #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_16KB #ifdef CONFIG_MIPS_VA_BITS_48 -#define PGD_ORDER 1 +#define PGD_TABLE_ORDER 1 #else -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #endif #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #ifdef CONFIG_MIPS_VA_BITS_48 #define PMD_TABLE_ORDER 0 @@ -122,7 +122,7 @@ #endif #endif -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD ((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t)) #ifndef __PAGETABLE_PUD_FOLDED #define PTRS_PER_PUD ((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t)) #endif diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0c97f755e256f..c4501897b870b 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -196,7 +196,6 @@ void output_mm_defines(void) #endif DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); - DEFINE(_PGD_ORDER, PGD_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 1bfd1b501d823..db17e870bdff5 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -80,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void) { pgd_t *ret; - ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (ret) kvm_pgd_init(ret); diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index 05560b042d822..3b7590660a04b 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -12,7 +12,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long)ret); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 6e8e71f12fab1..a57519ae96b16 100644 --- a/arch/mips/mm/tlbex.c +++ 
b/arch/mips/mm/tlbex.c @@ -818,7 +818,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * everything but the lower xuseg addresses goes down * the module_alloc/vmalloc path. */ - uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, ptr, label_vmalloc); } else { uasm_il_bltz(p, r, tmp, label_vmalloc); @@ -1127,7 +1127,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_SW(p, scratch, scratchpad_offset(0), 0); uasm_i_dsrl_safe(p, scratch, tmp, - PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, scratch, label_vmalloc); if (pgd_reg == -1) { @@ -1493,12 +1493,12 @@ static void setup_pw(void) #endif pgd_i = PGDIR_SHIFT; /* 1st level PGD */ #ifndef __PAGETABLE_PMD_FOLDED - pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER; pmd_i = PMD_SHIFT; /* 2nd level PMD */ pmd_w = PMD_SHIFT - PAGE_SHIFT; #else - pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER; #endif pt_i = PAGE_SHIFT; /* 3rd level PTE */ @@ -1536,7 +1536,7 @@ static void build_loongson3_tlb_refill_handler(void) if (check_for_high_segbits) { uasm_i_dmfc0(&p, K0, C0_BADVADDR); - uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_beqz(&p, &r, K1, label_vmalloc); uasm_i_nop(&p); @@ -2611,7 +2611,7 @@ void build_tlb_refill_handler(void) check_pabits(); #ifdef CONFIG_64BIT - check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); #endif if (cpu_has_3kex) { From e1fde2697fd8dad72c589a8fa96c1efcbce7bcf4 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:56 +0300 Subject: [PATCH 206/321] nios2: drop definition of PTE_ORDER This is the order of the page table allocation, not the order of a PTE. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-8-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/nios2/include/asm/pgtable.h | 3 +-- arch/nios2/mm/init.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 470516d4555e1..1af8dbfe17935 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -53,10 +53,9 @@ struct mm_struct; #define PAGE_COPY MKP(0, 0, 1) #define PGD_ORDER 0 -#define PTE_ORDER 0 #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define USER_PTRS_PER_PGD \ (CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE) diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index ae24687d12ada..7eaba31cea98c 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -80,7 +80,7 @@ void __init mmu_init(void) #define __page_aligned(order) __aligned(PAGE_SIZE << (order)) pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER); -pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER); +pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE); static struct page *kuser_page[1]; static int alloc_kuser_page(void) From f497574dbbe1bf5c5ec5a9b5fc686366ab3dbc7b Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:57 +0300 Subject: [PATCH 207/321] nios2: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-9-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/nios2/include/asm/pgtable.h | 4 +--- arch/nios2/mm/init.c | 3 +-- arch/nios2/mm/pgtable.c | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 1af8dbfe17935..b3d45e815295f 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -52,9 +52,7 @@ struct mm_struct; #define PAGE_COPY MKP(0, 0, 1) -#define PGD_ORDER 0 - -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) #define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define USER_PTRS_PER_PGD \ diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 7eaba31cea98c..7bc82ee889c95 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -78,8 +78,7 @@ void __init mmu_init(void) flush_tlb_all(); } -#define __page_aligned(order) __aligned(PAGE_SIZE << (order)) -pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER); +pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE); pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE); static struct page *kuser_page[1]; diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c index 9b587fd592dd6..7c76e8a7447a6 100644 --- a/arch/nios2/mm/pgtable.c +++ b/arch/nios2/mm/pgtable.c @@ -54,7 +54,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_page(GFP_KERNEL); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init(ret); From 7735fdec3a9a6fd2108e3c4d56be299e286ce00b Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:58 +0300 Subject: [PATCH 208/321] loongarch: drop definition of PTE_ORDER This is the order of the page table allocation, not the order of a PTE. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-10-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgtable.h | 9 ++++----- arch/loongarch/kernel/asm-offsets.c | 1 - arch/loongarch/mm/tlbex.S | 6 +++--- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index d9e86cfa53e22..e0bbfc31fe726 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -24,17 +24,16 @@ #define PGD_ORDER 0 #define PUD_ORDER 0 #define PMD_ORDER 0 -#define PTE_ORDER 0 #if CONFIG_PGTABLE_LEVELS == 2 -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #elif CONFIG_PGTABLE_LEVELS == 3 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) #elif CONFIG_PGTABLE_LEVELS == 4 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) @@ -55,7 +54,7 @@ #if CONFIG_PGTABLE_LEVELS > 2 #define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3) #endif -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) >> 3) +#define PTRS_PER_PTE (PAGE_SIZE >> 3) #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index bfb65eb2844f5..1a1166a7e61cf 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -194,7 +194,6 @@ void output_mm_defines(void) #ifndef __PAGETABLE_PMD_FOLDED DEFINE(_PMD_ORDER, PMD_ORDER); #endif - DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index 7eee402715774..e36c2c07dee37 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -83,7 +83,7 @@ vmalloc_done_load: bne t0, $r0, tlb_huge_update_load csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + srli.d t0, t0, PAGE_SHIFT andi t0, t0, (PTRS_PER_PTE - 1) slli.d t0, t0, _PTE_T_LOG2 add.d t1, ra, t0 @@ -247,7 +247,7 @@ vmalloc_done_store: bne t0, $r0, tlb_huge_update_store csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + srli.d t0, t0, PAGE_SHIFT andi t0, t0, (PTRS_PER_PTE - 1) slli.d t0, t0, _PTE_T_LOG2 add.d t1, ra, t0 @@ -414,7 +414,7 @@ vmalloc_done_modify: bne t0, $r0, tlb_huge_update_modify csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + srli.d t0, t0, PAGE_SHIFT andi t0, t0, (PTRS_PER_PTE - 1) slli.d t0, t0, _PTE_T_LOG2 add.d t1, ra, t0 From ef6ef4b03cdbd1aea1e0582f7d6f66c2c3f0e0c6 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:59 +0300 Subject: [PATCH 209/321] loongarch: drop definition of PMD_ORDER This is the order of the page table allocation, not the order of a PMD. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-11-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgalloc.h | 4 ++-- arch/loongarch/include/asm/pgtable.h | 7 +++---- arch/loongarch/kernel/asm-offsets.c | 3 --- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index b0a57b25c131f..93e785f466397 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -66,12 +66,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_t *pmd; struct page *pg; - pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER); + pg = alloc_page(GFP_KERNEL_ACCOUNT); if (!pg) return NULL; if (!pgtable_pmd_page_ctor(pg)) { - __free_pages(pg, PMD_ORDER); + __free_page(pg); return NULL; } diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index e0bbfc31fe726..f926537d2233d 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -23,7 +23,6 @@ #define PGD_ORDER 0 #define PUD_ORDER 0 -#define PMD_ORDER 0 #if CONFIG_PGTABLE_LEVELS == 2 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) @@ -31,12 +30,12 @@ #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) #elif CONFIG_PGTABLE_LEVELS == 4 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) @@ -52,7 +51,7 @@ #define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3) #endif #if CONFIG_PGTABLE_LEVELS > 2 -#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3) +#define PTRS_PER_PMD (PAGE_SIZE >> 3) #endif #define PTRS_PER_PTE (PAGE_SIZE >> 3) diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 1a1166a7e61cf..aa4ef42d759f2 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -191,9 +191,6 @@ void output_mm_defines(void) DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); -#ifndef __PAGETABLE_PMD_FOLDED - DEFINE(_PMD_ORDER, PMD_ORDER); -#endif BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); From 1f86cd45bda1edc0ec46704e9c260758614ac253 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:12:00 +0300 Subject: [PATCH 210/321] loongarch: drop definition of PUD_ORDER This is the order of the page table allocation, not the order of a PUD. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-12-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgalloc.h | 2 +- arch/loongarch/include/asm/pgtable.h | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 93e785f466397..4bfeb3c9c9acc 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -90,7 +90,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { pud_t *pud; - pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER); + pud = (pud_t *) __get_free_page(GFP_KERNEL); if (pud) pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); return pud; diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index f926537d2233d..a97996fefaed0 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -22,7 +22,6 @@ #endif #define PGD_ORDER 0 -#define PUD_ORDER 0 #if CONFIG_PGTABLE_LEVELS == 2 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) @@ -38,7 +37,7 @@ #define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3)) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -48,7 +47,7 @@ #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3) #if CONFIG_PGTABLE_LEVELS > 3 -#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3) +#define PTRS_PER_PUD (PAGE_SIZE >> 3) #endif #if CONFIG_PGTABLE_LEVELS > 2 #define PTRS_PER_PMD (PAGE_SIZE >> 3) From 67cb5b502afc3d18181d476aad564349d2b08d5f Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:12:01 +0300 Subject: [PATCH 211/321] loongarch: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-13-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgtable.h | 6 ++---- arch/loongarch/kernel/asm-offsets.c | 1 - arch/loongarch/mm/pgtable.c | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index a97996fefaed0..e03443abaf7da 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -21,8 +21,6 @@ #include #endif -#define PGD_ORDER 0 - #if CONFIG_PGTABLE_LEVELS == 2 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #elif CONFIG_PGTABLE_LEVELS == 3 @@ -43,9 +41,9 @@ #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3)) +#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3)) -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3) +#define PTRS_PER_PGD (PAGE_SIZE >> 3) #if CONFIG_PGTABLE_LEVELS > 3 #define PTRS_PER_PUD (PAGE_SIZE >> 3) #endif diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index aa4ef42d759f2..72f431a7289d3 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -190,7 +190,6 @@ void output_mm_defines(void) #endif DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); - DEFINE(_PGD_ORDER, PGD_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 0569647152e9d..ee179ccd3e3f2 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -13,7 +13,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_page(GFP_KERNEL); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long)ret); From c4557bc49c9921731e1ef69d5fe9d7d5c6ab39e2 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 5 Jul 2022 18:47:05 +0300 Subject: [PATCH 212/321] loongarch: drop definition of PGD_ORDER drop extra BLANK() line in arch/loongarch/kernel/asm-offsets.c Link: https://lkml.kernel.org/r/20220705154708.181258-13-rppt@kernel.org Signed-off-by: Mike Rapoport Acked-by: Huacai Chen Signed-off-by: Andrew Morton --- arch/loongarch/kernel/asm-offsets.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 72f431a7289d3..4a3bb1b9aef3f 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -190,7 +190,6 @@ void output_mm_defines(void) #endif DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); - BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); BLANK(); From 212f5642ca1af881e08c82c9f81b16fcb44d4a39 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:12:02 +0300 Subject: [PATCH 213/321] parisc: rename PGD_ORDER to PGD_TABLE_ORDER This is the order of the page table allocation, not the order of a PGD. 
Link: https://lkml.kernel.org/r/20220703141203.147893-14-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/parisc/include/asm/pgalloc.h | 6 +++--- arch/parisc/include/asm/pgtable.h | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 54b63374579b3..e3e142b1c5c5f 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -20,18 +20,18 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (unlikely(pgd == NULL)) return NULL; - memset(pgd, 0, PAGE_SIZE << PGD_ORDER); + memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER); return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - free_pages((unsigned long)pgd, PGD_ORDER); + free_pages((unsigned long)pgd, PGD_TABLE_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 6a1899a9b4200..df7b931865d22 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -118,9 +118,9 @@ extern void __update_cache(pte_t pte); #if CONFIG_PGTABLE_LEVELS == 3 #define PMD_TABLE_ORDER 1 -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #else -#define PGD_ORDER 1 +#define PGD_TABLE_ORDER 1 #endif /* Definitions for 3rd level (we use PLD here for Page Lower directory @@ -144,10 +144,10 @@ extern void __update_cache(pte_t pte); /* Definitions for 1st level */ #define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD) -#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG +#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG #define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT) #else -#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) +#define BITS_PER_PGD (PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) From 528ae9c2c02b79ef2f2dec3222700cc7141a71dd Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:12:03 +0300 Subject: [PATCH 214/321] xtensa: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since its always hardwired to 0, simply drop it. 
Link: https://lkml.kernel.org/r/20220703141203.147893-15-rppt@kernel.org Signed-off-by: Mike Rapoport Acked-by: Helge Deller Acked-by: Max Filippov Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/xtensa/include/asm/pgalloc.h | 2 +- arch/xtensa/include/asm/pgtable.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index eeb2de3a89e57..7fc0f9126dd35 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -29,7 +29,7 @@ static inline pgd_t* pgd_alloc(struct mm_struct *mm) { - return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER); + return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO); } static inline void ptes_clear(pte_t *ptep) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index e0d5531ae00d0..54f577c13afa1 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -57,7 +57,6 @@ #define PTRS_PER_PTE 1024 #define PTRS_PER_PTE_SHIFT 10 #define PTRS_PER_PGD 1024 -#define PGD_ORDER 0 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) From 7b9be7475bef83612a1ee24e5e49143a94f259b9 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 5 Jul 2022 18:47:08 +0300 Subject: [PATCH 215/321] ARM: head.S: rename PMD_ORDER to PMD_ENTRY_ORDER PMD_ORDER denotes order of magnitude for a PMD entry, i.e PMD entry size is 2 ^ PMD_ORDER. Rename PMD_ORDER to PMD_ENTRY_ORDER to allow a generic definition of PMD_ORDER as order of a PMD allocation: (PMD_SHIFT - PAGE_SHIFT). Link: https://lkml.kernel.org/r/20220705154708.181258-16-rppt@kernel.org Signed-off-by: Mike Rapoport Acked-by: Russell King (Oracle) Signed-off-by: Andrew Morton --- arch/arm/kernel/head.S | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 500612d3da2e2..29e2900178a1f 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -38,10 +38,10 @@ #ifdef CONFIG_ARM_LPAE /* LPAE requires an additional page for the PGD */ #define PG_DIR_SIZE 0x5000 -#define PMD_ORDER 3 +#define PMD_ENTRY_ORDER 3 /* PMD entry size is 2^PMD_ENTRY_ORDER */ #else #define PG_DIR_SIZE 0x4000 -#define PMD_ORDER 2 +#define PMD_ENTRY_ORDER 2 #endif .globl swapper_pg_dir @@ -240,7 +240,7 @@ __create_page_tables: mov r6, r6, lsr #SECTION_SHIFT 1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base - str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping + str r3, [r4, r5, lsl #PMD_ENTRY_ORDER] @ identity mapping cmp r5, r6 addlo r5, r5, #1 @ next section blo 1b @@ -250,7 +250,7 @@ __create_page_tables: * set two variables to indicate the physical start and end of the * kernel. 
*/ - add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER) ldr r6, =(_end - 1) adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 @@ -259,8 +259,8 @@ __create_page_tables: str r8, [r5] @ Save physical start of kernel (LE) #endif orr r3, r8, r7 @ Add the MMU flags - add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) -1: str r3, [r0], #1 << PMD_ORDER + add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER) +1: str r3, [r0], #1 << PMD_ENTRY_ORDER add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 bls 1b @@ -280,14 +280,14 @@ __create_page_tables: mov r3, pc mov r3, r3, lsr #SECTION_SHIFT orr r3, r7, r3, lsl #SECTION_SHIFT - add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) - str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! + add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER) + str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]! ldr r6, =(_edata_loc - 1) - add r0, r0, #1 << PMD_ORDER - add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) + add r0, r0, #1 << PMD_ENTRY_ORDER + add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER) 1: cmp r0, r6 add r3, r3, #1 << SECTION_SHIFT - strls r3, [r0], #1 << PMD_ORDER + strls r3, [r0], #1 << PMD_ENTRY_ORDER bls 1b #endif @@ -297,10 +297,10 @@ __create_page_tables: */ mov r0, r2, lsr #SECTION_SHIFT cmp r2, #0 - ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER) + ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER) addne r3, r3, r4 orrne r6, r7, r0, lsl #SECTION_SHIFT - strne r6, [r3], #1 << PMD_ORDER + strne r6, [r3], #1 << PMD_ENTRY_ORDER addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] @@ -319,7 +319,7 @@ __create_page_tables: addruart r7, r3, r0 mov r3, r3, lsr #SECTION_SHIFT - mov r3, r3, lsl #PMD_ORDER + mov r3, r3, lsl #PMD_ENTRY_ORDER add r0, r4, r3 mov r3, r7, lsr #SECTION_SHIFT @@ -349,7 +349,7 @@ __create_page_tables: * If we're using the NetWinder or CATS, we also need to map * in the 16550-type serial port for the debug messages */ - add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER) orr r3, r7, #0x7c000000 str r3, [r0] #endif @@ -359,10 +359,10 @@ __create_page_tables: * Similar reasons here - for debug. This is * only for Acorn RiscPC architectures. */ - add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER) orr r3, r7, #0x02000000 str r3, [r0] - add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER) str r3, [r0] #endif #endif From 71b4c38ef3ba87bdf49b9fbc867c62bdb7a002e4 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:28 +0800 Subject: [PATCH 216/321] mm: hugetlb_vmemmap: delete hugetlb_optimize_vmemmap_enabled() Patch series "Simplify hugetlb vmemmap and improve its readability", v2. This series aims to simplify hugetlb vmemmap and improve its readability. This patch (of 8): The name hugetlb_optimize_vmemmap_enabled() a bit confusing as it tests two conditions (enabled and pages in use). Instead of coming up to an appropriate name, we could just delete it. There is already a discussion about deleting it in thread [1]. There is only one user of hugetlb_optimize_vmemmap_enabled() outside of hugetlb_vmemmap, that is flush_dcache_page() in arch/arm64/mm/flush.c. 
However, it does not need to call hugetlb_optimize_vmemmap_enabled() in flush_dcache_page() since HugeTLB pages are always fully mapped and only head page will be set PG_dcache_clean meaning only head page's flag may need to be cleared (see commit cf5a501d985b). So it is easy to remove hugetlb_optimize_vmemmap_enabled(). Link: https://lore.kernel.org/all/c77c61c8-8a5a-87e8-db89-d04d8aaab4cc@oracle.com/ [1] Link: https://lkml.kernel.org/r/20220628092235.91270-2-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Oscar Salvador Reviewed-by: Mike Kravetz Reviewed-by: Catalin Marinas Cc: Will Deacon Cc: Anshuman Khandual Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- arch/arm64/mm/flush.c | 13 +++---------- include/linux/page-flags.h | 14 ++------------ 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index fc4f710e9820f..5f9379b3c8c87 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -76,17 +76,10 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache); void flush_dcache_page(struct page *page) { /* - * Only the head page's flags of HugeTLB can be cleared since the tail - * vmemmap pages associated with each HugeTLB page are mapped with - * read-only when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is enabled (more - * details can refer to vmemmap_remap_pte()). Although - * __sync_icache_dcache() only set PG_dcache_clean flag on the head - * page struct, there is more than one page struct with PG_dcache_clean - * associated with the HugeTLB page since the head vmemmap page frame - * is reused (more details can refer to the comments above - * page_fixed_fake_head()). + * HugeTLB pages are always fully mapped and only head page will be + * set PG_dcache_clean (see comments in __sync_icache_dcache()). 
*/ - if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page)) + if (PageHuge(page)) page = compound_head(page); if (test_bit(PG_dcache_clean, &page->flags)) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f2ff65f1bf838..3702f60427d6a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -208,12 +208,6 @@ enum pageflags { DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, hugetlb_optimize_vmemmap_key); -static __always_inline bool hugetlb_optimize_vmemmap_enabled(void) -{ - return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, - &hugetlb_optimize_vmemmap_key); -} - /* * If the feature of optimizing vmemmap pages associated with each HugeTLB * page is enabled, the head vmemmap page frame is reused and all of the tail @@ -232,7 +226,8 @@ static __always_inline bool hugetlb_optimize_vmemmap_enabled(void) */ static __always_inline const struct page *page_fixed_fake_head(const struct page *page) { - if (!hugetlb_optimize_vmemmap_enabled()) + if (!static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, + &hugetlb_optimize_vmemmap_key)) return page; /* @@ -260,11 +255,6 @@ static inline const struct page *page_fixed_fake_head(const struct page *page) { return page; } - -static inline bool hugetlb_optimize_vmemmap_enabled(void) -{ - return false; -} #endif static __always_inline int page_is_fake_head(struct page *page) From e3edcd800d1de0a7a3faa6e7bb7157b10ac02ce1 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:29 +0800 Subject: [PATCH 217/321] mm: hugetlb_vmemmap: optimize vmemmap_optimize_mode handling We hold another reference to hugetlb_optimize_vmemmap_key when turning vmemmap_optimize_mode on, because we use the static_key to tell memory_hotplug that memory_hotplug.memmap_on_memory should be overridden. However, this rule went away when we introduced PageVmemmapSelfHosted. Therefore, we can simplify vmemmap_optimize_mode handling by not holding an extra reference to hugetlb_optimize_vmemmap_key. This also means that we do not incur the extra page_fixed_fake_head checks if there are no vmemmap optimized hugetlb pages after this change. 
Link: https://lkml.kernel.org/r/20220628092235.91270-3-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Oscar Salvador Reviewed-by: Mike Kravetz Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 6 ++-- mm/hugetlb_vmemmap.c | 65 ++++---------------------------------- 2 files changed, 9 insertions(+), 62 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 3702f60427d6a..7477e21bb85e8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -205,8 +205,7 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP -DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, - hugetlb_optimize_vmemmap_key); +DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); /* * If the feature of optimizing vmemmap pages associated with each HugeTLB @@ -226,8 +225,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, */ static __always_inline const struct page *page_fixed_fake_head(const struct page *page) { - if (!static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, - &hugetlb_optimize_vmemmap_key)) + if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key)) return page; /* diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 1362feb3c6c98..e5b83a25c2fa8 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -23,42 +23,15 @@ #define RESERVE_VMEMMAP_NR 1U #define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) -enum vmemmap_optimize_mode { - VMEMMAP_OPTIMIZE_OFF, - VMEMMAP_OPTIMIZE_ON, -}; - -DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, - hugetlb_optimize_vmemmap_key); +DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); -static enum vmemmap_optimize_mode vmemmap_optimize_mode = +static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); -static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to) -{ - if (vmemmap_optimize_mode == to) - return; - - if (to == VMEMMAP_OPTIMIZE_OFF) - static_branch_dec(&hugetlb_optimize_vmemmap_key); - else - static_branch_inc(&hugetlb_optimize_vmemmap_key); - WRITE_ONCE(vmemmap_optimize_mode, to); -} - static int __init hugetlb_vmemmap_early_param(char *buf) { - bool enable; - enum vmemmap_optimize_mode mode; - - if (kstrtobool(buf, &enable)) - return -EINVAL; - - mode = enable ? 
VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF; - vmemmap_optimize_mode_switch(mode); - - return 0; + return kstrtobool(buf, &vmemmap_optimize_enabled); } early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param); @@ -100,7 +73,7 @@ int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head) static unsigned int vmemmap_optimizable_pages(struct hstate *h, struct page *head) { - if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF) + if (!READ_ONCE(vmemmap_optimize_enabled)) return 0; if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) { @@ -191,7 +164,6 @@ void __init hugetlb_vmemmap_init(struct hstate *h) if (!is_power_of_2(sizeof(struct page))) { pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n"); - static_branch_disable(&hugetlb_optimize_vmemmap_key); return; } @@ -212,36 +184,13 @@ void __init hugetlb_vmemmap_init(struct hstate *h) } #ifdef CONFIG_PROC_SYSCTL -static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write, - void *buffer, size_t *length, - loff_t *ppos) -{ - int ret; - enum vmemmap_optimize_mode mode; - static DEFINE_MUTEX(sysctl_mutex); - - if (write && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - mutex_lock(&sysctl_mutex); - mode = vmemmap_optimize_mode; - table->data = &mode; - ret = proc_dointvec_minmax(table, write, buffer, length, ppos); - if (write && !ret) - vmemmap_optimize_mode_switch(mode); - mutex_unlock(&sysctl_mutex); - - return ret; -} - static struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", - .maxlen = sizeof(enum vmemmap_optimize_mode), + .data = &vmemmap_optimize_enabled, + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = hugetlb_optimize_vmemmap_handler, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, + .proc_handler = proc_dobool, }, { } }; From 822681f681848becf81c622f0cd85068899d6f19 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:30 +0800 Subject: [PATCH 218/321] mm: hugetlb_vmemmap: introduce the name HVO It is inconvenient to mention the feature of optimizing vmemmap pages associated with HugeTLB pages when communicating with others, since there was no specific or abbreviated name for it when it was first introduced. Let us give it the name HVO (HugeTLB Vmemmap Optimization) from now on. This commit also updates the documentation for "hugetlb_free_vmemmap", as discussed in thread [1]. 
Link: https://lore.kernel.org/all/21aae898-d54d-cc4b-a11f-1bb7fddcfffa@redhat.com/ [1] Link: https://lkml.kernel.org/r/20220628092235.91270-4-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Oscar Salvador Reviewed-by: Mike Kravetz Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- Documentation/admin-guide/kernel-parameters.txt | 7 ++++--- Documentation/admin-guide/mm/hugetlbpage.rst | 4 ++-- Documentation/admin-guide/mm/memory-hotplug.rst | 4 ++-- Documentation/admin-guide/sysctl/vm.rst | 3 +-- Documentation/mm/vmemmap_dedup.rst | 2 ++ fs/Kconfig | 12 +++++------- include/linux/page-flags.h | 3 +-- mm/hugetlb_vmemmap.c | 8 ++++---- mm/hugetlb_vmemmap.h | 4 ++-- 9 files changed, 23 insertions(+), 24 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 2cacd4f8deb75..764577db97150 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1712,12 +1712,13 @@ hugetlb_free_vmemmap= [KNL] Reguires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP enabled. + Control if HugeTLB Vmemmap Optimization (HVO) is enabled. Allows heavy hugetlb users to free up some more memory (7 * PAGE_SIZE for each 2MB hugetlb page). - Format: { [oO][Nn]/Y/y/1 | [oO][Ff]/N/n/0 (default) } + Format: { on | off (default) } - [oO][Nn]/Y/y/1: enable the feature - [oO][Ff]/N/n/0: disable the feature + on: enable HVO + off: disable HVO Built with CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y, the default is on. diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst index a90330d0a8373..8e2727dc18d4d 100644 --- a/Documentation/admin-guide/mm/hugetlbpage.rst +++ b/Documentation/admin-guide/mm/hugetlbpage.rst @@ -164,8 +164,8 @@ default_hugepagesz will all result in 256 2M huge pages being allocated. Valid default huge page size is architecture dependent. hugetlb_free_vmemmap - When CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is set, this enables optimizing - unused vmemmap pages associated with each HugeTLB page. + When CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is set, this enables HugeTLB + Vmemmap Optimization (HVO). When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages`` indicates the current number of pre-allocated huge pages of the default size. diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst index 0f56ecd8ac054..a3c9e8ad8fa0d 100644 --- a/Documentation/admin-guide/mm/memory-hotplug.rst +++ b/Documentation/admin-guide/mm/memory-hotplug.rst @@ -653,8 +653,8 @@ block might fail: - Concurrent activity that operates on the same physical memory area, such as allocating gigantic pages, can result in temporary offlining failures. -- Out of memory when dissolving huge pages, especially when freeing unused - vmemmap pages associated with each hugetlb page is enabled. +- Out of memory when dissolving huge pages, especially when HugeTLB Vmemmap + Optimization (HVO) is enabled. 
Offlining code may be able to migrate huge page contents, but may not be able to dissolve the source huge page because it fails allocating (unmovable) pages diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 5a810ea6dc77f..b6042076816fa 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -569,8 +569,7 @@ This knob is not available when the size of 'struct page' (a structure defined in include/linux/mm_types.h) is not power of two (an unusual system config could result in this). -Enable (set to 1) or disable (set to 0) the feature of optimizing vmemmap pages -associated with each HugeTLB page. +Enable (set to 1) or disable (set to 0) HugeTLB Vmemmap Optimization (HVO). Once enabled, the vmemmap pages of subsequent allocation of HugeTLB pages from buddy allocator will be optimized (7 pages per 2MB HugeTLB page and 4095 pages diff --git a/Documentation/mm/vmemmap_dedup.rst b/Documentation/mm/vmemmap_dedup.rst index c9c495f62d123..7d7a161aa3646 100644 --- a/Documentation/mm/vmemmap_dedup.rst +++ b/Documentation/mm/vmemmap_dedup.rst @@ -7,6 +7,8 @@ A vmemmap diet for HugeTLB and Device DAX HugeTLB ======= +This section is to explain how HugeTLB Vmemmap Optimization (HVO) works. + The struct page structures (page structs) are used to describe a physical page frame. By default, there is a one-to-one mapping from a page frame to it's corresponding page struct. diff --git a/fs/Kconfig b/fs/Kconfig index 5976eb33535ff..a547307c1ae82 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -247,8 +247,7 @@ config HUGETLB_PAGE # # Select this config option from the architecture Kconfig, if it is preferred -# to enable the feature of minimizing overhead of struct page associated with -# each HugeTLB page. +# to enable the feature of HugeTLB Vmemmap Optimization (HVO). # config ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP bool @@ -259,14 +258,13 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP depends on SPARSEMEM_VMEMMAP config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON - bool "Default optimizing vmemmap pages of HugeTLB to on" + bool "HugeTLB Vmemmap Optimization (HVO) defaults to on" default n depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP help - When using HUGETLB_PAGE_OPTIMIZE_VMEMMAP, the optimizing unused vmemmap - pages associated with each HugeTLB page is default off. Say Y here - to enable optimizing vmemmap pages of HugeTLB by default. It can then - be disabled on the command line via hugetlb_free_vmemmap=off. + The HugeTLB VmemmapvOptimization (HVO) defaults to off. Say Y here to + enable HVO by default. It can be disabled via hugetlb_free_vmemmap=off + (boot command line) or hugetlb_optimize_vmemmap (sysctl). config MEMFD_CREATE def_bool TMPFS || HUGETLBFS diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 7477e21bb85e8..a2ada8c75d5e8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -208,8 +208,7 @@ enum pageflags { DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); /* - * If the feature of optimizing vmemmap pages associated with each HugeTLB - * page is enabled, the head vmemmap page frame is reused and all of the tail + * If HVO is enabled, the head vmemmap page frame is reused and all of the tail * vmemmap addresses map to the head vmemmap page frame (furture details can * refer to the figure at the head of the mm/hugetlb_vmemmap.c). 
In other * words, there are more than one page struct with PG_head associated with each diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index e5b83a25c2fa8..bcafd9d7639cf 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Optimize vmemmap pages associated with HugeTLB + * HugeTLB Vmemmap Optimization (HVO) * - * Copyright (c) 2020, Bytedance. All rights reserved. + * Copyright (c) 2020, ByteDance. All rights reserved. * * Author: Muchun Song * @@ -156,8 +156,8 @@ void __init hugetlb_vmemmap_init(struct hstate *h) /* * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct - * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP, - * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page. + * page structs that can be used when HVO is enabled, add a BUILD_BUG_ON + * to catch invalid usage of the tail page structs. */ BUILD_BUG_ON(__NR_USED_SUBPAGE >= RESERVE_VMEMMAP_SIZE / sizeof(struct page)); diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index 109b0a53b6fe9..ba66fadad9fca 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Optimize vmemmap pages associated with HugeTLB + * HugeTLB Vmemmap Optimization (HVO) * - * Copyright (c) 2020, Bytedance. All rights reserved. + * Copyright (c) 2020, ByteDance. All rights reserved. * * Author: Muchun Song */ From b7b717fb89a27caa86abb8b984ffaba3e45bb57d Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:31 +0800 Subject: [PATCH 219/321] mm: hugetlb_vmemmap: move vmemmap code related to HugeTLB to hugetlb_vmemmap.c When I first introduced vmemmap manipulation functions related to HugeTLB, I thought those functions may be reused by other modules (e.g. using similar approach to optimize vmemmap pages, unfortunately, the DAX used the same approach but does not use those functions). After two years, we didn't see any other users. So move those functions to hugetlb_vmemmap.c. Code movement without any functional change. 
Link: https://lkml.kernel.org/r/20220628092235.91270-5-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Oscar Salvador Reviewed-by: Mike Kravetz Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- include/linux/mm.h | 7 - mm/hugetlb_vmemmap.c | 399 ++++++++++++++++++++++++++++++++++++++++++- mm/sparse-vmemmap.c | 399 ------------------------------------------- 3 files changed, 398 insertions(+), 407 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index e9f7c6ffe55bd..4287bec50c286 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3154,13 +3154,6 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) } #endif -#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP -int vmemmap_remap_free(unsigned long start, unsigned long end, - unsigned long reuse); -int vmemmap_remap_alloc(unsigned long start, unsigned long end, - unsigned long reuse, gfp_t gfp_mask); -#endif - void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index bcafd9d7639cf..f68e216600b91 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -10,9 +10,31 @@ */ #define pr_fmt(fmt) "HugeTLB: " fmt -#include +#include +#include +#include +#include #include "hugetlb_vmemmap.h" +/** + * struct vmemmap_remap_walk - walk vmemmap page table + * + * @remap_pte: called for each lowest-level entry (PTE). + * @nr_walked: the number of walked pte. + * @reuse_page: the page which is reused for the tail vmemmap pages. + * @reuse_addr: the virtual address of the @reuse_page page. + * @vmemmap_pages: the list head of the vmemmap pages that can be freed + * or is mapped from. + */ +struct vmemmap_remap_walk { + void (*remap_pte)(pte_t *pte, unsigned long addr, + struct vmemmap_remap_walk *walk); + unsigned long nr_walked; + struct page *reuse_page; + unsigned long reuse_addr; + struct list_head *vmemmap_pages; +}; + /* * There are a lot of struct page structures associated with each HugeTLB page. * For tail pages, the value of compound_head is the same. So we can reuse first @@ -23,6 +45,381 @@ #define RESERVE_VMEMMAP_NR 1U #define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) +static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) +{ + pmd_t __pmd; + int i; + unsigned long addr = start; + struct page *page = pmd_page(*pmd); + pte_t *pgtable = pte_alloc_one_kernel(&init_mm); + + if (!pgtable) + return -ENOMEM; + + pmd_populate_kernel(&init_mm, &__pmd, pgtable); + + for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) { + pte_t entry, *pte; + pgprot_t pgprot = PAGE_KERNEL; + + entry = mk_pte(page + i, pgprot); + pte = pte_offset_kernel(&__pmd, addr); + set_pte_at(&init_mm, addr, pte, entry); + } + + spin_lock(&init_mm.page_table_lock); + if (likely(pmd_leaf(*pmd))) { + /* + * Higher order allocations from buddy allocator must be able to + * be treated as indepdenent small pages (as they can be freed + * individually). + */ + if (!PageReserved(page)) + split_page(page, get_order(PMD_SIZE)); + + /* Make pte visible before pmd. See comment in pmd_install(). 
*/ + smp_wmb(); + pmd_populate_kernel(&init_mm, pmd, pgtable); + flush_tlb_kernel_range(start, start + PMD_SIZE); + } else { + pte_free_kernel(&init_mm, pgtable); + } + spin_unlock(&init_mm.page_table_lock); + + return 0; +} + +static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) +{ + int leaf; + + spin_lock(&init_mm.page_table_lock); + leaf = pmd_leaf(*pmd); + spin_unlock(&init_mm.page_table_lock); + + if (!leaf) + return 0; + + return __split_vmemmap_huge_pmd(pmd, start); +} + +static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, + struct vmemmap_remap_walk *walk) +{ + pte_t *pte = pte_offset_kernel(pmd, addr); + + /* + * The reuse_page is found 'first' in table walk before we start + * remapping (which is calling @walk->remap_pte). + */ + if (!walk->reuse_page) { + walk->reuse_page = pte_page(*pte); + /* + * Because the reuse address is part of the range that we are + * walking, skip the reuse address range. + */ + addr += PAGE_SIZE; + pte++; + walk->nr_walked++; + } + + for (; addr != end; addr += PAGE_SIZE, pte++) { + walk->remap_pte(pte, addr, walk); + walk->nr_walked++; + } +} + +static int vmemmap_pmd_range(pud_t *pud, unsigned long addr, + unsigned long end, + struct vmemmap_remap_walk *walk) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_offset(pud, addr); + do { + int ret; + + ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK); + if (ret) + return ret; + + next = pmd_addr_end(addr, end); + vmemmap_pte_range(pmd, addr, next, walk); + } while (pmd++, addr = next, addr != end); + + return 0; +} + +static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr, + unsigned long end, + struct vmemmap_remap_walk *walk) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(p4d, addr); + do { + int ret; + + next = pud_addr_end(addr, end); + ret = vmemmap_pmd_range(pud, addr, next, walk); + if (ret) + return ret; + } while (pud++, addr = next, addr != end); + + return 0; +} + +static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr, + unsigned long end, + struct vmemmap_remap_walk *walk) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_offset(pgd, addr); + do { + int ret; + + next = p4d_addr_end(addr, end); + ret = vmemmap_pud_range(p4d, addr, next, walk); + if (ret) + return ret; + } while (p4d++, addr = next, addr != end); + + return 0; +} + +static int vmemmap_remap_range(unsigned long start, unsigned long end, + struct vmemmap_remap_walk *walk) +{ + unsigned long addr = start; + unsigned long next; + pgd_t *pgd; + + VM_BUG_ON(!PAGE_ALIGNED(start)); + VM_BUG_ON(!PAGE_ALIGNED(end)); + + pgd = pgd_offset_k(addr); + do { + int ret; + + next = pgd_addr_end(addr, end); + ret = vmemmap_p4d_range(pgd, addr, next, walk); + if (ret) + return ret; + } while (pgd++, addr = next, addr != end); + + /* + * We only change the mapping of the vmemmap virtual address range + * [@start + PAGE_SIZE, end), so we only need to flush the TLB which + * belongs to the range. + */ + flush_tlb_kernel_range(start + PAGE_SIZE, end); + + return 0; +} + +/* + * Free a vmemmap page. A vmemmap page can be allocated from the memblock + * allocator or buddy allocator. If the PG_reserved flag is set, it means + * that it allocated from the memblock allocator, just free it via the + * free_bootmem_page(). Otherwise, use __free_page(). 
+ */ +static inline void free_vmemmap_page(struct page *page) +{ + if (PageReserved(page)) + free_bootmem_page(page); + else + __free_page(page); +} + +/* Free a list of the vmemmap pages */ +static void free_vmemmap_page_list(struct list_head *list) +{ + struct page *page, *next; + + list_for_each_entry_safe(page, next, list, lru) { + list_del(&page->lru); + free_vmemmap_page(page); + } +} + +static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, + struct vmemmap_remap_walk *walk) +{ + /* + * Remap the tail pages as read-only to catch illegal write operation + * to the tail pages. + */ + pgprot_t pgprot = PAGE_KERNEL_RO; + pte_t entry = mk_pte(walk->reuse_page, pgprot); + struct page *page = pte_page(*pte); + + list_add_tail(&page->lru, walk->vmemmap_pages); + set_pte_at(&init_mm, addr, pte, entry); +} + +/* + * How many struct page structs need to be reset. When we reuse the head + * struct page, the special metadata (e.g. page->flags or page->mapping) + * cannot copy to the tail struct page structs. The invalid value will be + * checked in the free_tail_pages_check(). In order to avoid the message + * of "corrupted mapping in tail page". We need to reset at least 3 (one + * head struct page struct and two tail struct page structs) struct page + * structs. + */ +#define NR_RESET_STRUCT_PAGE 3 + +static inline void reset_struct_pages(struct page *start) +{ + int i; + struct page *from = start + NR_RESET_STRUCT_PAGE; + + for (i = 0; i < NR_RESET_STRUCT_PAGE; i++) + memcpy(start + i, from, sizeof(*from)); +} + +static void vmemmap_restore_pte(pte_t *pte, unsigned long addr, + struct vmemmap_remap_walk *walk) +{ + pgprot_t pgprot = PAGE_KERNEL; + struct page *page; + void *to; + + BUG_ON(pte_page(*pte) != walk->reuse_page); + + page = list_first_entry(walk->vmemmap_pages, struct page, lru); + list_del(&page->lru); + to = page_to_virt(page); + copy_page(to, (void *)walk->reuse_addr); + reset_struct_pages(to); + + set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot)); +} + +/** + * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end) + * to the page which @reuse is mapped to, then free vmemmap + * which the range are mapped to. + * @start: start address of the vmemmap virtual address range that we want + * to remap. + * @end: end address of the vmemmap virtual address range that we want to + * remap. + * @reuse: reuse address. + * + * Return: %0 on success, negative error code otherwise. + */ +static int vmemmap_remap_free(unsigned long start, unsigned long end, + unsigned long reuse) +{ + int ret; + LIST_HEAD(vmemmap_pages); + struct vmemmap_remap_walk walk = { + .remap_pte = vmemmap_remap_pte, + .reuse_addr = reuse, + .vmemmap_pages = &vmemmap_pages, + }; + + /* + * In order to make remapping routine most efficient for the huge pages, + * the routine of vmemmap page table walking has the following rules + * (see more details from the vmemmap_pte_range()): + * + * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE) + * should be continuous. + * - The @reuse address is part of the range [@reuse, @end) that we are + * walking which is passed to vmemmap_remap_range(). + * - The @reuse address is the first in the complete range. + * + * So we need to make sure that @start and @reuse meet the above rules. 
+ */ + BUG_ON(start - reuse != PAGE_SIZE); + + mmap_read_lock(&init_mm); + ret = vmemmap_remap_range(reuse, end, &walk); + if (ret && walk.nr_walked) { + end = reuse + walk.nr_walked * PAGE_SIZE; + /* + * vmemmap_pages contains pages from the previous + * vmemmap_remap_range call which failed. These + * are pages which were removed from the vmemmap. + * They will be restored in the following call. + */ + walk = (struct vmemmap_remap_walk) { + .remap_pte = vmemmap_restore_pte, + .reuse_addr = reuse, + .vmemmap_pages = &vmemmap_pages, + }; + + vmemmap_remap_range(reuse, end, &walk); + } + mmap_read_unlock(&init_mm); + + free_vmemmap_page_list(&vmemmap_pages); + + return ret; +} + +static int alloc_vmemmap_page_list(unsigned long start, unsigned long end, + gfp_t gfp_mask, struct list_head *list) +{ + unsigned long nr_pages = (end - start) >> PAGE_SHIFT; + int nid = page_to_nid((struct page *)start); + struct page *page, *next; + + while (nr_pages--) { + page = alloc_pages_node(nid, gfp_mask, 0); + if (!page) + goto out; + list_add_tail(&page->lru, list); + } + + return 0; +out: + list_for_each_entry_safe(page, next, list, lru) + __free_pages(page, 0); + return -ENOMEM; +} + +/** + * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end) + * to the page which is from the @vmemmap_pages + * respectively. + * @start: start address of the vmemmap virtual address range that we want + * to remap. + * @end: end address of the vmemmap virtual address range that we want to + * remap. + * @reuse: reuse address. + * @gfp_mask: GFP flag for allocating vmemmap pages. + * + * Return: %0 on success, negative error code otherwise. + */ +static int vmemmap_remap_alloc(unsigned long start, unsigned long end, + unsigned long reuse, gfp_t gfp_mask) +{ + LIST_HEAD(vmemmap_pages); + struct vmemmap_remap_walk walk = { + .remap_pte = vmemmap_restore_pte, + .reuse_addr = reuse, + .vmemmap_pages = &vmemmap_pages, + }; + + /* See the comment in the vmemmap_remap_free(). */ + BUG_ON(start - reuse != PAGE_SIZE); + + if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages)) + return -ENOMEM; + + mmap_read_lock(&init_mm); + vmemmap_remap_range(reuse, end, &walk); + mmap_read_unlock(&init_mm); + + return 0; +} + DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index f9ddeaa2fbdfc..d957d3e368380 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -27,408 +27,9 @@ #include #include #include -#include -#include #include #include -#include - -#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP -/** - * struct vmemmap_remap_walk - walk vmemmap page table - * - * @remap_pte: called for each lowest-level entry (PTE). - * @nr_walked: the number of walked pte. - * @reuse_page: the page which is reused for the tail vmemmap pages. - * @reuse_addr: the virtual address of the @reuse_page page. - * @vmemmap_pages: the list head of the vmemmap pages that can be freed - * or is mapped from. 
- */ -struct vmemmap_remap_walk { - void (*remap_pte)(pte_t *pte, unsigned long addr, - struct vmemmap_remap_walk *walk); - unsigned long nr_walked; - struct page *reuse_page; - unsigned long reuse_addr; - struct list_head *vmemmap_pages; -}; - -static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) -{ - pmd_t __pmd; - int i; - unsigned long addr = start; - struct page *page = pmd_page(*pmd); - pte_t *pgtable = pte_alloc_one_kernel(&init_mm); - - if (!pgtable) - return -ENOMEM; - - pmd_populate_kernel(&init_mm, &__pmd, pgtable); - - for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) { - pte_t entry, *pte; - pgprot_t pgprot = PAGE_KERNEL; - - entry = mk_pte(page + i, pgprot); - pte = pte_offset_kernel(&__pmd, addr); - set_pte_at(&init_mm, addr, pte, entry); - } - - spin_lock(&init_mm.page_table_lock); - if (likely(pmd_leaf(*pmd))) { - /* - * Higher order allocations from buddy allocator must be able to - * be treated as indepdenent small pages (as they can be freed - * individually). - */ - if (!PageReserved(page)) - split_page(page, get_order(PMD_SIZE)); - - /* Make pte visible before pmd. See comment in pmd_install(). */ - smp_wmb(); - pmd_populate_kernel(&init_mm, pmd, pgtable); - flush_tlb_kernel_range(start, start + PMD_SIZE); - } else { - pte_free_kernel(&init_mm, pgtable); - } - spin_unlock(&init_mm.page_table_lock); - - return 0; -} - -static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) -{ - int leaf; - - spin_lock(&init_mm.page_table_lock); - leaf = pmd_leaf(*pmd); - spin_unlock(&init_mm.page_table_lock); - - if (!leaf) - return 0; - - return __split_vmemmap_huge_pmd(pmd, start); -} - -static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr, - unsigned long end, - struct vmemmap_remap_walk *walk) -{ - pte_t *pte = pte_offset_kernel(pmd, addr); - - /* - * The reuse_page is found 'first' in table walk before we start - * remapping (which is calling @walk->remap_pte). - */ - if (!walk->reuse_page) { - walk->reuse_page = pte_page(*pte); - /* - * Because the reuse address is part of the range that we are - * walking, skip the reuse address range. 
- */ - addr += PAGE_SIZE; - pte++; - walk->nr_walked++; - } - - for (; addr != end; addr += PAGE_SIZE, pte++) { - walk->remap_pte(pte, addr, walk); - walk->nr_walked++; - } -} - -static int vmemmap_pmd_range(pud_t *pud, unsigned long addr, - unsigned long end, - struct vmemmap_remap_walk *walk) -{ - pmd_t *pmd; - unsigned long next; - - pmd = pmd_offset(pud, addr); - do { - int ret; - - ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK); - if (ret) - return ret; - - next = pmd_addr_end(addr, end); - vmemmap_pte_range(pmd, addr, next, walk); - } while (pmd++, addr = next, addr != end); - - return 0; -} - -static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr, - unsigned long end, - struct vmemmap_remap_walk *walk) -{ - pud_t *pud; - unsigned long next; - - pud = pud_offset(p4d, addr); - do { - int ret; - - next = pud_addr_end(addr, end); - ret = vmemmap_pmd_range(pud, addr, next, walk); - if (ret) - return ret; - } while (pud++, addr = next, addr != end); - - return 0; -} - -static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr, - unsigned long end, - struct vmemmap_remap_walk *walk) -{ - p4d_t *p4d; - unsigned long next; - - p4d = p4d_offset(pgd, addr); - do { - int ret; - - next = p4d_addr_end(addr, end); - ret = vmemmap_pud_range(p4d, addr, next, walk); - if (ret) - return ret; - } while (p4d++, addr = next, addr != end); - - return 0; -} - -static int vmemmap_remap_range(unsigned long start, unsigned long end, - struct vmemmap_remap_walk *walk) -{ - unsigned long addr = start; - unsigned long next; - pgd_t *pgd; - - VM_BUG_ON(!PAGE_ALIGNED(start)); - VM_BUG_ON(!PAGE_ALIGNED(end)); - - pgd = pgd_offset_k(addr); - do { - int ret; - - next = pgd_addr_end(addr, end); - ret = vmemmap_p4d_range(pgd, addr, next, walk); - if (ret) - return ret; - } while (pgd++, addr = next, addr != end); - - /* - * We only change the mapping of the vmemmap virtual address range - * [@start + PAGE_SIZE, end), so we only need to flush the TLB which - * belongs to the range. - */ - flush_tlb_kernel_range(start + PAGE_SIZE, end); - - return 0; -} - -/* - * Free a vmemmap page. A vmemmap page can be allocated from the memblock - * allocator or buddy allocator. If the PG_reserved flag is set, it means - * that it allocated from the memblock allocator, just free it via the - * free_bootmem_page(). Otherwise, use __free_page(). - */ -static inline void free_vmemmap_page(struct page *page) -{ - if (PageReserved(page)) - free_bootmem_page(page); - else - __free_page(page); -} - -/* Free a list of the vmemmap pages */ -static void free_vmemmap_page_list(struct list_head *list) -{ - struct page *page, *next; - - list_for_each_entry_safe(page, next, list, lru) { - list_del(&page->lru); - free_vmemmap_page(page); - } -} - -static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, - struct vmemmap_remap_walk *walk) -{ - /* - * Remap the tail pages as read-only to catch illegal write operation - * to the tail pages. - */ - pgprot_t pgprot = PAGE_KERNEL_RO; - pte_t entry = mk_pte(walk->reuse_page, pgprot); - struct page *page = pte_page(*pte); - - list_add_tail(&page->lru, walk->vmemmap_pages); - set_pte_at(&init_mm, addr, pte, entry); -} - -/* - * How many struct page structs need to be reset. When we reuse the head - * struct page, the special metadata (e.g. page->flags or page->mapping) - * cannot copy to the tail struct page structs. The invalid value will be - * checked in the free_tail_pages_check(). In order to avoid the message - * of "corrupted mapping in tail page". 
We need to reset at least 3 (one - * head struct page struct and two tail struct page structs) struct page - * structs. - */ -#define NR_RESET_STRUCT_PAGE 3 - -static inline void reset_struct_pages(struct page *start) -{ - int i; - struct page *from = start + NR_RESET_STRUCT_PAGE; - - for (i = 0; i < NR_RESET_STRUCT_PAGE; i++) - memcpy(start + i, from, sizeof(*from)); -} - -static void vmemmap_restore_pte(pte_t *pte, unsigned long addr, - struct vmemmap_remap_walk *walk) -{ - pgprot_t pgprot = PAGE_KERNEL; - struct page *page; - void *to; - - BUG_ON(pte_page(*pte) != walk->reuse_page); - - page = list_first_entry(walk->vmemmap_pages, struct page, lru); - list_del(&page->lru); - to = page_to_virt(page); - copy_page(to, (void *)walk->reuse_addr); - reset_struct_pages(to); - - set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot)); -} - -/** - * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end) - * to the page which @reuse is mapped to, then free vmemmap - * which the range are mapped to. - * @start: start address of the vmemmap virtual address range that we want - * to remap. - * @end: end address of the vmemmap virtual address range that we want to - * remap. - * @reuse: reuse address. - * - * Return: %0 on success, negative error code otherwise. - */ -int vmemmap_remap_free(unsigned long start, unsigned long end, - unsigned long reuse) -{ - int ret; - LIST_HEAD(vmemmap_pages); - struct vmemmap_remap_walk walk = { - .remap_pte = vmemmap_remap_pte, - .reuse_addr = reuse, - .vmemmap_pages = &vmemmap_pages, - }; - - /* - * In order to make remapping routine most efficient for the huge pages, - * the routine of vmemmap page table walking has the following rules - * (see more details from the vmemmap_pte_range()): - * - * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE) - * should be continuous. - * - The @reuse address is part of the range [@reuse, @end) that we are - * walking which is passed to vmemmap_remap_range(). - * - The @reuse address is the first in the complete range. - * - * So we need to make sure that @start and @reuse meet the above rules. - */ - BUG_ON(start - reuse != PAGE_SIZE); - - mmap_read_lock(&init_mm); - ret = vmemmap_remap_range(reuse, end, &walk); - if (ret && walk.nr_walked) { - end = reuse + walk.nr_walked * PAGE_SIZE; - /* - * vmemmap_pages contains pages from the previous - * vmemmap_remap_range call which failed. These - * are pages which were removed from the vmemmap. - * They will be restored in the following call. - */ - walk = (struct vmemmap_remap_walk) { - .remap_pte = vmemmap_restore_pte, - .reuse_addr = reuse, - .vmemmap_pages = &vmemmap_pages, - }; - - vmemmap_remap_range(reuse, end, &walk); - } - mmap_read_unlock(&init_mm); - - free_vmemmap_page_list(&vmemmap_pages); - - return ret; -} - -static int alloc_vmemmap_page_list(unsigned long start, unsigned long end, - gfp_t gfp_mask, struct list_head *list) -{ - unsigned long nr_pages = (end - start) >> PAGE_SHIFT; - int nid = page_to_nid((struct page *)start); - struct page *page, *next; - - while (nr_pages--) { - page = alloc_pages_node(nid, gfp_mask, 0); - if (!page) - goto out; - list_add_tail(&page->lru, list); - } - - return 0; -out: - list_for_each_entry_safe(page, next, list, lru) - __free_pages(page, 0); - return -ENOMEM; -} - -/** - * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end) - * to the page which is from the @vmemmap_pages - * respectively. 
- * @start: start address of the vmemmap virtual address range that we want - * to remap. - * @end: end address of the vmemmap virtual address range that we want to - * remap. - * @reuse: reuse address. - * @gfp_mask: GFP flag for allocating vmemmap pages. - * - * Return: %0 on success, negative error code otherwise. - */ -int vmemmap_remap_alloc(unsigned long start, unsigned long end, - unsigned long reuse, gfp_t gfp_mask) -{ - LIST_HEAD(vmemmap_pages); - struct vmemmap_remap_walk walk = { - .remap_pte = vmemmap_restore_pte, - .reuse_addr = reuse, - .vmemmap_pages = &vmemmap_pages, - }; - - /* See the comment in the vmemmap_remap_free(). */ - BUG_ON(start - reuse != PAGE_SIZE); - - if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages)) - return -ENOMEM; - - mmap_read_lock(&init_mm); - vmemmap_remap_range(reuse, end, &walk); - mmap_read_unlock(&init_mm); - - return 0; -} -#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */ /* * Allocate a block of memory to be used to back the virtual memory map From 177a67e8ce5e6a36cf44549861c5ccab5dee2103 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:32 +0800 Subject: [PATCH 220/321] mm: hugetlb_vmemmap: replace early_param() with core_param() After the following commit: 78f39084b41d ("mm: hugetlb_vmemmap: add hugetlb_optimize_vmemmap sysctl") There is no order requirement between the parameter of "hugetlb_free_vmemmap" and "hugepages" since we have removed the check of whether HVO is enabled from hugetlb_vmemmap_init(). Therefore we can safely replace early_param() with core_param() to simplify the code. Link: https://lkml.kernel.org/r/20220628092235.91270-6-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Mike Kravetz Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Oscar Salvador Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- mm/hugetlb_vmemmap.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index f68e216600b91..6c7117c30e566 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -423,14 +423,8 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); -static bool vmemmap_optimize_enabled = - IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); - -static int __init hugetlb_vmemmap_early_param(char *buf) -{ - return kstrtobool(buf, &vmemmap_optimize_enabled); -} -early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param); +static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); +core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0); /* * Previously discarded vmemmap pages will be allocated and remapping From 3fee81d1f8177d2a339241db2d50b5ba6564aba2 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:33 +0800 Subject: [PATCH 221/321] mm: hugetlb_vmemmap: improve hugetlb_vmemmap code readability There is a discussion about the name of hugetlb_vmemmap_alloc/free in thread [1]. The suggestion suggested by David is rename "alloc/free" to "optimize/restore" to make functionalities clearer to users, "optimize" means the function will optimize vmemmap pages, while "restore" means restoring its vmemmap pages discared before. This commit does this. 
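For quick reference, the user-facing renames made by this commit are:

  hugetlb_vmemmap_alloc()          -> hugetlb_vmemmap_restore()
  hugetlb_vmemmap_free()           -> hugetlb_vmemmap_optimize()
  hugetlb_optimize_vmemmap_pages() -> hugetlb_vmemmap_optimizable()
                                      (backed by the new helper
                                       hugetlb_vmemmap_optimizable_size())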
Another discussion is the confusion RESERVE_VMEMMAP_NR isn't used explicitly for vmemmap_addr but implicitly for vmemmap_end in hugetlb_vmemmap_alloc/free. David suggested we can compute what hugetlb_vmemmap_init() does now at runtime. We do not need to worry for the overhead of computing at runtime since the calculation is simple enough and those functions are not in a hot path. This commit has the following improvements: 1) The function suffixed name ("optimize/restore") is more expressive. 2) The logic becomes less weird in hugetlb_vmemmap_optimize/restore(). 3) The hugetlb_vmemmap_init() does not need to be exported anymore. 4) A ->optimize_vmemmap_pages field in struct hstate is killed. 5) There is only one place where checks is_power_of_2(sizeof(struct page)) instead of two places. 6) Add more comments for hugetlb_vmemmap_optimize/restore(). 7) For external users, hugetlb_optimize_vmemmap_pages() is used for detecting if the HugeTLB's vmemmap pages is optimizable originally. In this commit, it is killed and we introduce a new helper hugetlb_vmemmap_optimizable() to replace it. The name is more expressive. Link: https://lore.kernel.org/all/20220404074652.68024-2-songmuchun@bytedance.com/ [1] Link: https://lkml.kernel.org/r/20220628092235.91270-7-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Mike Kravetz Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Oscar Salvador Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 7 +- include/linux/sysctl.h | 4 ++ mm/hugetlb.c | 15 +++-- mm/hugetlb_vmemmap.c | 143 +++++++++++++++++----------------------- mm/hugetlb_vmemmap.h | 41 ++++++++---- 5 files changed, 102 insertions(+), 108 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 75ee739d815b9..dce46d5715755 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -638,9 +638,6 @@ struct hstate { unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; -#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP - unsigned int optimize_vmemmap_pages; -#endif #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ struct cftype cgroup_files_dfl[8]; @@ -716,7 +713,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma) return hstate_file(vma->vm_file); } -static inline unsigned long huge_page_size(struct hstate *h) +static inline unsigned long huge_page_size(const struct hstate *h) { return (unsigned long)PAGE_SIZE << h->order; } @@ -745,7 +742,7 @@ static inline bool hstate_is_gigantic(struct hstate *h) return huge_page_order(h) >= MAX_ORDER; } -static inline unsigned int pages_per_huge_page(struct hstate *h) +static inline unsigned int pages_per_huge_page(const struct hstate *h) { return 1 << h->order; } diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 80263f7cdb776..5a227b9e3ad52 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -266,6 +266,10 @@ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * return NULL; } +static inline void register_sysctl_init(const char *path, struct ctl_table *table) +{ +} + static inline struct ctl_table_header *register_sysctl_mount_point(const char *path) { return NULL; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 08e88d04a2d6d..321384e9e7460 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1535,7 +1535,7 @@ static void __update_and_free_page(struct hstate *h, struct 
page *page) if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; - if (hugetlb_vmemmap_alloc(h, page)) { + if (hugetlb_vmemmap_restore(h, page)) { spin_lock_irq(&hugetlb_lock); /* * If we cannot allocate vmemmap pages, just refuse to free the @@ -1612,7 +1612,7 @@ static DECLARE_WORK(free_hpage_work, free_hpage_workfn); static inline void flush_free_hpage_work(struct hstate *h) { - if (hugetlb_optimize_vmemmap_pages(h)) + if (hugetlb_vmemmap_optimizable(h)) flush_work(&free_hpage_work); } @@ -1734,7 +1734,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid) static void __prep_new_huge_page(struct hstate *h, struct page *page) { - hugetlb_vmemmap_free(h, page); + hugetlb_vmemmap_optimize(h, page); INIT_LIST_HEAD(&page->lru); set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); hugetlb_set_page_subpool(page, NULL); @@ -2107,7 +2107,7 @@ int dissolve_free_huge_page(struct page *page) * Attempt to allocate vmemmmap here so that we can take * appropriate action on failure. */ - rc = hugetlb_vmemmap_alloc(h, head); + rc = hugetlb_vmemmap_restore(h, head); if (!rc) { /* * Move PageHWPoison flag from head page to the raw @@ -3182,8 +3182,10 @@ static void __init report_hugepages(void) char buf[32]; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); - pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", + pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", buf, h->free_huge_pages); + pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", + hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); } } @@ -3421,7 +3423,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page) remove_hugetlb_page_for_demote(h, page, false); spin_unlock_irq(&hugetlb_lock); - rc = hugetlb_vmemmap_alloc(h, page); + rc = hugetlb_vmemmap_restore(h, page); if (rc) { /* Allocation of vmemmmap failed, we can not demote page */ spin_lock_irq(&hugetlb_lock); @@ -4111,7 +4113,6 @@ void __init hugetlb_add_hstate(unsigned int order) h->next_nid_to_free = first_memory_node; snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/1024); - hugetlb_vmemmap_init(h); parsed_hstate = h; } diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 6c7117c30e566..8da2b31bb59f5 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -35,16 +35,6 @@ struct vmemmap_remap_walk { struct list_head *vmemmap_pages; }; -/* - * There are a lot of struct page structures associated with each HugeTLB page. - * For tail pages, the value of compound_head is the same. So we can reuse first - * page of head page structures. We map the virtual addresses of all the pages - * of tail page structures to the head page struct, and then free these page - * frames. Therefore, we need to reserve one pages as vmemmap areas. - */ -#define RESERVE_VMEMMAP_NR 1U -#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) - static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) { pmd_t __pmd; @@ -426,32 +416,37 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0); -/* - * Previously discarded vmemmap pages will be allocated and remapping - * after this function returns zero. +/** + * hugetlb_vmemmap_restore - restore previously optimized (by + * hugetlb_vmemmap_optimize()) vmemmap pages which + * will be reallocated and remapped. + * @h: struct hstate. 
+ * @head: the head page whose vmemmap pages will be restored. + * + * Return: %0 if @head's vmemmap pages have been reallocated and remapped, + * negative error code otherwise. */ -int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head) +int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head) { int ret; - unsigned long vmemmap_addr = (unsigned long)head; - unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages; + unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; + unsigned long vmemmap_reuse; if (!HPageVmemmapOptimized(head)) return 0; - vmemmap_addr += RESERVE_VMEMMAP_SIZE; - vmemmap_pages = hugetlb_optimize_vmemmap_pages(h); - vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT); - vmemmap_reuse = vmemmap_addr - PAGE_SIZE; + vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); + vmemmap_reuse = vmemmap_start; + vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* - * The pages which the vmemmap virtual address range [@vmemmap_addr, + * The pages which the vmemmap virtual address range [@vmemmap_start, * @vmemmap_end) are mapped to are freed to the buddy allocator, and * the range is mapped to the page which @vmemmap_reuse is mapped to. * When a HugeTLB page is freed to the buddy allocator, previously * discarded vmemmap pages must be allocated and remapping. */ - ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse, + ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE); if (!ret) { ClearHPageVmemmapOptimized(head); @@ -461,11 +456,14 @@ int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head) return ret; } -static unsigned int vmemmap_optimizable_pages(struct hstate *h, - struct page *head) +/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */ +static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head) { if (!READ_ONCE(vmemmap_optimize_enabled)) - return 0; + return false; + + if (!hugetlb_vmemmap_optimizable(h)) + return false; if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) { pmd_t *pmdp, pmd; @@ -508,73 +506,47 @@ static unsigned int vmemmap_optimizable_pages(struct hstate *h, * +-------------------------------------------+ */ if (PageVmemmapSelfHosted(vmemmap_page)) - return 0; + return false; } - return hugetlb_optimize_vmemmap_pages(h); + return true; } -void hugetlb_vmemmap_free(struct hstate *h, struct page *head) +/** + * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages. + * @h: struct hstate. + * @head: the head page whose vmemmap pages will be optimized. + * + * This function only tries to optimize @head's vmemmap pages and does not + * guarantee that the optimization will succeed after it returns. The caller + * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages + * have been optimized. 
+ */ +void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head) { - unsigned long vmemmap_addr = (unsigned long)head; - unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages; + unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; + unsigned long vmemmap_reuse; - vmemmap_pages = vmemmap_optimizable_pages(h, head); - if (!vmemmap_pages) + if (!vmemmap_should_optimize(h, head)) return; static_branch_inc(&hugetlb_optimize_vmemmap_key); - vmemmap_addr += RESERVE_VMEMMAP_SIZE; - vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT); - vmemmap_reuse = vmemmap_addr - PAGE_SIZE; + vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); + vmemmap_reuse = vmemmap_start; + vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* - * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end) + * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end) * to the page which @vmemmap_reuse is mapped to, then free the pages - * which the range [@vmemmap_addr, @vmemmap_end] is mapped to. + * which the range [@vmemmap_start, @vmemmap_end] is mapped to. */ - if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse)) + if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse)) static_branch_dec(&hugetlb_optimize_vmemmap_key); else SetHPageVmemmapOptimized(head); } -void __init hugetlb_vmemmap_init(struct hstate *h) -{ - unsigned int nr_pages = pages_per_huge_page(h); - unsigned int vmemmap_pages; - - /* - * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct - * page structs that can be used when HVO is enabled, add a BUILD_BUG_ON - * to catch invalid usage of the tail page structs. - */ - BUILD_BUG_ON(__NR_USED_SUBPAGE >= - RESERVE_VMEMMAP_SIZE / sizeof(struct page)); - - if (!is_power_of_2(sizeof(struct page))) { - pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n"); - return; - } - - vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT; - /* - * The head page is not to be freed to buddy allocator, the other tail - * pages will map to the head page, so they can be freed. - * - * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true - * on some architectures (e.g. aarch64). See Documentation/arm64/ - * hugetlbpage.rst for more details. - */ - if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR)) - h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR; - - pr_info("can optimize %d vmemmap pages for %s\n", - h->optimize_vmemmap_pages, h->name); -} - -#ifdef CONFIG_PROC_SYSCTL static struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", @@ -586,16 +558,21 @@ static struct ctl_table hugetlb_vmemmap_sysctls[] = { { } }; -static __init int hugetlb_vmemmap_sysctls_init(void) +static int __init hugetlb_vmemmap_init(void) { - /* - * If "struct page" crosses page boundaries, the vmemmap pages cannot - * be optimized. 
- */ - if (is_power_of_2(sizeof(struct page))) - register_sysctl_init("vm", hugetlb_vmemmap_sysctls); - + /* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */ + BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE); + + if (IS_ENABLED(CONFIG_PROC_SYSCTL)) { + const struct hstate *h; + + for_each_hstate(h) { + if (hugetlb_vmemmap_optimizable(h)) { + register_sysctl_init("vm", hugetlb_vmemmap_sysctls); + break; + } + } + } return 0; } -late_initcall(hugetlb_vmemmap_sysctls_init); -#endif /* CONFIG_PROC_SYSCTL */ +late_initcall(hugetlb_vmemmap_init); diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index ba66fadad9fca..25bd0e0024314 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -11,35 +11,50 @@ #include #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP -int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head); -void hugetlb_vmemmap_free(struct hstate *h, struct page *head); -void hugetlb_vmemmap_init(struct hstate *h); +int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head); +void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head); /* - * How many vmemmap pages associated with a HugeTLB page that can be - * optimized and freed to the buddy allocator. + * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See + * Documentation/vm/vmemmap_dedup.rst. */ -static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h) +#define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE + +static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h) { - return h->optimize_vmemmap_pages; + return pages_per_huge_page(h) * sizeof(struct page); +} + +/* + * Return how many vmemmap size associated with a HugeTLB page that can be + * optimized and can be freed to the buddy allocator. + */ +static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h) +{ + int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE; + + if (!is_power_of_2(sizeof(struct page))) + return 0; + return size > 0 ? size : 0; } #else -static inline int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head) +static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head) { return 0; } -static inline void hugetlb_vmemmap_free(struct hstate *h, struct page *head) +static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head) { } -static inline void hugetlb_vmemmap_init(struct hstate *h) +static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h) { + return 0; } +#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */ -static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h) +static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h) { - return 0; + return hugetlb_vmemmap_optimizable_size(h) != 0; } -#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */ #endif /* _LINUX_HUGETLB_VMEMMAP_H */ From 4ff0e81ffdd40043198871cfea8ed59e8afe4551 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:34 +0800 Subject: [PATCH 222/321] mm: hugetlb_vmemmap: move code comments to vmemmap_dedup.rst All the comments which explains how HVO works are moved to vmemmap_dedup.rst since commit 4917f55b4ef9 ("mm/sparse-vmemmap: improve memory savings for compound devmaps") except some comments above page_fixed_fake_head(). This commit moves those comments to vmemmap_dedup.rst and improve vmemmap_dedup.rst as well. 
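As a concrete instance of the sizing formula spelled out in that document
(a worked example only, assuming 4 KiB base pages and a 64-byte struct page,
as on x86-64 and arm64 with 4K pages):

  2 MB HugeTLB page: 512 base pages * 64 B    = 32 KiB of vmemmap = 8 pages;
                     1 head page is kept and reused, 7 pages are freed
  1 GB HugeTLB page: 262144 base pages * 64 B = 16 MiB of vmemmap = 4096 pages;
                     1 head page is kept, 4095 pages are freed

This matches the "7 pages per 2MB HugeTLB page and 4095 pages per 1GB HugeTLB
page" figures already quoted in Documentation/admin-guide/sysctl/vm.rst.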
Link: https://lkml.kernel.org/r/20220628092235.91270-8-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Mike Kravetz Cc: Oscar Salvador Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- Documentation/mm/vmemmap_dedup.rst | 70 ++++++++++++++++++++---------- include/linux/page-flags.h | 15 +------ 2 files changed, 49 insertions(+), 36 deletions(-) diff --git a/Documentation/mm/vmemmap_dedup.rst b/Documentation/mm/vmemmap_dedup.rst index 7d7a161aa3646..a4b12ff906c4d 100644 --- a/Documentation/mm/vmemmap_dedup.rst +++ b/Documentation/mm/vmemmap_dedup.rst @@ -9,23 +9,23 @@ HugeTLB This section is to explain how HugeTLB Vmemmap Optimization (HVO) works. -The struct page structures (page structs) are used to describe a physical -page frame. By default, there is a one-to-one mapping from a page frame to -it's corresponding page struct. +The ``struct page`` structures are used to describe a physical page frame. By +default, there is a one-to-one mapping from a page frame to it's corresponding +``struct page``. HugeTLB pages consist of multiple base page size pages and is supported by many architectures. See Documentation/admin-guide/mm/hugetlbpage.rst for more details. On the x86-64 architecture, HugeTLB pages of size 2MB and 1GB are currently supported. Since the base page size on x86 is 4KB, a 2MB HugeTLB page consists of 512 base pages and a 1GB HugeTLB page consists of 4096 base pages. -For each base page, there is a corresponding page struct. +For each base page, there is a corresponding ``struct page``. -Within the HugeTLB subsystem, only the first 4 page structs are used to -contain unique information about a HugeTLB page. __NR_USED_SUBPAGE provides -this upper limit. The only 'useful' information in the remaining page structs +Within the HugeTLB subsystem, only the first 4 ``struct page`` are used to +contain unique information about a HugeTLB page. ``__NR_USED_SUBPAGE`` provides +this upper limit. The only 'useful' information in the remaining ``struct page`` is the compound_head field, and this field is the same for all tail pages. -By removing redundant page structs for HugeTLB pages, memory can be returned +By removing redundant ``struct page`` for HugeTLB pages, memory can be returned to the buddy allocator for other uses. Different architectures support different HugeTLB pages. For example, the @@ -46,7 +46,7 @@ page. | | 64KB | 2MB | 512MB | 16GB | | +--------------+-----------+-----------+-----------+-----------+-----------+ -When the system boot up, every HugeTLB page has more than one struct page +When the system boot up, every HugeTLB page has more than one ``struct page`` structs which size is (unit: pages):: struct_size = HugeTLB_Size / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE @@ -76,10 +76,10 @@ Where n is how many pte entries which one page can contains. So the value of n is (PAGE_SIZE / sizeof(pte_t)). This optimization only supports 64-bit system, so the value of sizeof(pte_t) -is 8. And this optimization also applicable only when the size of struct page -is a power of two. In most cases, the size of struct page is 64 bytes (e.g. +is 8. And this optimization also applicable only when the size of ``struct page`` +is a power of two. In most cases, the size of ``struct page`` is 64 bytes (e.g. x86-64 and arm64). 
So if we use pmd level mapping for a HugeTLB page, the -size of struct page structs of it is 8 page frames which size depends on the +size of ``struct page`` structs of it is 8 page frames which size depends on the size of the base page. For the HugeTLB page of the pud level mapping, then:: @@ -88,7 +88,7 @@ For the HugeTLB page of the pud level mapping, then:: = PAGE_SIZE / 8 * 8 (pages) = PAGE_SIZE (pages) -Where the struct_size(pmd) is the size of the struct page structs of a +Where the struct_size(pmd) is the size of the ``struct page`` structs of a HugeTLB page of the pmd level mapping. E.g.: A 2MB HugeTLB page on x86_64 consists in 8 page frames while 1GB @@ -96,7 +96,7 @@ HugeTLB page consists in 4096. Next, we take the pmd level mapping of the HugeTLB page as an example to show the internal implementation of this optimization. There are 8 pages -struct page structs associated with a HugeTLB page which is pmd mapped. +``struct page`` structs associated with a HugeTLB page which is pmd mapped. Here is how things look before optimization:: @@ -124,10 +124,10 @@ Here is how things look before optimization:: +-----------+ The value of page->compound_head is the same for all tail pages. The first -page of page structs (page 0) associated with the HugeTLB page contains the 4 -page structs necessary to describe the HugeTLB. The only use of the remaining -pages of page structs (page 1 to page 7) is to point to page->compound_head. -Therefore, we can remap pages 1 to 7 to page 0. Only 1 page of page structs +page of ``struct page`` (page 0) associated with the HugeTLB page contains the 4 +``struct page`` necessary to describe the HugeTLB. The only use of the remaining +pages of ``struct page`` (page 1 to page 7) is to point to page->compound_head. +Therefore, we can remap pages 1 to 7 to page 0. Only 1 page of ``struct page`` will be used for each HugeTLB page. This will allow us to free the remaining 7 pages to the buddy allocator. @@ -169,13 +169,37 @@ entries that can be cached in a single TLB entry. The contiguous bit is used to increase the mapping size at the pmd and pte (last) level. So this type of HugeTLB page can be optimized only when its -size of the struct page structs is greater than 1 page. +size of the ``struct page`` structs is greater than **1** page. Notice: The head vmemmap page is not freed to the buddy allocator and all tail vmemmap pages are mapped to the head vmemmap page frame. So we can see -more than one struct page struct with PG_head (e.g. 8 per 2 MB HugeTLB page) -associated with each HugeTLB page. The compound_head() can handle this -correctly (more details refer to the comment above compound_head()). +more than one ``struct page`` struct with ``PG_head`` (e.g. 8 per 2 MB HugeTLB +page) associated with each HugeTLB page. The ``compound_head()`` can handle +this correctly. There is only **one** head ``struct page``, the tail +``struct page`` with ``PG_head`` are fake head ``struct page``. We need an +approach to distinguish between those two different types of ``struct page`` so +that ``compound_head()`` can return the real head ``struct page`` when the +parameter is the tail ``struct page`` but with ``PG_head``. The following code +snippet describes how to distinguish between real and fake head ``struct page``. + +.. 
code-block:: c + + if (test_bit(PG_head, &page->flags)) { + unsigned long head = READ_ONCE(page[1].compound_head); + + if (head & 1) { + if (head == (unsigned long)page + 1) + /* head struct page */ + else + /* tail struct page */ + } else { + /* head struct page */ + } + } + +We can safely access the field of the **page[1]** with ``PG_head`` because the +page is a compound page composed with at least two contiguous pages. +The implementation refers to ``page_fixed_fake_head()``. Device DAX ========== @@ -189,7 +213,7 @@ PMD_SIZE (2M on x86_64) and PUD_SIZE (1G on x86_64). The differences with HugeTLB are relatively minor. -It only use 3 page structs for storing all information as opposed +It only use 3 ``struct page`` for storing all information as opposed to 4 on HugeTLB pages. There's no remapping of vmemmap given that device-dax memory is not part of diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a2ada8c75d5e8..d270b16602cd1 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -208,19 +208,8 @@ enum pageflags { DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); /* - * If HVO is enabled, the head vmemmap page frame is reused and all of the tail - * vmemmap addresses map to the head vmemmap page frame (furture details can - * refer to the figure at the head of the mm/hugetlb_vmemmap.c). In other - * words, there are more than one page struct with PG_head associated with each - * HugeTLB page. We __know__ that there is only one head page struct, the tail - * page structs with PG_head are fake head page structs. We need an approach - * to distinguish between those two different types of page structs so that - * compound_head() can return the real head page struct when the parameter is - * the tail page struct but with PG_head. - * - * The page_fixed_fake_head() returns the real head page struct if the @page is - * fake page head, otherwise, returns @page which can either be a true page - * head or tail. + * Return the real head page struct iff the @page is a fake head page, otherwise + * return the @page itself. See Documentation/mm/vmemmap_dedup.rst. */ static __always_inline const struct page *page_fixed_fake_head(const struct page *page) { From 73d0bad3e49f218aafd189d87cd30ec2cdad1145 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jun 2022 17:22:35 +0800 Subject: [PATCH 223/321] mm: hugetlb_vmemmap: use PTRS_PER_PTE instead of PMD_SIZE / PAGE_SIZE There is already a macro PTRS_PER_PTE to represent the number of page table entries, just use it. 
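For example (illustrative arithmetic only, using the usual x86-64 values of a
4 KiB PAGE_SIZE, a 2 MiB PMD_SIZE and an 8-byte pte_t):

  PMD_SIZE / PAGE_SIZE = 2 MiB / 4 KiB             = 512
  PTRS_PER_PTE         = PAGE_SIZE / sizeof(pte_t) = 4096 / 8 = 512

The loop bound is therefore unchanged; the macro simply states the intent
(visit every PTE covered by one PMD) directly.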
Link: https://lkml.kernel.org/r/20220628092235.91270-9-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Mike Kravetz Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Oscar Salvador Cc: Will Deacon Cc: Xiongchun Duan Signed-off-by: Andrew Morton --- mm/hugetlb_vmemmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 8da2b31bb59f5..20f414c0379f9 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -48,7 +48,7 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) pmd_populate_kernel(&init_mm, &__pmd, pgtable); - for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) { + for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { pte_t entry, *pte; pgprot_t pgprot = PAGE_KERNEL; From 28674ebfd77e30520d9049ecd39941e52ee8f344 Mon Sep 17 00:00:00 2001 From: Adam Sindelar Date: Mon, 4 Jul 2022 19:33:51 +0200 Subject: [PATCH 224/321] selftests/vm: fix errno handling in mrelease_test mrelease_test should return KSFT_SKIP when process_mrelease is not defined, but due to a perror call consuming the errno, it returns KSFT_FAIL. This patch decides the exit code before calling perror. Link: https://lkml.kernel.org/r/20220704173351.19595-1-adam@wowsignal.io Fixes: 33776141b812 ("selftests: vm: add process_mrelease tests") Signed-off-by: Adam Sindelar Reviewed-by: David Vernet Reviewed-by: Suren Baghdasaryan Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/mrelease_test.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/vm/mrelease_test.c b/tools/testing/selftests/vm/mrelease_test.c index 96671c2f7d485..e8b17258579ba 100644 --- a/tools/testing/selftests/vm/mrelease_test.c +++ b/tools/testing/selftests/vm/mrelease_test.c @@ -100,8 +100,10 @@ int main(void) /* Test a wrong pidfd */ if (!syscall(__NR_process_mrelease, -1, 0) || errno != EBADF) { + /* perror overwrites errno, so this line must be first */ + res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); perror("process_mrelease with wrong pidfd"); - exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); + exit(res); } /* Start the test with 1MB child memory allocation */ @@ -156,8 +158,9 @@ int main(void) run_negative_tests(pidfd); if (kill(pid, SIGKILL)) { + res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); perror("kill"); - exit(errno == ENOSYS ? 
KSFT_SKIP : KSFT_FAIL); + exit(res); } success = (syscall(__NR_process_mrelease, pidfd, 0) == 0); From 6bc76350fbafb6f42a435e518af24cd87be1d516 Mon Sep 17 00:00:00 2001 From: Adam Sindelar Date: Wed, 6 Jul 2022 16:16:02 +0200 Subject: [PATCH 225/321] selftests-vm-fix-errno-handling-in-mrelease_test-v4 fix remaining instances of errno mishandling Link: https://lkml.kernel.org/r/20220706141602.10159-1-adam@wowsignal.io Fixes: 33776141b812 ("selftests: vm: add process_mrelease tests") Signed-off-by: Adam Sindelar Reviewed-by: David Vernet Reviewed-by: Suren Baghdasaryan Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/mrelease_test.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/vm/mrelease_test.c b/tools/testing/selftests/vm/mrelease_test.c index e8b17258579ba..6c62966ab5dbc 100644 --- a/tools/testing/selftests/vm/mrelease_test.c +++ b/tools/testing/selftests/vm/mrelease_test.c @@ -62,19 +62,22 @@ static int alloc_noexit(unsigned long nr_pages, int pipefd) /* The process_mrelease calls in this test are expected to fail */ static void run_negative_tests(int pidfd) { + int res; /* Test invalid flags. Expect to fail with EINVAL error code. */ if (!syscall(__NR_process_mrelease, pidfd, (unsigned int)-1) || errno != EINVAL) { + res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); perror("process_mrelease with wrong flags"); - exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); + exit(res); } /* * Test reaping while process is alive with no pending SIGKILL. * Expect to fail with EINVAL error code. */ if (!syscall(__NR_process_mrelease, pidfd, 0) || errno != EINVAL) { + res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); perror("process_mrelease on a live process"); - exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); + exit(res); } } @@ -100,7 +103,6 @@ int main(void) /* Test a wrong pidfd */ if (!syscall(__NR_process_mrelease, -1, 0) || errno != EBADF) { - /* perror overwrites errno, so this line must be first */ res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); perror("process_mrelease with wrong pidfd"); exit(res); @@ -175,9 +177,10 @@ int main(void) if (errno == ESRCH) { retry = (size <= MAX_SIZE_MB); } else { + res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); perror("process_mrelease"); waitpid(pid, NULL, 0); - exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL); + exit(res); } } From 7acc8ac3774bc108ea034ea3f564431b6293d111 Mon Sep 17 00:00:00 2001 From: Adam Sindelar Date: Mon, 4 Jul 2022 14:38:13 +0200 Subject: [PATCH 226/321] selftests/vm: skip 128TBswitch on unsupported arch The test va_128TBswitch.c exercises a feature only supported on PPC and x86_64, but it's run on other 64-bit archs as well. Before this patch, the test did nothing and returned 0 for KSFT_PASS. This patch makes it return the KSFT codes from kselftest.h, including KSFT_SKIP when appropriate. Verified on arm64 and x86_64. 
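For context, the KSFT_* values come from tools/testing/selftests/kselftest.h
(KSFT_PASS=0, KSFT_FAIL=1, KSFT_SKIP=4), and the harness reports an exit code
of 4 as "skipped" rather than "passed". A minimal stand-alone sketch of the
convention the test now follows (supported_arch() here is only a stand-in for
the real architecture check):

  #include <stdio.h>

  #define KSFT_PASS 0
  #define KSFT_FAIL 1
  #define KSFT_SKIP 4

  static int supported_arch(void) { return 0; }  /* stand-in: pretend unsupported */

  int main(void)
  {
          if (!supported_arch()) {
                  printf("arch has no 128TB switch, skipping\n");
                  return KSFT_SKIP;  /* counted as a skip, not a bogus pass */
          }
          /* ... the real test cases would run here ... */
          return KSFT_PASS;
  }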
Link: https://lkml.kernel.org/r/20220704123813.427625-1-adam@wowsignal.io Signed-off-by: Adam Sindelar Cc: David Vernet Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/va_128TBswitch.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/vm/va_128TBswitch.c b/tools/testing/selftests/vm/va_128TBswitch.c index da6ec3b53ea8d..1d20689898839 100644 --- a/tools/testing/selftests/vm/va_128TBswitch.c +++ b/tools/testing/selftests/vm/va_128TBswitch.c @@ -231,7 +231,7 @@ static struct testcase hugetlb_testcases[] = { static int run_test(struct testcase *test, int count) { void *p; - int i, ret = 0; + int i, ret = KSFT_PASS; for (i = 0; i < count; i++) { struct testcase *t = test + i; @@ -242,13 +242,13 @@ static int run_test(struct testcase *test, int count) if (p == MAP_FAILED) { printf("FAILED\n"); - ret = 1; + ret = KSFT_FAIL; continue; } if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) { printf("FAILED\n"); - ret = 1; + ret = KSFT_FAIL; } else { /* * Do a dereference of the address returned so that we catch @@ -280,7 +280,7 @@ int main(int argc, char **argv) int ret; if (!supported_arch()) - return 0; + return KSFT_SKIP; ret = run_test(testcases, ARRAY_SIZE(testcases)); if (argc == 2 && !strcmp(argv[1], "--run-hugetlb")) From 8309f5dd2d170fde3c89f5a23cf076cfe2129771 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:46 +0800 Subject: [PATCH 227/321] mm/huge_memory: use flush_pmd_tlb_range in move_huge_pmd Patch series "A few cleanup patches for huge_memory", v3. This series contains a few cleanup patches to remove duplicated code, add/use helper functions, fix some obsolete comments and so on. More details can be found in the respective changelogs. This patch (of 16): Arches with special requirements for evicting THP backing TLB entries can implement flush_pmd_tlb_range. Even otherwise, it can help optimize TLB flushing in the THP regime. Use flush_pmd_tlb_range to take advantage of this in move_huge_pmd. Link: https://lkml.kernel.org/r/20220704132201.14611-1-linmiaohe@huawei.com Link: https://lkml.kernel.org/r/20220704132201.14611-2-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Reviewed-by: Zach O'Keefe Cc: Yang Shi Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 532ede06a10f7..eda8cf4fad2eb 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1749,7 +1749,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, pmd = move_soft_dirty_pmd(pmd); set_pmd_at(mm, new_addr, new_pmd, pmd); if (force_flush) - flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); + flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); From bf323d656588246db0a9439cf991547366f5159f Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:47 +0800 Subject: [PATCH 228/321] mm/huge_memory: access vm_page_prot with READ_ONCE in remove_migration_pmd vma->vm_page_prot is read locklessly from the rmap walk and may be updated concurrently. Use READ_ONCE to avoid the risk of reading intermediate values.
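As a rough userspace illustration of the pattern (READ_ONCE below is a simplified stand-in for the kernel macro and only covers word-sized scalars), the point is to take one volatile load and work with that snapshot:
```
/*
 * Userspace sketch of the lockless-read pattern. READ_ONCE here is a
 * simplified stand-in for the kernel macro: force exactly one load of the
 * shared field and use that snapshot instead of re-reading it.
 */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static unsigned long shared_prot;       /* imagine a writer updates this concurrently */

static void lockless_reader(void)
{
        unsigned long prot = READ_ONCE(shared_prot);    /* one load, one snapshot */

        printf("using snapshot %#lx\n", prot);
}

int main(void)
{
        shared_prot = 0x25;
        lockless_reader();
        return 0;
}
```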
Link: https://lkml.kernel.org/r/20220704132201.14611-3-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Cc: Matthew Wilcox Cc: Muchun Song Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index eda8cf4fad2eb..a4efbf869017d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3205,7 +3205,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) entry = pmd_to_swp_entry(*pvmw->pmd); get_page(new); - pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); + pmde = pmd_mkold(mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot))); if (pmd_swp_soft_dirty(*pvmw->pmd)) pmde = pmd_mksoft_dirty(pmde); if (is_writable_migration_entry(entry)) From 74aafcb35c9ea83ccc68a053bc7a11d2d9544edc Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:48 +0800 Subject: [PATCH 229/321] mm/huge_memory: fix comment of __pud_trans_huge_lock __pud_trans_huge_lock returns page table lock pointer if a given pud maps a thp instead of 'true' since introduced. Fix corresponding comments. Link: https://lkml.kernel.org/r/20220704132201.14611-4-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Acked-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a4efbf869017d..9c5e5b1925aa0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1906,10 +1906,10 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) } /* - * Returns true if a given pud maps a thp, false otherwise. + * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. * - * Note that if it returns true, this routine returns without unlocking page - * table lock. So callers must unlock it. + * Note that if it returns page table lock pointer, this routine returns without + * unlocking page table lock. So callers must unlock it. */ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) { From 449c8bd520a3bba6d0095e0717cf9ad768b59c55 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:49 +0800 Subject: [PATCH 230/321] mm/huge_memory: use helper touch_pud in huge_pud_set_accessed Use helper touch_pud to set pud accessed to simplify the code and improve the readability. No functional change intended. 
Link: https://lkml.kernel.org/r/20220704132201.14611-5-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9c5e5b1925aa0..fcf70f121b104 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1184,15 +1184,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static void touch_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, int flags) + pud_t *pud, bool write) { pud_t _pud; _pud = pud_mkyoung(*pud); - if (flags & FOLL_WRITE) + if (write) _pud = pud_mkdirty(_pud); if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, - pud, _pud, flags & FOLL_WRITE)) + pud, _pud, write)) update_mmu_cache_pud(vma, addr, pud); } @@ -1219,7 +1219,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, return NULL; if (flags & FOLL_TOUCH) - touch_pud(vma, addr, pud, flags); + touch_pud(vma, addr, pud, flags & FOLL_WRITE); /* * device mapped pages can only be returned if the @@ -1284,21 +1284,13 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) { - pud_t entry; - unsigned long haddr; bool write = vmf->flags & FAULT_FLAG_WRITE; vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); if (unlikely(!pud_same(*vmf->pud, orig_pud))) goto unlock; - entry = pud_mkyoung(orig_pud); - if (write) - entry = pud_mkdirty(entry); - haddr = vmf->address & HPAGE_PUD_MASK; - if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) - update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); - + touch_pud(vmf->vma, vmf->address, vmf->pud, write); unlock: spin_unlock(vmf->ptl); } From 475b7caf23a8ee56e7a34293596eaadadd2d045a Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:50 +0800 Subject: [PATCH 231/321] mm/huge_memory: use helper touch_pmd in huge_pmd_set_accessed Use helper touch_pmd to set pmd accessed to simplify the code and improve the readability. No functional change intended. 
Link: https://lkml.kernel.org/r/20220704132201.14611-6-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fcf70f121b104..d8214b9e50a0f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1020,15 +1020,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot); #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags) + pmd_t *pmd, bool write) { pmd_t _pmd; _pmd = pmd_mkyoung(*pmd); - if (flags & FOLL_WRITE) + if (write) _pmd = pmd_mkdirty(_pmd); if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, - pmd, _pmd, flags & FOLL_WRITE)) + pmd, _pmd, write)) update_mmu_cache_pmd(vma, addr, pmd); } @@ -1061,7 +1061,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, return NULL; if (flags & FOLL_TOUCH) - touch_pmd(vma, addr, pmd, flags); + touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); /* * device mapped pages can only be returned if the @@ -1298,21 +1298,13 @@ void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) void huge_pmd_set_accessed(struct vm_fault *vmf) { - pmd_t entry; - unsigned long haddr; bool write = vmf->flags & FAULT_FLAG_WRITE; - pmd_t orig_pmd = vmf->orig_pmd; vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); - if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) + if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) goto unlock; - entry = pmd_mkyoung(orig_pmd); - if (write) - entry = pmd_mkdirty(entry); - haddr = vmf->address & HPAGE_PMD_MASK; - if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) - update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); + touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); unlock: spin_unlock(vmf->ptl); @@ -1448,7 +1440,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, return ERR_PTR(-ENOMEM); if (flags & FOLL_TOUCH) - touch_pmd(vma, addr, pmd, flags); + touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); From dd72269fa1d6b6fe40334dfb99e501601c0bc20d Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:51 +0800 Subject: [PATCH 232/321] mm/huge_memory: rename mmun_start to haddr in remove_migration_pmd mmun_start indicates the mmu_notifier start address, but there's no mmu_notifier stuff in remove_migration_pmd, which makes the name hard to understand. Rename it to haddr to avoid confusing readers and also improve readability.
Link: https://lkml.kernel.org/r/20220704132201.14611-7-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d8214b9e50a0f..32c79b1c178f1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3180,7 +3180,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; - unsigned long mmun_start = address & HPAGE_PMD_MASK; + unsigned long haddr = address & HPAGE_PMD_MASK; pmd_t pmde; swp_entry_t entry; @@ -3203,12 +3203,12 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) if (!is_readable_migration_entry(entry)) rmap_flags |= RMAP_EXCLUSIVE; - page_add_anon_rmap(new, vma, mmun_start, rmap_flags); + page_add_anon_rmap(new, vma, haddr, rmap_flags); } else { page_add_file_rmap(new, vma, true); } VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); - set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); + set_pmd_at(mm, haddr, pvmw->pmd, pmde); /* No need to invalidate - it was non-present before */ update_mmu_cache_pmd(vma, address, pvmw->pmd); From c74fae743d1fc97b2284633fc60ae32c6b338f14 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:52 +0800 Subject: [PATCH 233/321] mm/huge_memory: use helper function vma_lookup in split_huge_pages_pid Use helper function vma_lookup to lookup the needed vma to simplify the code. Minor readability improvement. Link: https://lkml.kernel.org/r/20220704132201.14611-8-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 32c79b1c178f1..6cd5764284991 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2941,10 +2941,10 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, * table filled with PTE-mapped THPs, each of which is distinct. */ for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { - struct vm_area_struct *vma = find_vma(mm, addr); + struct vm_area_struct *vma = vma_lookup(mm, addr); struct page *page; - if (!vma || addr < vma->vm_start) + if (!vma) break; /* skip special VMA and hugetlb VMA */ From 57348637970182569f195e397037f37297338e51 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:53 +0800 Subject: [PATCH 234/321] mm/huge_memory: use helper macro __ATTR_RW Use helper macro __ATTR_RW to define use_zero_page_attr, defrag_attr and enabled_attr to make code more clear. Minor readability improvement. 
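For readers unfamiliar with the macro, a small userspace toy (simplified stand-ins, not the sysfs definitions) shows why the shorthand is equivalent: the attribute name is token-pasted onto the _show/_store suffixes, so it relies on the handlers following that naming scheme:
```
/*
 * Userspace toy showing the token pasting behind an __ATTR_RW-style
 * shorthand. The struct and macros are simplified stand-ins.
 */
#include <stdio.h>

struct attr {
        const char *name;
        int mode;
        const char *show;
        const char *store;
};

#define ATTR(_name, _mode, _show, _store) \
        { .name = #_name, .mode = _mode, .show = #_show, .store = #_store }
#define ATTR_RW(_name) ATTR(_name, 0644, _name##_show, _name##_store)

static struct attr enabled_attr = ATTR_RW(enabled);

int main(void)
{
        /* prints: enabled mode=644 show=enabled_show store=enabled_store */
        printf("%s mode=%o show=%s store=%s\n", enabled_attr.name,
               enabled_attr.mode, enabled_attr.show, enabled_attr.store);
        return 0;
}
```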
Link: https://lkml.kernel.org/r/20220704132201.14611-9-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6cd5764284991..0045d889511db 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -276,8 +276,8 @@ static ssize_t enabled_store(struct kobject *kobj, } return ret; } -static struct kobj_attribute enabled_attr = - __ATTR(enabled, 0644, enabled_show, enabled_store); + +static struct kobj_attribute enabled_attr = __ATTR_RW(enabled); ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, @@ -366,8 +366,7 @@ static ssize_t defrag_store(struct kobject *kobj, return count; } -static struct kobj_attribute defrag_attr = - __ATTR(defrag, 0644, defrag_show, defrag_store); +static struct kobj_attribute defrag_attr = __ATTR_RW(defrag); static ssize_t use_zero_page_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -381,8 +380,7 @@ static ssize_t use_zero_page_store(struct kobject *kobj, return single_hugepage_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); } -static struct kobj_attribute use_zero_page_attr = - __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); +static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page); static ssize_t hpage_pmd_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) From aeb206be0acffc7e5841d7e5480eaa06dc240fe3 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:54 +0800 Subject: [PATCH 235/321] mm/huge_memory: fix comment in zap_huge_pud The comment about deposited pgtable is borrowed from zap_huge_pmd but there's no deposited pgtable stuff for huge pud in zap_huge_pud. Remove it to avoid confusion. Link: https://lkml.kernel.org/r/20220704132201.14611-10-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Cc: Matthew Wilcox Cc: Muchun Song Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0045d889511db..affcf718eba95 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1913,12 +1913,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, ptl = __pud_trans_huge_lock(pud, vma); if (!ptl) return 0; - /* - * For architectures like ppc64 we look at deposited pgtable - * when calling pudp_huge_get_and_clear. So do the - * pgtable_trans_huge_withdraw after finishing pudp related - * operations. - */ + pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); tlb_remove_pud_tlb_entry(tlb, pud, addr); if (vma_is_special_huge(vma)) { From 9ac90c8e5b86b36176e2999de82d4fea2557a5d9 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:55 +0800 Subject: [PATCH 236/321] mm/huge_memory: check pmd_present first in is_huge_zero_pmd When pmd is non-present, pmd_pfn returns an insane value. So we should check pmd_present first to avoid acquiring such insane value and also avoid touching possible cold huge_zero_pfn cache line when pmd isn't present. 
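The reordering leans on the short-circuit behaviour of &&; a tiny standalone sketch with stand-in helpers (not the real pmd accessors) makes the effect visible:
```
/*
 * Standalone sketch of the reordering: with &&, the right-hand side only
 * runs when the left-hand side is true, so testing "present" first keeps
 * the pfn lookup (and the read of the zero-page pfn) off the non-present
 * path. The helpers below are stand-ins, not the real accessors.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long huge_zero_pfn = 42;

static bool pmd_present(unsigned long pmd) { return pmd & 1; }

static unsigned long pmd_pfn(unsigned long pmd)
{
        printf("pmd_pfn() evaluated\n");        /* meaningless for non-present entries */
        return pmd >> 1;
}

static bool is_huge_zero_pmd(unsigned long pmd)
{
        return pmd_present(pmd) && huge_zero_pfn == pmd_pfn(pmd);
}

int main(void)
{
        printf("non-present: %d\n", is_huge_zero_pmd(0));       /* pmd_pfn() never runs */
        printf("present zero pmd: %d\n", is_huge_zero_pmd((42UL << 1) | 1));
        return 0;
}
```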
Link: https://lkml.kernel.org/r/20220704132201.14611-11-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ae3d8e2fd9e2f..12b297f9951df 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -273,7 +273,7 @@ static inline bool is_huge_zero_page(struct page *page) static inline bool is_huge_zero_pmd(pmd_t pmd) { - return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd); + return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); } static inline bool is_huge_zero_pud(pud_t pud) From bc20adfc8a7fe2db6f82fcd64a6820187154eb40 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:56 +0800 Subject: [PATCH 237/321] mm/huge_memory: try to free subpage in swapcache when possible Subpages in swapcache won't be freed even if it is the last user of the page until next time reclaim. It shouldn't hurt indeed, but we could try to free these pages to save more memory for system. Link: https://lkml.kernel.org/r/20220704132201.14611-12-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Cc: Matthew Wilcox Cc: Muchun Song Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index affcf718eba95..bd3714a2d6446 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2538,7 +2538,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, * requires taking the lru_lock so we do the put_page * of the tail pages after the split is complete. */ - put_page(subpage); + free_page_and_swap_cache(subpage); } } From 89742e569c587d742e7bc9ee6d08e0828c4b2e0a Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:57 +0800 Subject: [PATCH 238/321] mm/huge_memory: minor cleanup for split_huge_pages_all There is nothing to do if a zone doesn't have any pages managed by the buddy allocator. So we should check managed_zone instead. Also if a thp is found, there's no need to traverse the subpages again. 
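A toy version of the second point, the pfn walk skipping the tail pages of a THP it has just handled (made-up values, not the real zone walk):
```
/*
 * Toy version of the pfn-walk optimization: after handling the head page
 * of an N-page THP, jump past its tail pages instead of revisiting them
 * one pfn at a time. Values are made up for illustration.
 */
#include <stdio.h>

#define NR_PFNS         16UL
#define THP_PAGES       4UL     /* pretend THPs span 4 pages in this toy */

static int is_thp_head(unsigned long pfn)
{
        return pfn == 4 || pfn == 12;   /* arbitrary head pages */
}

int main(void)
{
        unsigned long pfn;

        for (pfn = 0; pfn < NR_PFNS; pfn++) {
                if (is_thp_head(pfn)) {
                        printf("split THP at pfn %lu\n", pfn);
                        pfn += THP_PAGES - 1;   /* skip the tail pages */
                }
        }
        return 0;
}
```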
Link: https://lkml.kernel.org/r/20220704132201.14611-13-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Cc: Matthew Wilcox Cc: Muchun Song Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bd3714a2d6446..899612be580bf 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2857,9 +2857,12 @@ static void split_huge_pages_all(void) unsigned long total = 0, split = 0; pr_debug("Split all THPs\n"); - for_each_populated_zone(zone) { + for_each_zone(zone) { + if (!managed_zone(zone)) + continue; max_zone_pfn = zone_end_pfn(zone); for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { + int nr_pages; if (!pfn_valid(pfn)) continue; @@ -2875,8 +2878,10 @@ static void split_huge_pages_all(void) total++; lock_page(page); + nr_pages = thp_nr_pages(page); if (!split_huge_page(page)) split++; + pfn += nr_pages - 1; unlock_page(page); next: put_page(page); From 39138a14b0c8cffdc4932b1e96984572a381d2e1 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:58 +0800 Subject: [PATCH 239/321] mm/huge_memory: fix comment of page_deferred_list The current comment is confusing because if global or memcg deferred list in the second tail page is occupied by compound_head, why we still use page[2].deferred_list here? I think it wants to say that Global or memcg deferred list in the first tail page is occupied by compound_mapcount and compound_pincount so we use the second tail page's deferred_list instead. Link: https://lkml.kernel.org/r/20220704132201.14611-14-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 12b297f9951df..37f2f11a6d7ee 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -294,8 +294,8 @@ static inline bool thp_migration_supported(void) static inline struct list_head *page_deferred_list(struct page *page) { /* - * Global or memcg deferred list in the second tail pages is - * occupied by compound_head. + * See organization of tail pages of compound page in + * "struct page" definition. */ return &page[2].deferred_list; } From 8862fe0f4d421cb0aed623e647fc07dbcd6b8393 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:21:59 +0800 Subject: [PATCH 240/321] mm/huge_memory: correct comment of prep_transhuge_page We use page->mapping and page->index, instead of page->indexlru in second tail page as list_head. Correct it. 
Link: https://lkml.kernel.org/r/20220704132201.14611-15-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 899612be580bf..cb0eb924234bf 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -581,7 +581,7 @@ static inline struct deferred_split *get_deferred_split_queue(struct page *page) void prep_transhuge_page(struct page *page) { /* - * we use page->mapping and page->indexlru in second tail page + * we use page->mapping and page->index in second tail page * as list_head: assuming THP order >= 2 */ From 46fa30f134262c9b194fab1ae88150a3f239f8b5 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:22:00 +0800 Subject: [PATCH 241/321] mm/huge_memory: comment the subtly logic in __split_huge_pmd It's dangerous and wrong to call page_folio(pmd_page(*pmd)) when pmd isn't present. But the caller guarantees pmd is present when folio is set. So we should be safe here. Add comment to make it clear. Link: https://lkml.kernel.org/r/20220704132201.14611-16-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Cc: Matthew Wilcox Cc: Muchun Song Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cb0eb924234bf..7b53f631befc1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2235,6 +2235,10 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)) { + /* + * It's safe to call pmd_page when folio is set because it's + * guaranteed that pmd is present. + */ if (folio && folio != page_folio(pmd_page(*pmd))) goto out; __split_huge_pmd_locked(vma, pmd, range.start, freeze); From ff4a92e373a52c5044fea60d1a860125eb936299 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Jul 2022 21:22:01 +0800 Subject: [PATCH 242/321] mm/huge_memory: use helper macro IS_ERR_OR_NULL in split_huge_pages_pid Use helper macro IS_ERR_OR_NULL to check the validity of page to simplify the code. Minor readability improvement. Link: https://lkml.kernel.org/r/20220704132201.14611-17-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Cc: Matthew Wilcox Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/huge_memory.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7b53f631befc1..9f70582b8f512 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2958,9 +2958,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, /* FOLL_DUMP to ignore special (like zero) pages */ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); - if (IS_ERR(page)) - continue; - if (!page || is_zone_device_page(page)) + if (IS_ERR_OR_NULL(page) || is_zone_device_page(page)) continue; if (!is_transparent_hugepage(page)) From bc669f3364ae24b9614e619aaab207102f6e6ac5 Mon Sep 17 00:00:00 2001 From: Xiu Jianfeng Date: Mon, 4 Jul 2022 19:41:12 +0800 Subject: [PATCH 243/321] mm/mprotect: remove the redundant initialization for error The variable error will be assigned correctly before it is used, the initialization is redundant, so remove it. 
Link: https://lkml.kernel.org/r/20220704114112.163112-1-xiujianfeng@huawei.com Signed-off-by: Xiu Jianfeng Signed-off-by: Andrew Morton --- mm/mprotect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/mprotect.c b/mm/mprotect.c index 041beeb5fad65..0420c3ed936c9 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -663,7 +663,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, { unsigned long nstart, end, tmp, reqprot; struct vm_area_struct *vma, *prev; - int error = -EINVAL; + int error; const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP); const bool rier = (current->personality & READ_IMPLIES_EXEC) && (prot & PROT_READ); From 862f6720ab68a533bec27701adc0db965c581329 Mon Sep 17 00:00:00 2001 From: Patrick Wang Date: Tue, 5 Jul 2022 19:31:58 +0800 Subject: [PATCH 244/321] mm: percpu: use kmemleak_ignore_phys() instead of kmemleak_free() Kmemleak recently added a rbtree to store the objects allocted with physical address. Those objects can't be freed with kmemleak_free(). According to the comments, percpu allocations are tracked by kmemleak separately. Kmemleak_free() was used to avoid the unnecessary tracking. If kmemleak_free() fails, those objects would be scanned by kmemleak, which is unnecessary but shouldn't lead to other effects. Use kmemleak_ignore_phys() instead of kmemleak_free() for those objects. Link: https://lkml.kernel.org/r/20220705113158.127600-1-patrick.wang.shcn@gmail.com Fixes: 0c24e061196c ("mm: kmemleak: add rbtree and store physical address for objects allocated with PA") Signed-off-by: Patrick Wang Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Catalin Marinas Signed-off-by: Andrew Morton --- mm/percpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/percpu.c b/mm/percpu.c index 3633eeefaa0db..27697b2429c2e 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -3104,7 +3104,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, goto out_free_areas; } /* kmemleak tracks the percpu allocations separately */ - kmemleak_free(ptr); + kmemleak_ignore_phys(__pa(ptr)); areas[group] = ptr; base = min(ptr, base); @@ -3304,7 +3304,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t goto enomem; } /* kmemleak tracks the percpu allocations separately */ - kmemleak_free(ptr); + kmemleak_ignore_phys(__pa(ptr)); pages[j++] = virt_to_page(ptr); } } @@ -3417,7 +3417,7 @@ void __init setup_per_cpu_areas(void) if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); /* kmemleak tracks the percpu allocations separately */ - kmemleak_free(fc); + kmemleak_ignore_phys(__pa(fc)); ai->dyn_size = unit_size; ai->unit_size = unit_size; From 33f1325596983a793da00537216f9411a4dff073 Mon Sep 17 00:00:00 2001 From: Gang Li Date: Wed, 6 Jul 2022 11:46:54 +0800 Subject: [PATCH 245/321] mm, hugetlb: skip irrelevant nodes in show_free_areas() show_free_areas() allows to filter out node specific data which is irrelevant to the allocation request. But hugetlb_show_meminfo() still shows hugetlb on all nodes, which is redundant and unnecessary. Use show_mem_node_skip() to skip irrelevant nodes. And replace hugetlb_show_meminfo() with hugetlb_show_meminfo_node(nid). 
before-and-after sample output of OOM: before: ``` [ 214.362453] Node 1 active_anon:148kB inactive_anon:4050920kB active_file:112kB inactive_file:100kB [ 214.375429] Node 1 Normal free:45100kB boost:0kB min:45576kB low:56968kB high:68360kB reserved_hig [ 214.388334] lowmem_reserve[]: 0 0 0 0 0 [ 214.390251] Node 1 Normal: 423*4kB (UE) 320*8kB (UME) 187*16kB (UE) 117*32kB (UE) 57*64kB (UME) 20 [ 214.397626] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB [ 214.401518] Node 1 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB ``` after: ``` [ 145.069705] Node 1 active_anon:128kB inactive_anon:4049412kB active_file:56kB inactive_file:84kB u [ 145.110319] Node 1 Normal free:45424kB boost:0kB min:45576kB low:56968kB high:68360kB reserved_hig [ 145.152315] lowmem_reserve[]: 0 0 0 0 0 [ 145.155244] Node 1 Normal: 470*4kB (UME) 373*8kB (UME) 247*16kB (UME) 168*32kB (UE) 86*64kB (UME) [ 145.164119] Node 1 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB ``` Link: https://lkml.kernel.org/r/20220706034655.1834-1-ligang.bdlg@bytedance.com Signed-off-by: Gang Li Reviewed-by: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 18 ++++++++---------- mm/page_alloc.c | 8 ++++++-- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index dce46d5715755..6d0620edf0a60 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -152,7 +152,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb, struct page *ref_page, zap_flags_t zap_flags); void hugetlb_report_meminfo(struct seq_file *); int hugetlb_report_node_meminfo(char *buf, int len, int nid); -void hugetlb_show_meminfo(void); +void hugetlb_show_meminfo_node(int nid); unsigned long hugetlb_total_pages(void); vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); @@ -298,7 +298,7 @@ static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid) return 0; } -static inline void hugetlb_show_meminfo(void) +static inline void hugetlb_show_meminfo_node(int nid) { } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 321384e9e7460..a4506ed1f1dbf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4478,22 +4478,20 @@ int hugetlb_report_node_meminfo(char *buf, int len, int nid) nid, h->surplus_huge_pages_node[nid]); } -void hugetlb_show_meminfo(void) +void hugetlb_show_meminfo_node(int nid) { struct hstate *h; - int nid; if (!hugepages_supported()) return; - for_each_node_state(nid, N_MEMORY) - for_each_hstate(h) - pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", - nid, - h->nr_huge_pages_node[nid], - h->free_huge_pages_node[nid], - h->surplus_huge_pages_node[nid], - huge_page_size(h) / SZ_1K); + for_each_hstate(h) + printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", + nid, + h->nr_huge_pages_node[nid], + h->free_huge_pages_node[nid], + h->surplus_huge_pages_node[nid], + huge_page_size(h) / SZ_1K); } void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c5c78170c49ad..5dcb865feb67b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6014,7 +6014,7 @@ static void show_migration_types(unsigned char type) void show_free_areas(unsigned int filter, nodemask_t *nodemask) { unsigned long free_pcp = 0; - int cpu; + int cpu, nid; 
struct zone *zone; pg_data_t *pgdat; @@ -6202,7 +6202,11 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) printk(KERN_CONT "= %lukB\n", K(total)); } - hugetlb_show_meminfo(); + for_each_online_node(nid) { + if (show_mem_node_skip(filter, nid, nodemask)) + continue; + hugetlb_show_meminfo_node(nid); + } printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); From 84b9649fd0fffa8462ee4d5868de14079afbb7de Mon Sep 17 00:00:00 2001 From: Joel Savitz Date: Wed, 6 Jul 2022 16:54:51 -0400 Subject: [PATCH 246/321] selftests/vm: enable running select groups of tests Our memory management kernel CI testing at Red Hat uses the VM selftests and we have run into two problems: First, our LTP tests overlap with the VM selftests. We want to avoid unhelpful redundancy in our testing practices. Second, we have observed the current run_vmtests.sh to report overall failure/ambiguous results in the case that a machine lacks the necessary hardware to perform one or more of the tests. E.g. ksm tests that require more than one numa node. We want to be able to run the vm selftests suitable to particular hardware. Add the ability to run one or more groups of vm tests via run_vmtests.sh instead of simply all-or-none in order to solve these problems. Preserve existing default behavior of running all tests when the script is invoked with no arguments. Documentation of test groups is included in the patch as follows: # ./run_vmtests.sh [ -h || --help ] usage: ./tools/testing/selftests/vm/run_vmtests.sh [ -h | -t ""] -t: specify specific categories to tests to run -h: display this message The default behavior is to run all tests. Alternatively, specific groups tests can be run by passing a string to the -t argument containing one or more of the following categories separated by spaces: - mmap tests for mmap(2) - gup_test tests for gup using gup_test interface - userfaultfd tests for userfaultfd(2) - compaction a test for the patch "Allow compaction of unevictable pages" - mlock tests for mlock(2) - mremap tests for mremap(2) - hugevm tests for very large virtual address space - vmalloc vmalloc smoke tests - hmm hmm smoke tests - madv_populate test memadvise(2) MADV_POPULATE_{READ,WRITE} options - memfd_secret test memfd_secret(2) - process_mrelease test process_mrelease(2) - ksm ksm tests that do not require >=2 NUMA nodes - ksm_numa ksm tests that require >=2 NUMA nodes - pkey memory protection key tests example: ./run_vmtests.sh -t "hmm mmap ksm" Changes from v2: - rebase onto the mm-everyting branch in https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git - integrate this functionality with new the tests Changes from v1: - use a command line argument to pass the test categories to the script instead of an environmet variable - remove novel prints to avoid messing with extant parsers of this script - update the usage text Link: https://lkml.kernel.org/r/20220706205451.4133254-1-jsavitz@redhat.com Signed-off-by: Joel Savitz Cc: Shuah Khan Cc: Nico Pache Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/run_vmtests.sh | 241 +++++++++++++++------- 1 file changed, 161 insertions(+), 80 deletions(-) diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh index d84fe0fa15e1f..fb72a1338d075 100755 --- a/tools/testing/selftests/vm/run_vmtests.sh +++ b/tools/testing/selftests/vm/run_vmtests.sh @@ -1,6 +1,6 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -#please run as root +# Please run as root # Kselftest framework requirement 
- SKIP code is 4. ksft_skip=4 @@ -8,15 +8,75 @@ ksft_skip=4 mnt=./huge exitcode=0 -#get huge pagesize and freepages from /proc/meminfo -while read -r name size unit; do - if [ "$name" = "HugePages_Free:" ]; then - freepgs="$size" - fi - if [ "$name" = "Hugepagesize:" ]; then - hpgsize_KB="$size" +usage() { + cat <"] + -t: specify specific categories to tests to run + -h: display this message + +The default behavior is to run all tests. + +Alternatively, specific groups tests can be run by passing a string +to the -t argument containing one or more of the following categories +separated by spaces: +- mmap + tests for mmap(2) +- gup_test + tests for gup using gup_test interface +- userfaultfd + tests for userfaultfd(2) +- compaction + a test for the patch "Allow compaction of unevictable pages" +- mlock + tests for mlock(2) +- mremap + tests for mremap(2) +- hugevm + tests for very large virtual address space +- vmalloc + vmalloc smoke tests +- hmm + hmm smoke tests +- madv_populate + test memadvise(2) MADV_POPULATE_{READ,WRITE} options +- memfd_secret + test memfd_secret(2) +- process_mrelease + test process_mrelease(2) +- ksm + ksm tests that do not require >=2 NUMA nodes +- ksm_numa + ksm tests that require >=2 NUMA nodes +- pkey + memory protection key tests +example: ./run_vmtests.sh -t "hmm mmap ksm" +EOF + exit 0 +} + + +while getopts "ht:" OPT; do + case ${OPT} in + "h") usage ;; + "t") TEST_ITEMS=${OPTARG} ;; + esac +done +shift $((OPTIND -1)) + +# default behavior: run all tests +TEST_ITEMS=${TEST_ITEMS:-default} + +test_selected() { + if [ "$TEST_ITEMS" == "default" ]; then + # If no TEST_ITEMS are specified, run all tests + return 0 fi -done < /proc/meminfo + echo ${TEST_ITEMS} | grep ${1} 2>&1 >/dev/null + return ${?} +} + +# Hugepage setup only needed for hugetlb tests +if test_selected "hugetlb"; then # Simple hugetlbfs tests have a hardcoded minimum requirement of # huge pages totaling 256MB (262144KB) in size. The userfaultfd @@ -28,7 +88,17 @@ hpgsize_MB=$((hpgsize_KB / 1024)) half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128)) needmem_KB=$((half_ufd_size_MB * 2 * 1024)) -#set proper nr_hugepages +# get huge pagesize and freepages from /proc/meminfo +while read -r name size unit; do + if [ "$name" = "HugePages_Free:" ]; then + freepgs="$size" + fi + if [ "$name" = "Hugepagesize:" ]; then + hpgsize_KB="$size" + fi +done < /proc/meminfo + +# set proper nr_hugepages if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages) needpgs=$((needmem_KB / hpgsize_KB)) @@ -57,144 +127,155 @@ else exit 1 fi -#filter 64bit architectures +fi # test_selected "hugetlb" + +# filter 64bit architectures ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64" if [ -z "$ARCH" ]; then ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/') fi VADDR64=0 -echo "$ARCH64STR" | grep "$ARCH" && VADDR64=1 +echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1 # Usage: run_test [test binary] [arbitrary test arguments...] run_test() { - local title="running $*" - local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -) - printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" - - "$@" - local ret=$? 
- if [ $ret -eq 0 ]; then - echo "[PASS]" - elif [ $ret -eq $ksft_skip ]; then - echo "[SKIP]" - exitcode=$ksft_skip - else - echo "[FAIL]" - exitcode=1 - fi + if test_selected ${CATEGORY}; then + local title="running $*" + local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -) + printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" + + "$@" + local ret=$? + if [ $ret -eq 0 ]; then + echo "[PASS]" + elif [ $ret -eq $ksft_skip ]; then + echo "[SKIP]" + exitcode=$ksft_skip + else + echo "[FAIL]" + exitcode=1 + fi + fi # test_selected } -mkdir "$mnt" -mount -t hugetlbfs none "$mnt" +# setup only needed for hugetlb tests +if test_selected "hugetlb"; then + mkdir "$mnt" + mount -t hugetlbfs none "$mnt" +fi -run_test ./hugepage-mmap +CATEGORY="hugetlb" run_test ./hugepage-mmap shmmax=$(cat /proc/sys/kernel/shmmax) shmall=$(cat /proc/sys/kernel/shmall) echo 268435456 > /proc/sys/kernel/shmmax echo 4194304 > /proc/sys/kernel/shmall -run_test ./hugepage-shm +CATEGORY="hugetlb" run_test ./hugepage-shm echo "$shmmax" > /proc/sys/kernel/shmmax echo "$shmall" > /proc/sys/kernel/shmall -run_test ./map_hugetlb +CATEGORY="hugetlb" run_test ./map_hugetlb -run_test ./hugepage-mremap "$mnt"/huge_mremap -rm -f "$mnt"/huge_mremap +CATEGORY="hugetlb" run_test ./hugepage-mremap "$mnt"/huge_mremap +test_selected "hugetlb" && rm -f "$mnt"/huge_mremap -run_test ./hugepage-vmemmap +CATEGORY="hugetlb" run_test ./hugepage-vmemmap -run_test ./hugetlb-madvise "$mnt"/madvise-test -rm -f "$mnt"/madvise-test +CATEGORY="hugetlb" run_test ./hugetlb-madvise "$mnt"/madvise-test +test_selected "hugetlb" && rm -f "$mnt"/madvise-test -echo "NOTE: The above hugetlb tests provide minimal coverage. Use" -echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" -echo " hugetlb regression testing." +if test_selected "hugetlb"; then + echo "NOTE: These hugetlb tests provide minimal coverage. Use" + echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" + echo " hugetlb regression testing." +fi -run_test ./map_fixed_noreplace +CATEGORY="mmap" run_test ./map_fixed_noreplace # get_user_pages_fast() benchmark -run_test ./gup_test -u +CATEGORY="gup_test" run_test ./gup_test -u # pin_user_pages_fast() benchmark -run_test ./gup_test -a +CATEGORY="gup_test" run_test ./gup_test -a # Dump pages 0, 19, and 4096, using pin_user_pages: -run_test ./gup_test -ct -F 0x1 0 19 0x1000 +CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000 -run_test ./userfaultfd anon 20 16 -run_test ./userfaultfd anon:dev 20 16 +CATEGORY="userfaultfd" run_test ./userfaultfd anon 20 16 +CATEGORY="userfaultfd" run_test ./userfaultfd anon:dev 20 16 # Hugetlb tests require source and destination huge pages. Pass in half the # size ($half_ufd_size_MB), which is used for *each*. 
-run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32 -run_test ./userfaultfd hugetlb:dev "$half_ufd_size_MB" 32 -run_test ./userfaultfd hugetlb_shared "$half_ufd_size_MB" 32 "$mnt"/uffd-test +CATEGORY="userfaultfd" run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32 +CATEGORY="userfaultfd" run_test ./userfaultfd hugetlb:dev "$half_ufd_size_MB" 32 +CATEGORY="userfaultfd" run_test ./userfaultfd hugetlb_shared "$half_ufd_size_MB" 32 "$mnt"/uffd-test rm -f "$mnt"/uffd-test -run_test ./userfaultfd hugetlb_shared:dev "$half_ufd_size_MB" 32 "$mnt"/uffd-test +CATEGORY="userfaultfd" run_test ./userfaultfd hugetlb_shared:dev "$half_ufd_size_MB" 32 "$mnt"/uffd-test rm -f "$mnt"/uffd-test -run_test ./userfaultfd shmem 20 16 -run_test ./userfaultfd shmem:dev 20 16 - -#cleanup -umount "$mnt" -rm -rf "$mnt" -echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages +CATEGORY="userfaultfd" run_test ./userfaultfd shmem 20 16 +CATEGORY="userfaultfd" run_test ./userfaultfd shmem:dev 20 16 + +# cleanup (only needed when running hugetlb tests) +if test_selected "hugetlb"; then + umount "$mnt" + rm -rf "$mnt" + echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages +fi -run_test ./compaction_test +CATEGORY="compaction" run_test ./compaction_test -run_test sudo -u nobody ./on-fault-limit +CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit -run_test ./map_populate +CATEGORY="mmap" run_test ./map_populate -run_test ./mlock-random-test +CATEGORY="mlock" run_test ./mlock-random-test -run_test ./mlock2-tests +CATEGORY="mlock" run_test ./mlock2-tests -run_test ./mrelease_test +CATEGORY="process_mrelease" run_test ./mrelease_test -run_test ./mremap_test +CATEGORY="mremap" run_test ./mremap_test -run_test ./thuge-gen +CATEGORY="hugetlb" run_test ./thuge-gen if [ $VADDR64 -ne 0 ]; then - run_test ./virtual_address_range + CATEGORY="hugevm" run_test ./virtual_address_range # virtual address 128TB switch test - run_test ./va_128TBswitch.sh + CATEGORY="hugevm" run_test ./va_128TBswitch.sh fi # VADDR64 # vmalloc stability smoke test -run_test ./test_vmalloc.sh smoke +CATEGORY="vmalloc" run_test ./test_vmalloc.sh smoke -run_test ./mremap_dontunmap +CATEGORY="mremap" run_test ./mremap_dontunmap -run_test ./test_hmm.sh smoke +CATEGORY="hmm" run_test ./test_hmm.sh smoke # MADV_POPULATE_READ and MADV_POPULATE_WRITE tests -run_test ./madv_populate +CATEGORY="madv_populate" run_test ./madv_populate -run_test ./memfd_secret +CATEGORY="memfd_secret" run_test ./memfd_secret # KSM MADV_MERGEABLE test with 10 identical pages -run_test ./ksm_tests -M -p 10 +CATEGORY="ksm" run_test ./ksm_tests -M -p 10 # KSM unmerge test -run_test ./ksm_tests -U +CATEGORY="ksm" run_test ./ksm_tests -U # KSM test with 10 zero pages and use_zero_pages = 0 -run_test ./ksm_tests -Z -p 10 -z 0 +CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0 # KSM test with 10 zero pages and use_zero_pages = 1 -run_test ./ksm_tests -Z -p 10 -z 1 +CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1 # KSM test with 2 NUMA nodes and merge_across_nodes = 1 -run_test ./ksm_tests -N -m 1 +CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1 # KSM test with 2 NUMA nodes and merge_across_nodes = 0 -run_test ./ksm_tests -N -m 0 +CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0 # protection_keys tests if [ -x ./protection_keys_32 ] then - run_test ./protection_keys_32 + CATEGORY="pkey" run_test ./protection_keys_32 fi if [ -x ./protection_keys_64 ] then - run_test ./protection_keys_64 + CATEGORY="pkey" run_test ./protection_keys_64 fi exit $exitcode From 2f466333c6099b82f1a4ad2171a0d731c15452ca Mon 
Sep 17 00:00:00 2001 From: Uros Bizjak Date: Fri, 8 Jul 2022 16:07:36 +0200 Subject: [PATCH 247/321] mm/page_alloc: use try_cmpxchg in set_pfnblock_flags_mask Use try_cmpxchg instead of cmpxchg in set_pfnblock_flags_mask. x86 CMPXCHG instruction returns success in ZF flag, so this change saves a compare after cmpxchg (and related move instruction in front of cmpxchg). The main loop improves from: 1c5d: 48 89 c2 mov %rax,%rdx 1c60: 48 89 c1 mov %rax,%rcx 1c63: 48 21 fa and %rdi,%rdx 1c66: 4c 09 c2 or %r8,%rdx 1c69: f0 48 0f b1 16 lock cmpxchg %rdx,(%rsi) 1c6e: 48 39 c1 cmp %rax,%rcx 1c71: 75 ea jne 1c5d <...> to: 1c60: 48 89 ca mov %rcx,%rdx 1c63: 48 21 c2 and %rax,%rdx 1c66: 4c 09 c2 or %r8,%rdx 1c69: f0 48 0f b1 16 lock cmpxchg %rdx,(%rsi) 1c6e: 75 f0 jne 1c60 <...> Link: https://lkml.kernel.org/r/20220708140736.8737-1-ubizjak@gmail.com Signed-off-by: Uros Bizjak Cc: Andrew Morton Signed-off-by: Andrew Morton --- mm/page_alloc.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5dcb865feb67b..d0d09a9ce3647 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -602,7 +602,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, { unsigned long *bitmap; unsigned long bitidx, word_bitidx; - unsigned long old_word, word; + unsigned long word; BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); @@ -618,12 +618,8 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, flags <<= bitidx; word = READ_ONCE(bitmap[word_bitidx]); - for (;;) { - old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); - if (word == old_word) - break; - word = old_word; - } + do { + } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); } void set_pageblock_migratetype(struct page *page, int migratetype) From 70803e79723793fa93013dbf6d8401630bcc75f9 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:46 +0900 Subject: [PATCH 248/321] mm/hugetlb: check gigantic_page_runtime_supported() in return_unused_surplus_pages() Patch series "mm, hwpoison: enable 1GB hugepage support", v5. This patch (of 8): I found a weird state of 1GB hugepage pool, caused by the following procedure: - run a process reserving all free 1GB hugepages, - shrink free 1GB hugepage pool to zero (i.e. writing 0 to /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages), then - kill the reserving process. , then all the hugepages are free *and* surplus at the same time. $ cat /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages 3 $ cat /sys/kernel/mm/hugepages/hugepages-1048576kB/free_hugepages 3 $ cat /sys/kernel/mm/hugepages/hugepages-1048576kB/resv_hugepages 0 $ cat /sys/kernel/mm/hugepages/hugepages-1048576kB/surplus_hugepages 3 This state is resolved by reserving and allocating the pages then freeing them again, so this seems not to result in serious problem. But it's a little surprising (shrinking pool suddenly fails). This behavior is caused by hstate_is_gigantic() check in return_unused_surplus_pages(). This was introduced so long ago in 2008 by commit aa888a74977a ("hugetlb: support larger than MAX_ORDER"), and at that time the gigantic pages were not supposed to be allocated/freed at run-time. Now kernel can support runtime allocation/free, so let's check gigantic_page_runtime_supported() together. 
Link: https://lkml.kernel.org/r/20220708053653.964464-1-naoya.horiguchi@linux.dev Link: https://lkml.kernel.org/r/20220708053653.964464-2-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Cc: David Hildenbrand Cc: Mike Kravetz Cc: Miaohe Lin Cc: Liu Shixin Cc: Yang Shi Cc: Oscar Salvador Cc: Muchun Song Cc: kernel test robot Signed-off-by: Andrew Morton --- mm/hugetlb.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a4506ed1f1dbf..cf8ccee7654c3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2432,8 +2432,7 @@ static void return_unused_surplus_pages(struct hstate *h, /* Uncommit the reservation */ h->resv_huge_pages -= unused_resv_pages; - /* Cannot return gigantic pages currently */ - if (hstate_is_gigantic(h)) + if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) goto out; /* From 7f75a5a8728c689e526a4f0d7f25be17e357eb92 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:47 +0900 Subject: [PATCH 249/321] mm/hugetlb: make pud_huge() and follow_huge_pud() aware of non-present pud entry follow_pud_mask() does not support non-present pud entry now. As long as I tested on x86_64 server, follow_pud_mask() still simply returns no_page_table() for non-present_pud_entry() due to pud_bad(), so no severe user-visible effect should happen. But generally we should call follow_huge_pud() for non-present pud entry for 1GB hugetlb page. Update pud_huge() and follow_huge_pud() to handle non-present pud entries. The changes are similar to previous works for pud entries commit e66f17ff7177 ("mm/hugetlb: take page table lock in follow_huge_pmd()") and commit cbef8478bee5 ("mm/hugetlb: pmd_huge() returns true for non-present hugepage"). Link: https://lkml.kernel.org/r/20220708053653.964464-3-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Reviewed-by: Miaohe Lin Reviewed-by: Mike Kravetz Cc: David Hildenbrand Cc: kernel test robot Cc: Liu Shixin Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- arch/x86/mm/hugetlbpage.c | 8 +++++++- mm/hugetlb.c | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 509408da0da1e..6b3033845c6d3 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -30,9 +30,15 @@ int pmd_huge(pmd_t pmd) (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; } +/* + * pud_huge() returns 1 if @pud is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
+ */ int pud_huge(pud_t pud) { - return !!(pud_val(pud) & _PAGE_PSE); + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; } #ifdef CONFIG_HUGETLB_PAGE diff --git a/mm/hugetlb.c b/mm/hugetlb.c index cf8ccee7654c3..77119d93a0f91 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6978,10 +6978,38 @@ struct page * __weak follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags) { - if (flags & (FOLL_GET | FOLL_PIN)) + struct page *page = NULL; + spinlock_t *ptl; + pte_t pte; + + if (WARN_ON_ONCE(flags & FOLL_PIN)) return NULL; - return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); +retry: + ptl = huge_pte_lock(hstate_sizelog(PUD_SHIFT), mm, (pte_t *)pud); + if (!pud_huge(*pud)) + goto out; + pte = huge_ptep_get((pte_t *)pud); + if (pte_present(pte)) { + page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); + if (WARN_ON_ONCE(!try_grab_page(page, flags))) { + page = NULL; + goto out; + } + } else { + if (is_hugetlb_entry_migration(pte)) { + spin_unlock(ptl); + __migration_entry_wait(mm, (pte_t *)pud, ptl); + goto retry; + } + /* + * hwpoisoned entry is treated as no_page_table in + * follow_page_mask(). + */ + } +out: + spin_unlock(ptl); + return page; } struct page * __weak From 94b1cf70b7429e3fa3100b0c6ad85ac141c23a52 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:48 +0900 Subject: [PATCH 250/321] mm, hwpoison, hugetlb: support saving mechanism of raw error pages When handling memory error on a hugetlb page, the error handler tries to dissolve and turn it into 4kB pages. If it's successfully dissolved, PageHWPoison flag is moved to the raw error page, so that's all right. However, dissolve sometimes fails, then the error page is left as hwpoisoned hugepage. It's useful if we can retry to dissolve it to save healthy pages, but that's not possible now because the information about where the raw error pages is lost. Use the private field of a few tail pages to keep that information. The code path of shrinking hugepage pool uses this info to try delayed dissolve. In order to remember multiple errors in a hugepage, a singly-linked list originated from SUBPAGE_INDEX_HWPOISON-th tail page is constructed. Only simple operations (adding an entry or clearing all) are required and the list is assumed not to be very long, so this simple data structure should be enough. If we failed to save raw error info, the hwpoison hugepage has errors on unknown subpage, then this new saving mechanism does not work any more, so disable saving new raw error info and freeing hwpoison hugepages. 
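A compact userspace sketch of this bookkeeping (names are simplified and the list head lives in an ordinary struct; in the kernel it hangs off the private field of the SUBPAGE_INDEX_HWPOISON tail page):
```
/*
 * Userspace sketch of the raw-error bookkeeping: each error on a huge page
 * is pushed onto a singly linked list so a later dissolve can re-mark
 * exactly the bad raw pages; if a record cannot be allocated, the whole
 * hugepage is flagged unreliable and is never freed/dissolved again.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct raw_hwp_page {
        struct raw_hwp_page *next;
        unsigned long pfn;              /* stand-in for struct page * */
};

struct hugepage {
        struct raw_hwp_page *raw_hwp_list;
        bool raw_hwp_unreliable;
};

static int record_hwpoison(struct hugepage *hp, unsigned long pfn)
{
        struct raw_hwp_page *p;

        if (hp->raw_hwp_unreliable)
                return -1;
        for (p = hp->raw_hwp_list; p; p = p->next)
                if (p->pfn == pfn)
                        return -1;              /* this raw page is already recorded */
        p = malloc(sizeof(*p));
        if (!p) {
                hp->raw_hwp_unreliable = true;  /* lost track: stop freeing this hugepage */
                return -1;
        }
        p->pfn = pfn;
        p->next = hp->raw_hwp_list;
        hp->raw_hwp_list = p;
        return 0;
}

static void clear_hwpoison(struct hugepage *hp)
{
        struct raw_hwp_page *p = hp->raw_hwp_list;

        while (p) {
                struct raw_hwp_page *next = p->next;

                printf("re-mark pfn %lu as poisoned\n", p->pfn);
                free(p);
                p = next;
        }
        hp->raw_hwp_list = NULL;
}

int main(void)
{
        struct hugepage hp = { 0 };

        record_hwpoison(&hp, 42);
        record_hwpoison(&hp, 42);       /* duplicate, ignored */
        record_hwpoison(&hp, 100);
        clear_hwpoison(&hp);            /* done when the hugepage is dissolved */
        return 0;
}
```
Keeping only simple push/clear operations matches the assumption above that the per-hugepage error list stays short.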
Link: https://lkml.kernel.org/r/20220708053653.964464-4-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Reported-by: kernel test robot Cc: David Hildenbrand Cc: Liu Shixin Cc: Miaohe Lin Cc: Mike Kravetz Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 18 +++++++++- mm/hugetlb.c | 32 ++++++++++++----- mm/memory-failure.c | 79 +++++++++++++++++++++++++++++++++++++++-- 3 files changed, 116 insertions(+), 13 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6d0620edf0a60..6fd128b80d575 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -42,6 +42,9 @@ enum { SUBPAGE_INDEX_CGROUP, /* reuse page->private */ SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */ __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD, +#endif +#ifdef CONFIG_MEMORY_FAILURE + SUBPAGE_INDEX_HWPOISON, #endif __NR_USED_SUBPAGE, }; @@ -551,7 +554,7 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * Synchronization: Initially set after new page allocation with no * locking. When examined and modified during migration processing * (isolate, migrate, putback) the hugetlb_lock is held. - * HPG_temporary - - Set on a page that is temporarily allocated from the buddy + * HPG_temporary - Set on a page that is temporarily allocated from the buddy * allocator. Typically used for migration target pages when no pages * are available in the pool. The hugetlb free page path will * immediately free pages with this flag set to the buddy allocator. @@ -561,6 +564,8 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * HPG_freed - Set when page is on the free lists. * Synchronization: hugetlb_lock held for examination and modification. * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. + * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page + * that is not tracked by raw_hwp_page list. */ enum hugetlb_page_flags { HPG_restore_reserve = 0, @@ -568,6 +573,7 @@ enum hugetlb_page_flags { HPG_temporary, HPG_freed, HPG_vmemmap_optimized, + HPG_raw_hwp_unreliable, __NR_HPAGEFLAGS, }; @@ -614,6 +620,7 @@ HPAGEFLAG(Migratable, migratable) HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) +HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) #ifdef CONFIG_HUGETLB_PAGE @@ -796,6 +803,15 @@ extern int dissolve_free_huge_page(struct page *page); extern int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); +#ifdef CONFIG_MEMORY_FAILURE +extern int hugetlb_clear_page_hwpoison(struct page *hpage); +#else +static inline int hugetlb_clear_page_hwpoison(struct page *hpage) +{ + return 0; +} +#endif + #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION #ifndef arch_hugetlb_migration_supported static inline bool arch_hugetlb_migration_supported(struct hstate *h) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 77119d93a0f91..3956494cc5fbc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1442,6 +1442,15 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page, h->surplus_huge_pages_node[nid]--; } + /* + * This leaves HPageRawHwpUnreliable pages as leaked hugepages, not + * as leaked generic-compound pages. Otherwise page_mapped() or + * folio_mapped() gets slow because for-loop for each subpage is + * called. 
+ */ + if (HPageRawHwpUnreliable(page)) + return; + /* * Very subtle * @@ -1535,6 +1544,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page) if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; + /* + * If we don't know which subpages are hwpoisoned, we can't free + * the hugepage, so it's leaked intentionally. + */ + if (HPageRawHwpUnreliable(page)) + return; + if (hugetlb_vmemmap_restore(h, page)) { spin_lock_irq(&hugetlb_lock); /* @@ -1547,6 +1563,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page) return; } + /* + * Move PageHWPoison flag from head page to the raw error pages, + * which makes any healthy subpages reusable. + */ + if (unlikely(PageHWPoison(page))) + hugetlb_clear_page_hwpoison(page); + for (i = 0; i < pages_per_huge_page(h); i++, subpage = mem_map_next(subpage, page, i)) { subpage->flags &= ~(1 << PG_locked | 1 << PG_error | @@ -2109,15 +2132,6 @@ int dissolve_free_huge_page(struct page *page) */ rc = hugetlb_vmemmap_restore(h, head); if (!rc) { - /* - * Move PageHWPoison flag from head page to the raw - * error page, which makes any subpages rather than - * the error page reusable. - */ - if (PageHWPoison(head) && page != head) { - SetPageHWPoison(page); - ClearPageHWPoison(head); - } update_and_free_page(h, head, false); } else { spin_lock_irq(&hugetlb_lock); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c9931c6763356..6833c5e4b410e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1664,6 +1664,80 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, EXPORT_SYMBOL_GPL(mf_dax_kill_procs); #endif /* CONFIG_FS_DAX */ +#ifdef CONFIG_HUGETLB_PAGE +/* + * Struct raw_hwp_page represents information about "raw error page", + * constructing singly linked list originated from ->private field of + * SUBPAGE_INDEX_HWPOISON-th tail page. + */ +struct raw_hwp_page { + struct llist_node node; + struct page *page; +}; + +static inline struct llist_head *raw_hwp_list_head(struct page *hpage) +{ + return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON); +} + +static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page) +{ + struct llist_head *head; + struct raw_hwp_page *raw_hwp; + struct llist_node *t, *tnode; + int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0; + + /* + * Once the hwpoison hugepage has lost reliable raw error info, + * there is little meaning to keep additional error info precisely, + * so skip to add additional raw error info. + */ + if (HPageRawHwpUnreliable(hpage)) + return -EHWPOISON; + head = raw_hwp_list_head(hpage); + llist_for_each_safe(tnode, t, head->first) { + struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); + + if (p->page == page) + return -EHWPOISON; + } + + raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); + if (raw_hwp) { + raw_hwp->page = page; + llist_add(&raw_hwp->node, head); + /* the first error event will be counted in action_result(). */ + if (ret) + num_poisoned_pages_inc(); + } else { + /* + * Failed to save raw error info. We no longer trace all + * hwpoisoned subpages, and we need refuse to free/dissolve + * this hwpoisoned hugepage. 
+ */ + SetHPageRawHwpUnreliable(hpage); + } + return ret; +} + +int hugetlb_clear_page_hwpoison(struct page *hpage) +{ + struct llist_head *head; + struct llist_node *t, *tnode; + + if (!HPageRawHwpUnreliable(hpage)) + ClearPageHWPoison(hpage); + head = raw_hwp_list_head(hpage); + llist_for_each_safe(tnode, t, head->first) { + struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); + + SetPageHWPoison(p->page); + kfree(p); + } + llist_del_all(head); + return 0; +} + /* * Called from hugetlb code with hugetlb_lock held. * @@ -1698,7 +1772,7 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags) goto out; } - if (TestSetPageHWPoison(head)) { + if (hugetlb_set_page_hwpoison(head, page)) { ret = -EHWPOISON; goto out; } @@ -1710,7 +1784,6 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags) return ret; } -#ifdef CONFIG_HUGETLB_PAGE /* * Taking refcount of hugetlb pages needs extra care about race conditions * with basic operations like hugepage allocation/free/demotion. @@ -1751,7 +1824,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb lock_page(head); if (hwpoison_filter(p)) { - ClearPageHWPoison(head); + hugetlb_clear_page_hwpoison(head); res = -EOPNOTSUPP; goto out; } From 07dc3ec522dbdebe05632bdd90d7b8a8fde70f17 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:49 +0900 Subject: [PATCH 251/321] mm, hwpoison: make unpoison aware of raw error info in hwpoisoned hugepage Raw error info list needs to be removed when hwpoisoned hugetlb is unpoisoned. And unpoison handler needs to know how many errors there are in the target hugepage. So add them. HPageVmemmapOptimized(hpage) and HPageRawHwpUnreliable(hpage)) can't be unpoisoned, so let's skip them. Link: https://lkml.kernel.org/r/20220708053653.964464-5-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Reported-by: kernel test robot Cc: David Hildenbrand Cc: Liu Shixin Cc: Miaohe Lin Cc: Mike Kravetz Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/swapops.h | 9 ++++++++ mm/memory-failure.c | 50 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/include/linux/swapops.h b/include/linux/swapops.h index bb7afd03a324f..a3d435bf9f972 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -490,6 +490,11 @@ static inline void num_poisoned_pages_dec(void) atomic_long_dec(&num_poisoned_pages); } +static inline void num_poisoned_pages_sub(long i) +{ + atomic_long_sub(i, &num_poisoned_pages); +} + #else static inline swp_entry_t make_hwpoison_entry(struct page *page) @@ -505,6 +510,10 @@ static inline int is_hwpoison_entry(swp_entry_t swp) static inline void num_poisoned_pages_inc(void) { } + +static inline void num_poisoned_pages_sub(long i) +{ +} #endif static inline int non_swap_entry(swp_entry_t entry) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 6833c5e4b410e..89e74ec8a95f5 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1720,22 +1720,41 @@ static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page) return ret; } -int hugetlb_clear_page_hwpoison(struct page *hpage) +static long free_raw_hwp_pages(struct page *hpage, bool move_flag) { struct llist_head *head; struct llist_node *t, *tnode; + long count = 0; - if (!HPageRawHwpUnreliable(hpage)) - ClearPageHWPoison(hpage); + /* + * HPageVmemmapOptimized hugepages can't be unpoisoned because + * struct pages for tail pages are required to 
free hwpoisoned + * hugepages. HPageRawHwpUnreliable hugepages shouldn't be + * unpoisoned by definition. + */ + if (HPageVmemmapOptimized(hpage) || HPageRawHwpUnreliable(hpage)) + return 0; head = raw_hwp_list_head(hpage); llist_for_each_safe(tnode, t, head->first) { struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); - SetPageHWPoison(p->page); + if (move_flag) + SetPageHWPoison(p->page); kfree(p); + count++; } llist_del_all(head); - return 0; + return count; +} + +int hugetlb_clear_page_hwpoison(struct page *hpage) +{ + int ret = -EBUSY; + + if (!HPageRawHwpUnreliable(hpage)) + ret = !TestClearPageHWPoison(hpage); + free_raw_hwp_pages(hpage, true); + return ret; } /* @@ -1879,6 +1898,10 @@ static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int * return 0; } +static inline long free_raw_hwp_pages(struct page *hpage, bool move_flag) +{ + return 0; +} #endif /* CONFIG_HUGETLB_PAGE */ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, @@ -2284,6 +2307,7 @@ int unpoison_memory(unsigned long pfn) struct page *p; int ret = -EBUSY; int freeit = 0; + long count = 1; static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); @@ -2331,6 +2355,13 @@ int unpoison_memory(unsigned long pfn) ret = get_hwpoison_page(p, MF_UNPOISON); if (!ret) { + if (PageHuge(p)) { + count = free_raw_hwp_pages(page, false); + if (count == 0) { + ret = -EBUSY; + goto unlock_mutex; + } + } ret = TestClearPageHWPoison(page) ? 0 : -EBUSY; } else if (ret < 0) { if (ret == -EHWPOISON) { @@ -2339,6 +2370,13 @@ int unpoison_memory(unsigned long pfn) unpoison_pr_info("Unpoison: failed to grab page %#lx\n", pfn, &unpoison_rs); } else { + if (PageHuge(p)) { + count = free_raw_hwp_pages(page, false); + if (count == 0) { + ret = -EBUSY; + goto unlock_mutex; + } + } freeit = !!TestClearPageHWPoison(p); put_page(page); @@ -2351,7 +2389,7 @@ int unpoison_memory(unsigned long pfn) unlock_mutex: mutex_unlock(&mf_mutex); if (!ret || freeit) { - num_poisoned_pages_dec(); + num_poisoned_pages_sub(count); unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", page_to_pfn(p), &unpoison_rs); } From f80e6ebc97537afbc33bc23d314fb48c8aea145b Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:50 +0900 Subject: [PATCH 252/321] mm, hwpoison: set PG_hwpoison for busy hugetlb pages If memory_failure() fails to grab a page refcount on a hugetlb page because it's busy, it returns without setting PG_hwpoison on it. This not only loses a chance of error containment, but also breaks the rule that action_result() should be called only when memory_failure() does some handling work (even if that's just setting PG_hwpoison). This inconsistency could harm code maintainability. So set PG_hwpoison and call hugetlb_set_page_hwpoison() in such a case.
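As a condensed (and slightly simplified) illustration of the retry flow this change establishes, with MF_NO_RETRY being the flag added below:

	/* Caller side, try_memory_failure_hugetlb(): */
retry:
	res = get_huge_page_for_hwpoison(pfn, flags);
	if (res == -EBUSY && !(flags & MF_NO_RETRY)) {
		flags |= MF_NO_RETRY;
		goto retry;	/* one more attempt at grabbing the refcount */
	}

	/*
	 * Callee side, __get_huge_page_for_hwpoison(): once MF_NO_RETRY is set,
	 * the -EBUSY branch no longer jumps to "out", so
	 * hugetlb_set_page_hwpoison() still records PG_hwpoison on the busy page.
	 */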
Link: https://lkml.kernel.org/r/20220708053653.964464-6-naoya.horiguchi@linux.dev Fixes: 405ce051236c ("mm/hwpoison: fix race between hugetlb free/demotion and memory_failure_hugetlb()") Signed-off-by: Naoya Horiguchi Reviewed-by: Miaohe Lin Cc: David Hildenbrand Cc: kernel test robot Cc: Liu Shixin Cc: Mike Kravetz Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 + mm/memory-failure.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 4287bec50c286..7668831c919fd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3188,6 +3188,7 @@ enum mf_flags { MF_SOFT_OFFLINE = 1 << 3, MF_UNPOISON = 1 << 4, MF_SW_SIMULATED = 1 << 5, + MF_NO_RETRY = 1 << 6, }; int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, unsigned long count, int mf_flags); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 89e74ec8a95f5..f17f0475db939 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1788,7 +1788,8 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags) count_increased = true; } else { ret = -EBUSY; - goto out; + if (!(flags & MF_NO_RETRY)) + goto out; } if (hugetlb_set_page_hwpoison(head, page)) { @@ -1815,7 +1816,6 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb struct page *p = pfn_to_page(pfn); struct page *head; unsigned long page_flags; - bool retry = true; *hugetlb = 1; retry: @@ -1831,8 +1831,8 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb } return res; } else if (res == -EBUSY) { - if (retry) { - retry = false; + if (!(flags & MF_NO_RETRY)) { + flags |= MF_NO_RETRY; goto retry; } action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); From b54f7577764ab00a6684138a0e98b12463a981be Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:51 +0900 Subject: [PATCH 253/321] mm, hwpoison: make __page_handle_poison returns int __page_handle_poison() returns bool that shows whether take_page_off_buddy() has passed or not now. But we will want to distinguish another case of "dissolve has passed but taking off failed" by its return value. So change the type of the return value. No functional change. Link: https://lkml.kernel.org/r/20220708053653.964464-7-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Reviewed-by: Miaohe Lin Cc: David Hildenbrand Cc: kernel test robot Cc: Liu Shixin Cc: Mike Kravetz Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- mm/memory-failure.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index f17f0475db939..e38a94007ffe1 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -71,7 +71,13 @@ atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); static bool hw_memory_failure __read_mostly = false; -static bool __page_handle_poison(struct page *page) +/* + * Return values: + * 1: the page is dissolved (if needed) and taken off from buddy, + * 0: the page is dissolved (if needed) and not taken off from buddy, + * < 0: failed to dissolve. 
+ */ +static int __page_handle_poison(struct page *page) { int ret; @@ -81,7 +87,7 @@ static bool __page_handle_poison(struct page *page) ret = take_page_off_buddy(page); zone_pcp_enable(page_zone(page)); - return ret > 0; + return ret; } static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) @@ -91,7 +97,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo * Doing this check for free pages is also fine since dissolve_free_huge_page * returns 0 for non-hugetlb pages as well. */ - if (!__page_handle_poison(page)) + if (__page_handle_poison(page) <= 0) /* * We could fail to take off the target page from buddy * for example due to racy page allocation, but that's @@ -1086,7 +1092,7 @@ static int me_huge_page(struct page_state *ps, struct page *p) * subpages. */ put_page(hpage); - if (__page_handle_poison(p)) { + if (__page_handle_poison(p) > 0) { page_ref_inc(p); res = MF_RECOVERED; } @@ -1855,7 +1861,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb if (res == 0) { unlock_page(head); res = MF_FAILED; - if (__page_handle_poison(p)) { + if (__page_handle_poison(p) > 0) { page_ref_inc(p); res = MF_RECOVERED; } From 1dad295fc7807ac31f79cc320e1b6705607f76e0 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:52 +0900 Subject: [PATCH 254/321] mm, hwpoison: skip raw hwpoison page in freeing 1GB hugepage Currently if memory_failure() (modified to remove blocking code with subsequent patch) is called on a page in some 1GB hugepage, memory error handling fails and the raw error page gets into leaked state. The impact is small in production systems (just leaked single 4kB page), but this limits the testability because unpoison doesn't work for it. We can no longer create 1GB hugepage on the 1GB physical address range with such leaked pages, that's not useful when testing on small systems. When a hwpoison page in a 1GB hugepage is handled, it's caught by the PageHWPoison check in free_pages_prepare() because the 1GB hugepage is broken down into raw error pages before coming to this point: if (unlikely(PageHWPoison(page)) && !order) { ... return false; } Then, the page is not sent to buddy and the page refcount is left 0. Originally this check is supposed to work when the error page is freed from page_handle_poison() (that is called from soft-offline), but now we are opening another path to call it, so the callers of __page_handle_poison() need to handle the case by considering the return value 0 as success. Then page refcount for hwpoison is properly incremented so unpoison works. Link: https://lkml.kernel.org/r/20220708053653.964464-8-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Reviewed-by: Miaohe Lin Cc: David Hildenbrand Cc: kernel test robot Cc: Liu Shixin Cc: Mike Kravetz Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- mm/memory-failure.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index e38a94007ffe1..cdf266aaabdfd 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1084,7 +1084,6 @@ static int me_huge_page(struct page_state *ps, struct page *p) res = truncate_error_page(hpage, page_to_pfn(p), mapping); unlock_page(hpage); } else { - res = MF_FAILED; unlock_page(hpage); /* * migration entry prevents later access on error hugepage, @@ -1092,9 +1091,11 @@ static int me_huge_page(struct page_state *ps, struct page *p) * subpages. 
*/ put_page(hpage); - if (__page_handle_poison(p) > 0) { + if (__page_handle_poison(p) >= 0) { page_ref_inc(p); res = MF_RECOVERED; + } else { + res = MF_FAILED; } } @@ -1860,10 +1861,11 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb */ if (res == 0) { unlock_page(head); - res = MF_FAILED; - if (__page_handle_poison(p) > 0) { + if (__page_handle_poison(p) >= 0) { page_ref_inc(p); res = MF_RECOVERED; + } else { + res = MF_FAILED; } action_result(pfn, MF_MSG_FREE_HUGE, res); return res == MF_RECOVERED ? 0 : -EBUSY; From 0866ce8a92c7d8c8efcb17593fe7b225d26664a2 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 8 Jul 2022 14:36:53 +0900 Subject: [PATCH 255/321] mm, hwpoison: enable memory error handling on 1GB hugepage Now error handling code is prepared, so remove the blocking code and enable memory error handling on 1GB hugepage. Link: https://lkml.kernel.org/r/20220708053653.964464-9-naoya.horiguchi@linux.dev Signed-off-by: Naoya Horiguchi Reviewed-by: Miaohe Lin Cc: David Hildenbrand Cc: kernel test robot Cc: Liu Shixin Cc: Mike Kravetz Cc: Muchun Song Cc: Oscar Salvador Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 - include/ras/ras_event.h | 1 - mm/memory-failure.c | 16 ---------------- 3 files changed, 18 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 7668831c919fd..b0e83835184e9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3241,7 +3241,6 @@ enum mf_action_page_type { MF_MSG_DIFFERENT_COMPOUND, MF_MSG_HUGE, MF_MSG_FREE_HUGE, - MF_MSG_NON_PMD_HUGE, MF_MSG_UNMAP_FAILED, MF_MSG_DIRTY_SWAPCACHE, MF_MSG_CLEAN_SWAPCACHE, diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index d0337a41141c8..cbd3ddd7c33d4 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -360,7 +360,6 @@ TRACE_EVENT(aer_event, EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \ EM ( MF_MSG_HUGE, "huge page" ) \ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \ - EM ( MF_MSG_NON_PMD_HUGE, "non-pmd-sized huge page" ) \ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index cdf266aaabdfd..3640bbe5a3a2d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -765,7 +765,6 @@ static const char * const action_page_types[] = { [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", [MF_MSG_HUGE] = "huge page", [MF_MSG_FREE_HUGE] = "free huge page", - [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page", [MF_MSG_UNMAP_FAILED] = "unmapping failed page", [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", @@ -1873,21 +1872,6 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb page_flags = head->flags; - /* - * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so - * simply disable it. In order to make it work properly, we need - * make sure that: - * - conversion of a pud that maps an error hugetlb into hwpoison - * entry properly works, and - * - other mm code walking over page table is aware of pud-aligned - * hwpoison entries. 
- */ - if (huge_page_size(page_hstate(head)) > PMD_SIZE) { - action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED); - res = -EBUSY; - goto out; - } - if (!hwpoison_user_mappings(p, pfn, flags, head)) { action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); res = -EBUSY; From 97c1e64fcc5c61e26c806d635e60004f9607af9f Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:19 -0700 Subject: [PATCH 256/321] mm/khugepaged: remove redundant transhuge_vma_suitable() check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm: userspace hugepage collapse", v7. Introduction -------------------------------- This series provides a mechanism for userspace to induce a collapse of eligible ranges of memory into transparent hugepages in process context, thus permitting users to more tightly control their own hugepage utilization policy at their own expense. This idea was introduced by David Rientjes[5]. Interface -------------------------------- The proposed interface adds a new madvise(2) mode, MADV_COLLAPSE, and leverages the new process_madvise(2) call. process_madvise(2) Performs a synchronous collapse of the native pages mapped by the list of iovecs into transparent hugepages. This operation is independent of the system THP sysfs settings, but attempts to collapse VMAs marked VM_NOHUGEPAGE will still fail. THP allocation may enter direct reclaim and/or compaction. When a range spans multiple VMAs, the semantics of the collapse over of each VMA is independent from the others. Caller must have CAP_SYS_ADMIN if not acting on self. Return value follows existing process_madvise(2) conventions. A “success” indicates that all hugepage-sized/aligned regions covered by the provided range were either successfully collapsed, or were already pmd-mapped THPs. madvise(2) Equivalent to process_madvise(2) on self, with 0 returned on “success”. Current Use-Cases -------------------------------- (1) Immediately back executable text by THPs. Current support provided by CONFIG_READ_ONLY_THP_FOR_FS may take a long time on a large system which might impair services from serving at their full rated load after (re)starting. Tricks like mremap(2)'ing text onto anonymous memory to immediately realize iTLB performance prevents page sharing and demand paging, both of which increase steady state memory footprint. With MADV_COLLAPSE, we get the best of both worlds: Peak upfront performance and lower RAM footprints. Note that subsequent support for file-backed memory is required here. (2) malloc() implementations that manage memory in hugepage-sized chunks, but sometimes subrelease memory back to the system in native-sized chunks via MADV_DONTNEED; zapping the pmd. Later, when the memory is hot, the implementation could madvise(MADV_COLLAPSE) to re-back the memory by THPs to regain hugepage coverage and dTLB performance. TCMalloc is such an implementation that could benefit from this[6]. A prior study of Google internal workloads during evaluation of Temeraire, a hugepage-aware enhancement to TCMalloc, showed that nearly 20% of all cpu cycles were spent in dTLB stalls, and that increasing hugepage coverage by even small amount can help with that[7]. (3) userfaultfd-based live migration of virtual machines satisfy UFFD faults by fetching native-sized pages over the network (to avoid latency of transferring an entire hugepage). However, after guest memory has been fully copied to the new host, MADV_COLLAPSE can be used to immediately increase guest performance. 
Note that subsequent support for file/shmem-backed memory is required here. (4) HugeTLB high-granularity mapping allows HugeTLB a HugeTLB page to be mapped at different levels in the page tables[8]. As it's not "transparent" like THP, HugeTLB high-granularity mappings require an explicit user API. It is intended that MADV_COLLAPSE be co-opted for this use case[9]. Note that subsequent support for HugeTLB memory is required here. Future work -------------------------------- Only private anonymous memory is supported by this series. File and shmem memory support will be added later. One possible user of this functionality is a userspace agent that attempts to optimize THP utilization system-wide by allocating THPs based on, for example, task priority, task performance requirements, or heatmaps. For the latter, one idea that has already surfaced is using DAMON to identify hot regions, and driving THP collapse through a new DAMOS_COLLAPSE scheme[10]. Sequence of Patches -------------------------------- * Patch 1 is a cleanup patch. * Patch 2 (Yang Shi) removes UMA hugepage preallocation and makes khugepaged hugepage allocation independent of CONFIG_NUMA * Patches 3-8 perform refactoring of collapse logic within khugepaged.c and introduce the notion of a collapse context. * Patch 9 introduces MADV_COLLAPSE and is the main patch in this series. * Patches 10-13 add additional support: tracepoints, clean-ups, process_madvise(2), and /proc//smaps output * Patches 14-18 add selftests. This patch (of 18): transhuge_vma_suitable() is called twice in hugepage_vma_revalidate() path. Remove the first check, and rely on the second check inside hugepage_vma_check(). Link: https://lkml.kernel.org/r/20220706235936.2197195-1-zokeefe@google.com Link: https://lkml.kernel.org/r/20220706235936.2197195-2-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: David Hildenbrand Cc: David Rientjes Cc: Matthew Wilcox Cc: Michal Hocko Cc: Pasha Tatashin Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Minchan Kim Cc: Pavel Begunkov Cc: Thomas Bogendoerfer Signed-off-by: Andrew Morton --- mm/khugepaged.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index cfe231c5958f7..5269d15e20f6f 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -918,8 +918,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!vma) return SCAN_VMA_NULL; - if (!transhuge_vma_suitable(vma, address)) - return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) return SCAN_VMA_CHECK; /* From d7e6aef068f78526aed955380f63e5de57625e2d Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 6 Jul 2022 16:59:20 -0700 Subject: [PATCH 257/321] mm: khugepaged: don't carry huge page to the next loop for !CONFIG_NUMA The khugepaged has optimization to reduce huge page allocation calls for !CONFIG_NUMA by carrying the allocated but failed to collapse huge page to the next loop. CONFIG_NUMA doesn't do so since the next loop may try to collapse huge page from a different node, so it doesn't make too much sense to carry it. 
But when NUMA=n, the huge page is allocated by khugepaged_prealloc_page() before scanning the address space, so it means huge page may be allocated even though there is no suitable range for collapsing. Then the page would be just freed if khugepaged already made enough progress. This could make NUMA=n run have 5 times as much thp_collapse_alloc as NUMA=y run. This problem actually makes things worse due to the way more pointless THP allocations and makes the optimization pointless. This could be fixed by carrying the huge page across scans, but it will complicate the code further and the huge page may be carried indefinitely. But if we take one step back, the optimization itself seems not worth keeping nowadays since: * Not too many users build NUMA=n kernel nowadays even though the kernel is actually running on a non-NUMA machine. Some small devices may run NUMA=n kernel, but I don't think they actually use THP. * Since commit 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists"), THP could be cached by pcp. This actually somehow does the job done by the optimization. Link: https://lkml.kernel.org/r/20220706235936.2197195-3-zokeefe@google.com Signed-off-by: Yang Shi Signed-off-by: Zach O'Keefe Co-developed-by: Peter Xu Signed-off-by: Peter Xu Cc: Hugh Dickins Cc: "Kirill A. Shutemov" Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/khugepaged.c | 120 +++++++++++------------------------------------- 1 file changed, 26 insertions(+), 94 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5269d15e20f6f..196eaadbf4151 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -796,29 +796,16 @@ static int khugepaged_find_target_node(void) last_khugepaged_target_node = target_node; return target_node; } - -static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) +#else +static int khugepaged_find_target_node(void) { - if (IS_ERR(*hpage)) { - if (!*wait) - return false; - - *wait = false; - *hpage = NULL; - khugepaged_alloc_sleep(); - } else if (*hpage) { - put_page(*hpage); - *hpage = NULL; - } - - return true; + return 0; } +#endif static struct page * khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) { - VM_BUG_ON_PAGE(*hpage, *hpage); - *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); @@ -830,74 +817,6 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) count_vm_event(THP_COLLAPSE_ALLOC); return *hpage; } -#else -static int khugepaged_find_target_node(void) -{ - return 0; -} - -static inline struct page *alloc_khugepaged_hugepage(void) -{ - struct page *page; - - page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), - HPAGE_PMD_ORDER); - if (page) - prep_transhuge_page(page); - return page; -} - -static struct page *khugepaged_alloc_hugepage(bool *wait) -{ - struct page *hpage; - - do { - hpage = alloc_khugepaged_hugepage(); - if (!hpage) { - count_vm_event(THP_COLLAPSE_ALLOC_FAILED); - if (!*wait) - return NULL; - - *wait = false; - khugepaged_alloc_sleep(); - } else - 
count_vm_event(THP_COLLAPSE_ALLOC); - } while (unlikely(!hpage) && likely(hugepage_flags_enabled())); - - return hpage; -} - -static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) -{ - /* - * If the hpage allocated earlier was briefly exposed in page cache - * before collapse_file() failed, it is possible that racing lookups - * have not yet completed, and would then be unpleasantly surprised by - * finding the hpage reused for the same mapping at a different offset. - * Just release the previous allocation if there is any danger of that. - */ - if (*hpage && page_count(*hpage) > 1) { - put_page(*hpage); - *hpage = NULL; - } - - if (!*hpage) - *hpage = khugepaged_alloc_hugepage(wait); - - if (unlikely(!*hpage)) - return false; - - return true; -} - -static struct page * -khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) -{ - VM_BUG_ON(!*hpage); - - return *hpage; -} -#endif /* * If mmap_lock temporarily dropped, revalidate vma @@ -1148,8 +1067,10 @@ static void collapse_huge_page(struct mm_struct *mm, out_up_write: mmap_write_unlock(mm); out_nolock: - if (!IS_ERR_OR_NULL(*hpage)) + if (!IS_ERR_OR_NULL(*hpage)) { mem_cgroup_uncharge(page_folio(*hpage)); + put_page(*hpage); + } trace_mm_collapse_huge_page(mm, isolated, result); return; } @@ -1951,8 +1872,10 @@ static void collapse_file(struct mm_struct *mm, unlock_page(new_page); out: VM_BUG_ON(!list_empty(&pagelist)); - if (!IS_ERR_OR_NULL(*hpage)) + if (!IS_ERR_OR_NULL(*hpage)) { mem_cgroup_uncharge(page_folio(*hpage)); + put_page(*hpage); + } /* TODO: tracepoints */ } @@ -2197,10 +2120,7 @@ static void khugepaged_do_scan(void) lru_add_drain_all(); - while (progress < pages) { - if (!khugepaged_prealloc_page(&hpage, &wait)) - break; - + while (true) { cond_resched(); if (unlikely(kthread_should_stop() || try_to_freeze())) @@ -2216,10 +2136,22 @@ static void khugepaged_do_scan(void) else progress = pages; spin_unlock(&khugepaged_mm_lock); - } - if (!IS_ERR_OR_NULL(hpage)) - put_page(hpage); + if (progress >= pages) + break; + + if (IS_ERR(hpage)) { + /* + * If fail to allocate the first time, try to sleep for + * a while. When hit again, cancel the scan. + */ + if (!wait) + break; + wait = false; + hpage = NULL; + khugepaged_alloc_sleep(); + } + } } static bool khugepaged_should_wakeup(void) From 6bf0fa415341f9382ef85194604243bce47f2216 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:21 -0700 Subject: [PATCH 258/321] mm/khugepaged: add struct collapse_control Modularize hugepage collapse by introducing struct collapse_control. This structure serves to describe the properties of the requested collapse, as well as serve as a local scratch pad to use during the collapse itself. Start by moving global per-node khugepaged statistics into this new structure. Note that this structure is still statically allocated since CONFIG_NODES_SHIFT might be arbitrary large, and stack-allocating a MAX_NUMNODES-sized array could cause -Wframe-large-than= errors. Link: https://lkml.kernel.org/r/20220706235936.2197195-4-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/khugepaged.c | 87 ++++++++++++++++++++++++++++--------------------- 1 file changed, 50 insertions(+), 37 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 196eaadbf4151..f1ef02d9fe07c 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -85,6 +85,14 @@ static struct kmem_cache *mm_slot_cache __read_mostly; #define MAX_PTE_MAPPED_THP 8 +struct collapse_control { + /* Num pages scanned per node */ + int node_load[MAX_NUMNODES]; + + /* Last target selected in khugepaged_find_target_node() */ + int last_target_node; +}; + /** * struct mm_slot - hash lookup from mm to mm_slot * @hash: hash collision list @@ -735,9 +743,12 @@ static void khugepaged_alloc_sleep(void) remove_wait_queue(&khugepaged_wait, &wait); } -static int khugepaged_node_load[MAX_NUMNODES]; -static bool khugepaged_scan_abort(int nid) +struct collapse_control khugepaged_collapse_control = { + .last_target_node = NUMA_NO_NODE, +}; + +static bool khugepaged_scan_abort(int nid, struct collapse_control *cc) { int i; @@ -749,11 +760,11 @@ static bool khugepaged_scan_abort(int nid) return false; /* If there is a count for this node already, it must be acceptable */ - if (khugepaged_node_load[nid]) + if (cc->node_load[nid]) return false; for (i = 0; i < MAX_NUMNODES; i++) { - if (!khugepaged_node_load[i]) + if (!cc->node_load[i]) continue; if (node_distance(nid, i) > node_reclaim_distance) return true; @@ -772,32 +783,31 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) } #ifdef CONFIG_NUMA -static int khugepaged_find_target_node(void) +static int khugepaged_find_target_node(struct collapse_control *cc) { - static int last_khugepaged_target_node = NUMA_NO_NODE; int nid, target_node = 0, max_value = 0; /* find first node with max normal pages hit */ for (nid = 0; nid < MAX_NUMNODES; nid++) - if (khugepaged_node_load[nid] > max_value) { - max_value = khugepaged_node_load[nid]; + if (cc->node_load[nid] > max_value) { + max_value = cc->node_load[nid]; target_node = nid; } /* do some balance if several nodes have the same hit record */ - if (target_node <= last_khugepaged_target_node) - for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; - nid++) - if (max_value == khugepaged_node_load[nid]) { + if (target_node <= cc->last_target_node) + for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES; + nid++) + if (max_value == cc->node_load[nid]) { target_node = nid; break; } - last_khugepaged_target_node = target_node; + cc->last_target_node = target_node; return target_node; } #else -static int khugepaged_find_target_node(void) +static int khugepaged_find_target_node(struct collapse_control *cc) { return 0; } @@ -1075,10 +1085,9 @@ static void collapse_huge_page(struct mm_struct *mm, return; } -static int khugepaged_scan_pmd(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, - struct page **hpage) +static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, struct page **hpage, + struct collapse_control *cc) { pmd_t *pmd; pte_t *pte, *_pte; @@ -1098,7 +1107,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, goto out; } - memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); + memset(cc->node_load, 0, 
sizeof(cc->node_load)); pte = pte_offset_map_lock(mm, pmd, address, &ptl); for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { @@ -1164,16 +1173,16 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, /* * Record which node the original page is from and save this - * information to khugepaged_node_load[]. + * information to cc->node_load[]. * Khugepaged will allocate hugepage from the node has the max * hit record. */ node = page_to_nid(page); - if (khugepaged_scan_abort(node)) { + if (khugepaged_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; goto out_unmap; } - khugepaged_node_load[node]++; + cc->node_load[node]++; if (!PageLRU(page)) { result = SCAN_PAGE_LRU; goto out_unmap; @@ -1224,7 +1233,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, out_unmap: pte_unmap_unlock(pte, ptl); if (ret) { - node = khugepaged_find_target_node(); + node = khugepaged_find_target_node(cc); /* collapse_huge_page will return with the mmap_lock released */ collapse_huge_page(mm, address, hpage, node, referenced, unmapped); @@ -1879,8 +1888,9 @@ static void collapse_file(struct mm_struct *mm, /* TODO: tracepoints */ } -static void khugepaged_scan_file(struct mm_struct *mm, - struct file *file, pgoff_t start, struct page **hpage) +static void khugepaged_scan_file(struct mm_struct *mm, struct file *file, + pgoff_t start, struct page **hpage, + struct collapse_control *cc) { struct page *page = NULL; struct address_space *mapping = file->f_mapping; @@ -1891,7 +1901,7 @@ static void khugepaged_scan_file(struct mm_struct *mm, present = 0; swap = 0; - memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); + memset(cc->node_load, 0, sizeof(cc->node_load)); rcu_read_lock(); xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { if (xas_retry(&xas, page)) @@ -1916,11 +1926,11 @@ static void khugepaged_scan_file(struct mm_struct *mm, } node = page_to_nid(page); - if (khugepaged_scan_abort(node)) { + if (khugepaged_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; break; } - khugepaged_node_load[node]++; + cc->node_load[node]++; if (!PageLRU(page)) { result = SCAN_PAGE_LRU; @@ -1953,7 +1963,7 @@ static void khugepaged_scan_file(struct mm_struct *mm, result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); } else { - node = khugepaged_find_target_node(); + node = khugepaged_find_target_node(cc); collapse_file(mm, file, start, hpage, node); } } @@ -1961,8 +1971,9 @@ static void khugepaged_scan_file(struct mm_struct *mm, /* TODO: tracepoints */ } #else -static void khugepaged_scan_file(struct mm_struct *mm, - struct file *file, pgoff_t start, struct page **hpage) +static void khugepaged_scan_file(struct mm_struct *mm, struct file *file, + pgoff_t start, struct page **hpage, + struct collapse_control *cc) { BUILD_BUG(); } @@ -1973,7 +1984,8 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) #endif static unsigned int khugepaged_scan_mm_slot(unsigned int pages, - struct page **hpage) + struct page **hpage, + struct collapse_control *cc) __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) { @@ -2050,12 +2062,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, mmap_read_unlock(mm); ret = 1; - khugepaged_scan_file(mm, file, pgoff, hpage); + khugepaged_scan_file(mm, file, pgoff, hpage, + cc); fput(file); } else { ret = khugepaged_scan_pmd(mm, vma, khugepaged_scan.address, - hpage); + hpage, cc); } /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; @@ -2111,7 +2124,7 @@ static 
int khugepaged_wait_event(void) kthread_should_stop(); } -static void khugepaged_do_scan(void) +static void khugepaged_do_scan(struct collapse_control *cc) { struct page *hpage = NULL; unsigned int progress = 0, pass_through_head = 0; @@ -2132,7 +2145,7 @@ static void khugepaged_do_scan(void) if (khugepaged_has_work() && pass_through_head < 2) progress += khugepaged_scan_mm_slot(pages - progress, - &hpage); + &hpage, cc); else progress = pages; spin_unlock(&khugepaged_mm_lock); @@ -2188,7 +2201,7 @@ static int khugepaged(void *none) set_user_nice(current, MAX_NICE); while (!kthread_should_stop()) { - khugepaged_do_scan(); + khugepaged_do_scan(&khugepaged_collapse_control); khugepaged_wait_work(); } From 3d348543aeb74c65852a2a8ca3be42b58dca0103 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:22 -0700 Subject: [PATCH 259/321] mm/khugepaged: dedup and simplify hugepage alloc and charging The following code is duplicated in collapse_huge_page() and collapse_file(): gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; new_page = khugepaged_alloc_page(hpage, gfp, node); if (!new_page) { result = SCAN_ALLOC_HUGE_PAGE_FAIL; goto out; } if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) { result = SCAN_CGROUP_CHARGE_FAIL; goto out; } count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC); Also, "node" is passed as an argument to both collapse_huge_page() and collapse_file() and obtained the same way, via khugepaged_find_target_node(). Move all this into a new helper, alloc_charge_hpage(), and remove the duplicate code from collapse_huge_page() and collapse_file(). Also, simplify khugepaged_alloc_page() by returning a bool indicating allocation success instead of a copy of the allocated struct page *. Link: https://lkml.kernel.org/r/20220706235936.2197195-5-zokeefe@google.com Signed-off-by: Zach O'Keefe Suggested-by: Peter Xu Acked-by: David Rientjes Reviewed-by: Yang Shi Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/khugepaged.c | 78 ++++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 43 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f1ef02d9fe07c..8068adf246202 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -813,19 +813,18 @@ static int khugepaged_find_target_node(struct collapse_control *cc) } #endif -static struct page * -khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) +static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) { *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); - return NULL; + return false; } prep_transhuge_page(*hpage); count_vm_event(THP_COLLAPSE_ALLOC); - return *hpage; + return true; } /* @@ -921,10 +920,24 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, return true; } -static void collapse_huge_page(struct mm_struct *mm, - unsigned long address, - struct page **hpage, - int node, int referenced, int unmapped) +static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, + struct collapse_control *cc) +{ + /* Only allocate from the target node */ + gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; + int node = khugepaged_find_target_node(cc); + + if (!khugepaged_alloc_page(hpage, gfp, node)) + return SCAN_ALLOC_HUGE_PAGE_FAIL; + if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp))) + return SCAN_CGROUP_CHARGE_FAIL; + count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC); + return SCAN_SUCCEED; +} + +static void collapse_huge_page(struct mm_struct *mm, unsigned long address, + struct page **hpage, int referenced, + int unmapped, struct collapse_control *cc) { LIST_HEAD(compound_pagelist); pmd_t *pmd, _pmd; @@ -935,13 +948,9 @@ static void collapse_huge_page(struct mm_struct *mm, int isolated = 0, result = 0; struct vm_area_struct *vma; struct mmu_notifier_range range; - gfp_t gfp; VM_BUG_ON(address & ~HPAGE_PMD_MASK); - /* Only allocate from the target node */ - gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; - /* * Before allocating the hugepage, release the mmap_lock read lock. * The allocation can take potentially a long time if it involves @@ -949,17 +958,12 @@ static void collapse_huge_page(struct mm_struct *mm, * that. We will recheck the vma after taking it again in write mode. 
*/ mmap_read_unlock(mm); - new_page = khugepaged_alloc_page(hpage, gfp, node); - if (!new_page) { - result = SCAN_ALLOC_HUGE_PAGE_FAIL; - goto out_nolock; - } - if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) { - result = SCAN_CGROUP_CHARGE_FAIL; + result = alloc_charge_hpage(hpage, mm, cc); + if (result != SCAN_SUCCEED) goto out_nolock; - } - count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC); + + new_page = *hpage; mmap_read_lock(mm); result = hugepage_vma_revalidate(mm, address, &vma); @@ -1233,10 +1237,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, out_unmap: pte_unmap_unlock(pte, ptl); if (ret) { - node = khugepaged_find_target_node(cc); /* collapse_huge_page will return with the mmap_lock released */ - collapse_huge_page(mm, address, hpage, node, - referenced, unmapped); + collapse_huge_page(mm, address, hpage, referenced, unmapped, + cc); } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, @@ -1504,7 +1507,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * @file: file that collapse on * @start: collapse start address * @hpage: new allocated huge page for collapse - * @node: appointed node the new huge page allocate from + * @cc: collapse context and scratchpad * * Basic scheme is simple, details are more complex: * - allocate and lock a new huge page; @@ -1521,12 +1524,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * + restore gaps in the page cache; * + unlock and free huge page; */ -static void collapse_file(struct mm_struct *mm, - struct file *file, pgoff_t start, - struct page **hpage, int node) +static void collapse_file(struct mm_struct *mm, struct file *file, + pgoff_t start, struct page **hpage, + struct collapse_control *cc) { struct address_space *mapping = file->f_mapping; - gfp_t gfp; struct page *new_page; pgoff_t index, end = start + HPAGE_PMD_NR; LIST_HEAD(pagelist); @@ -1538,20 +1540,11 @@ static void collapse_file(struct mm_struct *mm, VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); - /* Only allocate from the target node */ - gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; - - new_page = khugepaged_alloc_page(hpage, gfp, node); - if (!new_page) { - result = SCAN_ALLOC_HUGE_PAGE_FAIL; + result = alloc_charge_hpage(hpage, mm, cc); + if (result != SCAN_SUCCEED) goto out; - } - if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) { - result = SCAN_CGROUP_CHARGE_FAIL; - goto out; - } - count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC); + new_page = *hpage; /* * Ensure we have slots for all the pages in the range. This is @@ -1963,8 +1956,7 @@ static void khugepaged_scan_file(struct mm_struct *mm, struct file *file, result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); } else { - node = khugepaged_find_target_node(cc); - collapse_file(mm, file, start, hpage, node); + collapse_file(mm, file, start, hpage, cc); } } From 1900206cb8852066c0b81dfe386e891669a81349 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:23 -0700 Subject: [PATCH 260/321] mm/khugepaged: propagate enum scan_result codes back to callers Propagate enum scan_result codes back through return values of functions downstream of khugepaged_scan_file() and khugepaged_scan_pmd() to inform callers if the operation was successful, and if not, why. 
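Concretely, the convention this patch converges on looks like the following fragment, condensed from the collapse_huge_page() hunks further down (SCAN_SUCCEED and the other codes are the existing enum scan_result values):

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

Every step hands back the precise scan_result, so callers and tracepoints see why a collapse stopped, not merely that it did.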
Since khugepaged_scan_pmd()'s return value already has a specific meaning (whether mmap_lock was unlocked or not), add a bool* argument to khugepaged_scan_pmd() to retrieve this information. Change khugepaged to take action based on the return values of khugepaged_scan_file() and khugepaged_scan_pmd() instead of acting deep within the collapsing functions themselves. hugepage_vma_revalidate() now returns SCAN_SUCCEED on success to be more consistent with enum scan_result propagation. Remove dependency on error pointers to communicate to khugepaged that allocation failed and it should sleep; instead just use the result of the scan (SCAN_ALLOC_HUGE_PAGE_FAIL if allocation fails). Link: https://lkml.kernel.org/r/20220706235936.2197195-6-zokeefe@google.com Signed-off-by: Zach O'Keefe Reviewed-by: Yang Shi Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/khugepaged.c | 233 ++++++++++++++++++++++++------------------------ 1 file changed, 117 insertions(+), 116 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 8068adf246202..147f5828f052c 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -558,7 +558,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, { struct page *page = NULL; pte_t *_pte; - int none_or_zero = 0, shared = 0, result = 0, referenced = 0; + int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0; bool writable = false; for (_pte = pte; _pte < pte + HPAGE_PMD_NR; @@ -672,13 +672,13 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, result = SCAN_SUCCEED; trace_mm_collapse_huge_page_isolate(page, none_or_zero, referenced, writable, result); - return 1; + return result; } out: release_pte_pages(pte, _pte, compound_pagelist); trace_mm_collapse_huge_page_isolate(page, none_or_zero, referenced, writable, result); - return 0; + return result; } static void __collapse_huge_page_copy(pte_t *pte, struct page *page, @@ -818,7 +818,6 @@ static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); - *hpage = ERR_PTR(-ENOMEM); return false; } @@ -830,8 +829,7 @@ static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) /* * If mmap_lock temporarily dropped, revalidate vma * before taking mmap_lock. - * Return 0 if succeeds, otherwise return none-zero - * value (scan code). + * Returns enum scan_result value. */ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, @@ -857,7 +855,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, */ if (!vma->anon_vma || !vma_is_anonymous(vma)) return SCAN_VMA_CHECK; - return 0; + return SCAN_SUCCEED; } /* @@ -868,10 +866,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, * Note that if false is returned, mmap_lock will be released. 
*/ -static bool __collapse_huge_page_swapin(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long haddr, pmd_t *pmd, - int referenced) +static int __collapse_huge_page_swapin(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long haddr, pmd_t *pmd, + int referenced) { int swapped_in = 0; vm_fault_t ret = 0; @@ -902,12 +900,13 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, */ if (ret & VM_FAULT_RETRY) { trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); - return false; + /* Likely, but not guaranteed, that page lock failed */ + return SCAN_PAGE_LOCK; } if (ret & VM_FAULT_ERROR) { mmap_read_unlock(mm); trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); - return false; + return SCAN_FAIL; } swapped_in++; } @@ -917,7 +916,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, lru_add_drain(); trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); - return true; + return SCAN_SUCCEED; } static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, @@ -935,17 +934,17 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, return SCAN_SUCCEED; } -static void collapse_huge_page(struct mm_struct *mm, unsigned long address, - struct page **hpage, int referenced, - int unmapped, struct collapse_control *cc) +static int collapse_huge_page(struct mm_struct *mm, unsigned long address, + int referenced, int unmapped, + struct collapse_control *cc) { LIST_HEAD(compound_pagelist); pmd_t *pmd, _pmd; pte_t *pte; pgtable_t pgtable; - struct page *new_page; + struct page *hpage; spinlock_t *pmd_ptl, *pte_ptl; - int isolated = 0, result = 0; + int result = SCAN_FAIL; struct vm_area_struct *vma; struct mmu_notifier_range range; @@ -959,15 +958,13 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, */ mmap_read_unlock(mm); - result = alloc_charge_hpage(hpage, mm, cc); + result = alloc_charge_hpage(&hpage, mm, cc); if (result != SCAN_SUCCEED) goto out_nolock; - new_page = *hpage; - mmap_read_lock(mm); result = hugepage_vma_revalidate(mm, address, &vma); - if (result) { + if (result != SCAN_SUCCEED) { mmap_read_unlock(mm); goto out_nolock; } @@ -979,14 +976,16 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, goto out_nolock; } - /* - * __collapse_huge_page_swapin will return with mmap_lock released - * when it fails. So we jump out_nolock directly in that case. - * Continuing to collapse causes inconsistency. - */ - if (unmapped && !__collapse_huge_page_swapin(mm, vma, address, - pmd, referenced)) { - goto out_nolock; + if (unmapped) { + /* + * __collapse_huge_page_swapin will return with mmap_lock + * released when it fails. So we jump out_nolock directly in + * that case. Continuing to collapse causes inconsistency. 
+ */ + result = __collapse_huge_page_swapin(mm, vma, address, pmd, + referenced); + if (result != SCAN_SUCCEED) + goto out_nolock; } mmap_read_unlock(mm); @@ -997,7 +996,7 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, */ mmap_write_lock(mm); result = hugepage_vma_revalidate(mm, address, &vma); - if (result) + if (result != SCAN_SUCCEED) goto out_up_write; /* check if the pmd is still valid */ if (mm_find_pmd(mm, address) != pmd) @@ -1024,11 +1023,11 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, mmu_notifier_invalidate_range_end(&range); spin_lock(pte_ptl); - isolated = __collapse_huge_page_isolate(vma, address, pte, - &compound_pagelist); + result = __collapse_huge_page_isolate(vma, address, pte, + &compound_pagelist); spin_unlock(pte_ptl); - if (unlikely(!isolated)) { + if (unlikely(result != SCAN_SUCCEED)) { pte_unmap(pte); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); @@ -1040,7 +1039,6 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, pmd_populate(mm, pmd, pmd_pgtable(_pmd)); spin_unlock(pmd_ptl); anon_vma_unlock_write(vma->anon_vma); - result = SCAN_FAIL; goto out_up_write; } @@ -1050,8 +1048,8 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, */ anon_vma_unlock_write(vma->anon_vma); - __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl, - &compound_pagelist); + __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl, + &compound_pagelist); pte_unmap(pte); /* * spin_lock() below is not the equivalent of smp_wmb(), but @@ -1059,43 +1057,42 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, * avoid the copy_huge_page writes to become visible after * the set_pmd_at() write. */ - __SetPageUptodate(new_page); + __SetPageUptodate(hpage); pgtable = pmd_pgtable(_pmd); - _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); + _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); - page_add_new_anon_rmap(new_page, vma, address); - lru_cache_add_inactive_or_unevictable(new_page, vma); + page_add_new_anon_rmap(hpage, vma, address); + lru_cache_add_inactive_or_unevictable(hpage, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); - *hpage = NULL; + hpage = NULL; - khugepaged_pages_collapsed++; result = SCAN_SUCCEED; out_up_write: mmap_write_unlock(mm); out_nolock: - if (!IS_ERR_OR_NULL(*hpage)) { - mem_cgroup_uncharge(page_folio(*hpage)); - put_page(*hpage); + if (hpage) { + mem_cgroup_uncharge(page_folio(hpage)); + put_page(hpage); } - trace_mm_collapse_huge_page(mm, isolated, result); - return; + trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result); + return result; } static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, struct page **hpage, + unsigned long address, bool *mmap_locked, struct collapse_control *cc) { pmd_t *pmd; pte_t *pte, *_pte; - int ret = 0, result = 0, referenced = 0; + int result = SCAN_FAIL, referenced = 0; int none_or_zero = 0, shared = 0; struct page *page = NULL; unsigned long _address; @@ -1232,19 +1229,19 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; - ret = 1; } out_unmap: pte_unmap_unlock(pte, ptl); - if (ret) { + if (result == SCAN_SUCCEED) { + result = 
collapse_huge_page(mm, address, referenced, + unmapped, cc); /* collapse_huge_page will return with the mmap_lock released */ - collapse_huge_page(mm, address, hpage, referenced, unmapped, - cc); + *mmap_locked = false; } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, none_or_zero, result, unmapped); - return ret; + return result; } static void collect_mm_slot(struct mm_slot *mm_slot) @@ -1506,7 +1503,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * @mm: process address space where collapse happens * @file: file that collapse on * @start: collapse start address - * @hpage: new allocated huge page for collapse * @cc: collapse context and scratchpad * * Basic scheme is simple, details are more complex: @@ -1524,12 +1520,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * + restore gaps in the page cache; * + unlock and free huge page; */ -static void collapse_file(struct mm_struct *mm, struct file *file, - pgoff_t start, struct page **hpage, - struct collapse_control *cc) +static int collapse_file(struct mm_struct *mm, struct file *file, + pgoff_t start, struct collapse_control *cc) { struct address_space *mapping = file->f_mapping; - struct page *new_page; + struct page *hpage; pgoff_t index, end = start + HPAGE_PMD_NR; LIST_HEAD(pagelist); XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); @@ -1540,12 +1535,10 @@ static void collapse_file(struct mm_struct *mm, struct file *file, VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); - result = alloc_charge_hpage(hpage, mm, cc); + result = alloc_charge_hpage(&hpage, mm, cc); if (result != SCAN_SUCCEED) goto out; - new_page = *hpage; - /* * Ensure we have slots for all the pages in the range. This is * almost certainly a no-op because most of the pages must be present @@ -1562,14 +1555,14 @@ static void collapse_file(struct mm_struct *mm, struct file *file, } } while (1); - __SetPageLocked(new_page); + __SetPageLocked(hpage); if (is_shmem) - __SetPageSwapBacked(new_page); - new_page->index = start; - new_page->mapping = mapping; + __SetPageSwapBacked(hpage); + hpage->index = start; + hpage->mapping = mapping; /* - * At this point the new_page is locked and not up-to-date. + * At this point the hpage is locked and not up-to-date. * It's safe to insert it into the page cache, because nobody would * be able to map it or use it in another way until we unlock it. */ @@ -1597,7 +1590,7 @@ static void collapse_file(struct mm_struct *mm, struct file *file, result = SCAN_FAIL; goto xa_locked; } - xas_store(&xas, new_page); + xas_store(&xas, hpage); nr_none++; continue; } @@ -1739,19 +1732,19 @@ static void collapse_file(struct mm_struct *mm, struct file *file, list_add_tail(&page->lru, &pagelist); /* Finally, replace with the new page. 
*/ - xas_store(&xas, new_page); + xas_store(&xas, hpage); continue; out_unlock: unlock_page(page); put_page(page); goto xa_unlocked; } - nr = thp_nr_pages(new_page); + nr = thp_nr_pages(hpage); if (is_shmem) - __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr); + __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr); else { - __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr); + __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr); filemap_nr_thps_inc(mapping); /* * Paired with smp_mb() in do_dentry_open() to ensure @@ -1762,21 +1755,21 @@ static void collapse_file(struct mm_struct *mm, struct file *file, smp_mb(); if (inode_is_open_for_write(mapping->host)) { result = SCAN_FAIL; - __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr); + __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr); filemap_nr_thps_dec(mapping); goto xa_locked; } } if (nr_none) { - __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none); + __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none); /* nr_none is always 0 for non-shmem. */ - __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none); + __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none); } /* Join all the small entries into a single multi-index entry */ xas_set_order(&xas, start, HPAGE_PMD_ORDER); - xas_store(&xas, new_page); + xas_store(&xas, hpage); xa_locked: xas_unlock_irq(&xas); xa_unlocked: @@ -1798,11 +1791,11 @@ static void collapse_file(struct mm_struct *mm, struct file *file, index = start; list_for_each_entry_safe(page, tmp, &pagelist, lru) { while (index < page->index) { - clear_highpage(new_page + (index % HPAGE_PMD_NR)); + clear_highpage(hpage + (index % HPAGE_PMD_NR)); index++; } - copy_highpage(new_page + (page->index % HPAGE_PMD_NR), - page); + copy_highpage(hpage + (page->index % HPAGE_PMD_NR), + page); list_del(&page->lru); page->mapping = NULL; page_ref_unfreeze(page, 1); @@ -1813,23 +1806,22 @@ static void collapse_file(struct mm_struct *mm, struct file *file, index++; } while (index < end) { - clear_highpage(new_page + (index % HPAGE_PMD_NR)); + clear_highpage(hpage + (index % HPAGE_PMD_NR)); index++; } - SetPageUptodate(new_page); - page_ref_add(new_page, HPAGE_PMD_NR - 1); + SetPageUptodate(hpage); + page_ref_add(hpage, HPAGE_PMD_NR - 1); if (is_shmem) - set_page_dirty(new_page); - lru_cache_add(new_page); + set_page_dirty(hpage); + lru_cache_add(hpage); /* * Remove pte page tables, so we can re-fault the page as huge. 
*/ retract_page_tables(mapping, start); - *hpage = NULL; - - khugepaged_pages_collapsed++; + unlock_page(hpage); + hpage = NULL; } else { struct page *page; @@ -1868,22 +1860,23 @@ static void collapse_file(struct mm_struct *mm, struct file *file, VM_BUG_ON(nr_none); xas_unlock_irq(&xas); - new_page->mapping = NULL; + hpage->mapping = NULL; } - unlock_page(new_page); + if (hpage) + unlock_page(hpage); out: VM_BUG_ON(!list_empty(&pagelist)); - if (!IS_ERR_OR_NULL(*hpage)) { - mem_cgroup_uncharge(page_folio(*hpage)); - put_page(*hpage); + if (hpage) { + mem_cgroup_uncharge(page_folio(hpage)); + put_page(hpage); } /* TODO: tracepoints */ + return result; } -static void khugepaged_scan_file(struct mm_struct *mm, struct file *file, - pgoff_t start, struct page **hpage, - struct collapse_control *cc) +static int khugepaged_scan_file(struct mm_struct *mm, struct file *file, + pgoff_t start, struct collapse_control *cc) { struct page *page = NULL; struct address_space *mapping = file->f_mapping; @@ -1956,16 +1949,16 @@ static void khugepaged_scan_file(struct mm_struct *mm, struct file *file, result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); } else { - collapse_file(mm, file, start, hpage, cc); + result = collapse_file(mm, file, start, cc); } } /* TODO: tracepoints */ + return result; } #else -static void khugepaged_scan_file(struct mm_struct *mm, struct file *file, - pgoff_t start, struct page **hpage, - struct collapse_control *cc) +static int khugepaged_scan_file(struct mm_struct *mm, struct file *file, + pgoff_t start, struct collapse_control *cc) { BUILD_BUG(); } @@ -1975,8 +1968,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) } #endif -static unsigned int khugepaged_scan_mm_slot(unsigned int pages, - struct page **hpage, +static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, struct collapse_control *cc) __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) @@ -1990,6 +1982,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, VM_BUG_ON(!pages); lockdep_assert_held(&khugepaged_mm_lock); + *result = SCAN_FAIL; if (khugepaged_scan.mm_slot) mm_slot = khugepaged_scan.mm_slot; @@ -2039,7 +2032,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); while (khugepaged_scan.address < hend) { - int ret; + bool mmap_locked = true; + cond_resched(); if (unlikely(khugepaged_test_exit(mm))) goto breakouterloop; @@ -2053,20 +2047,28 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, khugepaged_scan.address); mmap_read_unlock(mm); - ret = 1; - khugepaged_scan_file(mm, file, pgoff, hpage, - cc); + *result = khugepaged_scan_file(mm, file, pgoff, + cc); + mmap_locked = false; fput(file); } else { - ret = khugepaged_scan_pmd(mm, vma, - khugepaged_scan.address, - hpage, cc); + *result = khugepaged_scan_pmd(mm, vma, + khugepaged_scan.address, + &mmap_locked, cc); } + if (*result == SCAN_SUCCEED) + ++khugepaged_pages_collapsed; /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; progress += HPAGE_PMD_NR; - if (ret) - /* we released mmap_lock so break loop */ + if (!mmap_locked) + /* + * We released mmap_lock so break loop. Note + * that we drop mmap_lock before all hugepage + * allocations, so if allocation fails, we are + * guaranteed to break here and report the + * correct result back to caller. 
+ */ goto breakouterloop_mmap_lock; if (progress >= pages) goto breakouterloop; @@ -2118,10 +2120,10 @@ static int khugepaged_wait_event(void) static void khugepaged_do_scan(struct collapse_control *cc) { - struct page *hpage = NULL; unsigned int progress = 0, pass_through_head = 0; unsigned int pages = READ_ONCE(khugepaged_pages_to_scan); bool wait = true; + int result = SCAN_SUCCEED; lru_add_drain_all(); @@ -2137,7 +2139,7 @@ static void khugepaged_do_scan(struct collapse_control *cc) if (khugepaged_has_work() && pass_through_head < 2) progress += khugepaged_scan_mm_slot(pages - progress, - &hpage, cc); + &result, cc); else progress = pages; spin_unlock(&khugepaged_mm_lock); @@ -2145,7 +2147,7 @@ static void khugepaged_do_scan(struct collapse_control *cc) if (progress >= pages) break; - if (IS_ERR(hpage)) { + if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) { /* * If fail to allocate the first time, try to sleep for * a while. When hit again, cancel the scan. @@ -2153,7 +2155,6 @@ static void khugepaged_do_scan(struct collapse_control *cc) if (!wait) break; wait = false; - hpage = NULL; khugepaged_alloc_sleep(); } } From 518617ff96ccba5b54682db1d8b8bc8420bffa12 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:24 -0700 Subject: [PATCH 261/321] mm/khugepaged: add flag to predicate khugepaged-only behavior Add .is_khugepaged flag to struct collapse_control so khugepaged-specific behavior can be elided by MADV_COLLAPSE context. Start by protecting khugepaged-specific heuristics by this flag. In MADV_COLLAPSE, the user presumably has reason to believe the collapse will be beneficial and khugepaged heuristics shouldn't prevent the user from doing so: 1) sysfs-controlled knobs khugepaged_max_ptes_[none|swap|shared] 2) requirement that some pages in region being collapsed be young or referenced Link: https://lkml.kernel.org/r/20220706235936.2197195-7-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/khugepaged.c | 62 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 147f5828f052c..d89056d8cbad9 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -73,6 +73,8 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); * default collapse hugepages if there is at least one pte mapped like * it would have happened if the vma was large enough during page * fault. + * + * Note that these are only respected if collapse was initiated by khugepaged. 
*/ static unsigned int khugepaged_max_ptes_none __read_mostly; static unsigned int khugepaged_max_ptes_swap __read_mostly; @@ -86,6 +88,8 @@ static struct kmem_cache *mm_slot_cache __read_mostly; #define MAX_PTE_MAPPED_THP 8 struct collapse_control { + bool is_khugepaged; + /* Num pages scanned per node */ int node_load[MAX_NUMNODES]; @@ -554,6 +558,7 @@ static bool is_refcount_suitable(struct page *page) static int __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte, + struct collapse_control *cc, struct list_head *compound_pagelist) { struct page *page = NULL; @@ -567,7 +572,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, if (pte_none(pteval) || (pte_present(pteval) && is_zero_pfn(pte_pfn(pteval)))) { if (!userfaultfd_armed(vma) && - ++none_or_zero <= khugepaged_max_ptes_none) { + (++none_or_zero <= khugepaged_max_ptes_none || + !cc->is_khugepaged)) { continue; } else { result = SCAN_EXCEED_NONE_PTE; @@ -587,8 +593,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, VM_BUG_ON_PAGE(!PageAnon(page), page); - if (page_mapcount(page) > 1 && - ++shared > khugepaged_max_ptes_shared) { + if (cc->is_khugepaged && page_mapcount(page) > 1 && + ++shared > khugepaged_max_ptes_shared) { result = SCAN_EXCEED_SHARED_PTE; count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); goto out; @@ -654,10 +660,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, if (PageCompound(page)) list_add_tail(&page->lru, compound_pagelist); next: - /* There should be enough young pte to collapse the page */ - if (pte_young(pteval) || - page_is_young(page) || PageReferenced(page) || - mmu_notifier_test_young(vma->vm_mm, address)) + /* + * If collapse was initiated by khugepaged, check that there is + * enough young pte to justify collapsing the page + */ + if (cc->is_khugepaged && + (pte_young(pteval) || page_is_young(page) || + PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, + address))) referenced++; if (pte_write(pteval)) @@ -666,7 +676,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, if (unlikely(!writable)) { result = SCAN_PAGE_RO; - } else if (unlikely(!referenced)) { + } else if (unlikely(cc->is_khugepaged && !referenced)) { result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; @@ -745,6 +755,7 @@ static void khugepaged_alloc_sleep(void) struct collapse_control khugepaged_collapse_control = { + .is_khugepaged = true, .last_target_node = NUMA_NO_NODE, }; @@ -1023,7 +1034,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, mmu_notifier_invalidate_range_end(&range); spin_lock(pte_ptl); - result = __collapse_huge_page_isolate(vma, address, pte, + result = __collapse_huge_page_isolate(vma, address, pte, cc, &compound_pagelist); spin_unlock(pte_ptl); @@ -1114,7 +1125,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; if (is_swap_pte(pteval)) { - if (++unmapped <= khugepaged_max_ptes_swap) { + if (++unmapped <= khugepaged_max_ptes_swap || + !cc->is_khugepaged) { /* * Always be strict with uffd-wp * enabled swap entries. 
Please see @@ -1133,7 +1145,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, } if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { if (!userfaultfd_armed(vma) && - ++none_or_zero <= khugepaged_max_ptes_none) { + (++none_or_zero <= khugepaged_max_ptes_none || + !cc->is_khugepaged)) { continue; } else { result = SCAN_EXCEED_NONE_PTE; @@ -1163,8 +1176,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, goto out_unmap; } - if (page_mapcount(page) > 1 && - ++shared > khugepaged_max_ptes_shared) { + if (cc->is_khugepaged && + page_mapcount(page) > 1 && + ++shared > khugepaged_max_ptes_shared) { result = SCAN_EXCEED_SHARED_PTE; count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); goto out_unmap; @@ -1218,14 +1232,22 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, result = SCAN_PAGE_COUNT; goto out_unmap; } - if (pte_young(pteval) || - page_is_young(page) || PageReferenced(page) || - mmu_notifier_test_young(vma->vm_mm, address)) + + /* + * If collapse was initiated by khugepaged, check that there is + * enough young pte to justify collapsing the page + */ + if (cc->is_khugepaged && + (pte_young(pteval) || page_is_young(page) || + PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, + address))) referenced++; } if (!writable) { result = SCAN_PAGE_RO; - } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) { + } else if (cc->is_khugepaged && + (!referenced || + (unmapped && referenced < HPAGE_PMD_NR / 2))) { result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; @@ -1894,7 +1916,8 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file, continue; if (xa_is_value(page)) { - if (++swap > khugepaged_max_ptes_swap) { + if (cc->is_khugepaged && + ++swap > khugepaged_max_ptes_swap) { result = SCAN_EXCEED_SWAP_PTE; count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); break; @@ -1945,7 +1968,8 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file, rcu_read_unlock(); if (result == SCAN_SUCCEED) { - if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { + if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none && + cc->is_khugepaged) { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); } else { From 2477076f6e9d2694126a9b0a8cc6a90b3af04a3c Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:25 -0700 Subject: [PATCH 262/321] mm/thp: add flag to enforce sysfs THP in hugepage_vma_check() MADV_COLLAPSE is not coupled to the kernel-oriented sysfs THP settings[1]. hugepage_vma_check() is the authority on determining if a VMA is eligible for THP allocation/collapse, and currently enforces the sysfs THP settings. Add a flag to disable these checks. For now, only apply this arg to anon and file, which use /sys/kernel/transparent_hugepage/enabled. We can expand this to shmem, which uses /sys/kernel/transparent_hugepage/shmem_enabled, later. Use this flag in collapse_pte_mapped_thp() where previously the VMA flags passed to hugepage_vma_check() were OR'd with VM_HUGEPAGE to elide the VM_HUGEPAGE check in "madvise" THP mode. Prior to "mm: khugepaged: check THP flag in hugepage_vma_check()", this check also didn't check "never" THP mode. As such, this restores the previous behavior of collapse_pte_mapped_thp() where sysfs THP settings are ignored. See comment in code for justification why this is OK. 
[1] https://lore.kernel.org/linux-mm/CAAa6QmQxay1_=Pmt8oCX2-Va18t44FV-Vs-WsQt_6+qBks4nZA@mail.gmail.com/ Link: https://lkml.kernel.org/r/20220706235936.2197195-8-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 2 +- include/linux/huge_mm.h | 9 ++++----- mm/huge_memory.c | 14 ++++++-------- mm/khugepaged.c | 25 ++++++++++++++----------- mm/memory.c | 4 ++-- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 34d292cec79a6..f8cd58846a28b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -866,7 +866,7 @@ static int show_smap(struct seq_file *m, void *v) __show_smap(m, &mss, false); seq_printf(m, "THPeligible: %d\n", - hugepage_vma_check(vma, vma->vm_flags, true, false)); + hugepage_vma_check(vma, vma->vm_flags, true, false, true)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 37f2f11a6d7ee..00312fc251c1b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -168,9 +168,8 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); } -bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags, - bool smaps, bool in_pf); +bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, + bool smaps, bool in_pf, bool enforce_sysfs); #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -321,8 +320,8 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, } static inline bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags, - bool smaps, bool in_pf) + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs) { return false; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9f70582b8f512..0dfc67dec1087 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -69,9 +69,8 @@ static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; unsigned long huge_zero_pfn __read_mostly = ~0UL; -bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags, - bool smaps, bool in_pf) +bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, + bool smaps, bool in_pf, bool enforce_sysfs) { if (!vma->vm_mm) /* vdso */ return false; @@ -120,11 +119,10 @@ bool hugepage_vma_check(struct vm_area_struct *vma, if (!in_pf && shmem_file(vma->vm_file)) return shmem_huge_enabled(vma); - if (!hugepage_flags_enabled()) - return false; - - /* THP settings require madvise. 
*/ - if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always()) + /* Enforce sysfs THP requirements as necessary */ + if (enforce_sysfs && + (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) && + !hugepage_flags_always()))) return false; /* Only regular file is valid */ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index d89056d8cbad9..b0e20db3f8058 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -478,7 +478,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && hugepage_flags_enabled()) { - if (hugepage_vma_check(vma, vm_flags, false, false)) + if (hugepage_vma_check(vma, vm_flags, false, false, true)) __khugepaged_enter(vma->vm_mm); } } @@ -844,7 +844,8 @@ static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) */ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, - struct vm_area_struct **vmap) + struct vm_area_struct **vmap, + struct collapse_control *cc) { struct vm_area_struct *vma; @@ -855,7 +856,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!vma) return SCAN_VMA_NULL; - if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) + if (!hugepage_vma_check(vma, vma->vm_flags, false, false, + cc->is_khugepaged)) return SCAN_VMA_CHECK; /* * Anon VMA expected, the address may be unmapped then @@ -974,7 +976,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, goto out_nolock; mmap_read_lock(mm); - result = hugepage_vma_revalidate(mm, address, &vma); + result = hugepage_vma_revalidate(mm, address, &vma, cc); if (result != SCAN_SUCCEED) { mmap_read_unlock(mm); goto out_nolock; @@ -1006,7 +1008,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, * handled by the anon_vma lock + PG_lock. */ mmap_write_lock(mm); - result = hugepage_vma_revalidate(mm, address, &vma); + result = hugepage_vma_revalidate(mm, address, &vma, cc); if (result != SCAN_SUCCEED) goto out_up_write; /* check if the pmd is still valid */ @@ -1350,12 +1352,13 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) return; /* - * This vm_flags may not have VM_HUGEPAGE if the page was not - * collapsed by this mm. But we can still collapse if the page is - * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check() - * will not fail the vma for missing VM_HUGEPAGE + * If we are here, we've succeeded in replacing all the native pages + * in the page cache with a single hugepage. If a mm were to fault-in + * this memory (mapped by a suitably aligned VMA), we'd get the hugepage + * and map it by a PMD, regardless of sysfs THP settings. As such, let's + * analogously elide sysfs THP settings here. 
*/ - if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false)) + if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) return; /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ @@ -2042,7 +2045,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, progress++; break; } - if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) { + if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { skip: progress++; continue; diff --git a/mm/memory.c b/mm/memory.c index f77e8b2a651ec..7722698203a6b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5002,7 +5002,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, return VM_FAULT_OOM; retry_pud: if (pud_none(*vmf.pud) && - hugepage_vma_check(vma, vm_flags, false, true)) { + hugepage_vma_check(vma, vm_flags, false, true, true)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; @@ -5036,7 +5036,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, goto retry_pud; if (pmd_none(*vmf.pmd) && - hugepage_vma_check(vma, vm_flags, false, true)) { + hugepage_vma_check(vma, vm_flags, false, true, true)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; From 7f1a7e68f57885ae91081b7705ba904758abd1db Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:26 -0700 Subject: [PATCH 263/321] mm/khugepaged: record SCAN_PMD_MAPPED when scan_pmd() finds hugepage When scanning an anon pmd to see if it's eligible for collapse, return SCAN_PMD_MAPPED if the pmd already maps a hugepage. Note that SCAN_PMD_MAPPED is different from SCAN_PAGE_COMPOUND used in the file-collapse path, since the latter might identify pte-mapped compound pages. This is required by MADV_COLLAPSE which necessarily needs to know what hugepage-aligned/sized regions are already pmd-mapped. In order to determine if a pmd already maps a hugepage, refactor mm_find_pmd(): Return mm_find_pmd() to it's pre-commit f72e7dcdd252 ("mm: let mm_find_pmd fix buggy race with THP fault") behavior. ksm was the only caller that explicitly wanted a pte-mapping pmd, so open code the pte-mapping logic there (pmd_present() and pmd_trans_huge() checks). Undo revert change in commit f72e7dcdd252 ("mm: let mm_find_pmd fix buggy race with THP fault") that open-coded split_huge_pmd_address() pmd lookup and use mm_find_pmd() instead. Link: https://lkml.kernel.org/r/20220706235936.2197195-9-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- include/trace/events/huge_memory.h | 1 + mm/huge_memory.c | 18 +-------- mm/internal.h | 2 +- mm/khugepaged.c | 60 ++++++++++++++++++++++++------ mm/ksm.c | 10 +++++ mm/rmap.c | 15 +++----- 6 files changed, 67 insertions(+), 39 deletions(-) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index d651f3437367d..55392bf30a034 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -11,6 +11,7 @@ EM( SCAN_FAIL, "failed") \ EM( SCAN_SUCCEED, "succeeded") \ EM( SCAN_PMD_NULL, "pmd_null") \ + EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0dfc67dec1087..b81a81086f020 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2263,25 +2263,11 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct folio *folio) { - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; + pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); - pgd = pgd_offset(vma->vm_mm, address); - if (!pgd_present(*pgd)) + if (!pmd) return; - p4d = p4d_offset(pgd, address); - if (!p4d_present(*p4d)) - return; - - pud = pud_offset(p4d, address); - if (!pud_present(*pud)) - return; - - pmd = pmd_offset(pud, address); - __split_huge_pmd(vma, pmd, address, freeze, folio); } diff --git a/mm/internal.h b/mm/internal.h index 6e14749ad1e53..ef8c23fb678f7 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -188,7 +188,7 @@ extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason /* * in mm/rmap.c: */ -extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); +pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); /* * in mm/page_alloc.c diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b0e20db3f8058..c7a09cc9a0e8e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -28,6 +28,7 @@ enum scan_result { SCAN_FAIL, SCAN_SUCCEED, SCAN_PMD_NULL, + SCAN_PMD_MAPPED, SCAN_EXCEED_NONE_PTE, SCAN_EXCEED_SWAP_PTE, SCAN_EXCEED_SHARED_PTE, @@ -871,6 +872,45 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_SUCCEED; } +static int find_pmd_or_thp_or_none(struct mm_struct *mm, + unsigned long address, + pmd_t **pmd) +{ + pmd_t pmde; + + *pmd = mm_find_pmd(mm, address); + if (!*pmd) + return SCAN_PMD_NULL; + + pmde = pmd_read_atomic(*pmd); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* See comments in pmd_none_or_trans_huge_or_clear_bad() */ + barrier(); +#endif + if (!pmd_present(pmde)) + return SCAN_PMD_NULL; + if (pmd_trans_huge(pmde)) + return SCAN_PMD_MAPPED; + if (pmd_bad(pmde)) + return SCAN_PMD_NULL; + return SCAN_SUCCEED; +} + +static int check_pmd_still_valid(struct mm_struct *mm, + unsigned long address, + pmd_t *pmd) +{ + pmd_t *new_pmd; + int result = find_pmd_or_thp_or_none(mm, address, &new_pmd); + + if (result != SCAN_SUCCEED) + return result; + if (new_pmd != pmd) + return SCAN_FAIL; + return SCAN_SUCCEED; +} + /* * Bring missing pages in from swap, to complete THP collapse. 
* Only done if khugepaged_scan_pmd believes it is worthwhile. @@ -982,9 +1022,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, goto out_nolock; } - pmd = mm_find_pmd(mm, address); - if (!pmd) { - result = SCAN_PMD_NULL; + result = find_pmd_or_thp_or_none(mm, address, &pmd); + if (result != SCAN_SUCCEED) { mmap_read_unlock(mm); goto out_nolock; } @@ -1012,7 +1051,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, if (result != SCAN_SUCCEED) goto out_up_write; /* check if the pmd is still valid */ - if (mm_find_pmd(mm, address) != pmd) + result = check_pmd_still_valid(mm, address, pmd); + if (result != SCAN_SUCCEED) goto out_up_write; anon_vma_lock_write(vma->anon_vma); @@ -1115,11 +1155,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, VM_BUG_ON(address & ~HPAGE_PMD_MASK); - pmd = mm_find_pmd(mm, address); - if (!pmd) { - result = SCAN_PMD_NULL; + result = find_pmd_or_thp_or_none(mm, address, &pmd); + if (result != SCAN_SUCCEED) goto out; - } memset(cc->node_load, 0, sizeof(cc->node_load)); pte = pte_offset_map_lock(mm, pmd, address, &ptl); @@ -1373,8 +1411,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (!PageHead(hpage)) goto drop_hpage; - pmd = mm_find_pmd(mm, haddr); - if (!pmd) + if (find_pmd_or_thp_or_none(mm, haddr, &pmd) != SCAN_SUCCEED) goto drop_hpage; start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); @@ -1492,8 +1529,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) if (vma->vm_end < addr + HPAGE_PMD_SIZE) continue; mm = vma->vm_mm; - pmd = mm_find_pmd(mm, addr); - if (!pmd) + if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED) continue; /* * We need exclusive mmap_lock to retract page table. diff --git a/mm/ksm.c b/mm/ksm.c index 075123602bd07..3e0a0a42fa1ff 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1136,6 +1136,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, { struct mm_struct *mm = vma->vm_mm; pmd_t *pmd; + pmd_t pmde; pte_t *ptep; pte_t newpte; spinlock_t *ptl; @@ -1150,6 +1151,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, pmd = mm_find_pmd(mm, addr); if (!pmd) goto out; + /* + * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() + * without holding anon_vma lock for write. So when looking for a + * genuine pmde (in which to find pte), test present and !THP together. + */ + pmde = *pmd; + barrier(); + if (!pmd_present(pmde) || pmd_trans_huge(pmde)) + goto out; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr, addr + PAGE_SIZE); diff --git a/mm/rmap.c b/mm/rmap.c index edc06c52bc82e..af775855e58f0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -767,13 +767,17 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) return vma_address(page, vma); } +/* + * Returns the actual pmd_t* where we expect 'address' to be mapped from, or + * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* + * represents. + */ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd = NULL; - pmd_t pmde; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) @@ -788,15 +792,6 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) goto out; pmd = pmd_offset(pud, address); - /* - * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() - * without holding anon_vma lock for write. 
So when looking for a - * genuine pmde (in which to find pte), test present and !THP together. - */ - pmde = *pmd; - barrier(); - if (!pmd_present(pmde) || pmd_trans_huge(pmde)) - pmd = NULL; out: return pmd; } From 9f626ffb1d1c5c26b4a8e0fdffec784a5f773274 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:27 -0700 Subject: [PATCH 264/321] mm/madvise: introduce MADV_COLLAPSE sync hugepage collapse This idea was introduced by David Rientjes[1]. Introduce a new madvise mode, MADV_COLLAPSE, that allows users to request a synchronous collapse of memory at their own expense. The benefits of this approach are: * CPU is charged to the process that wants to spend the cycles for the THP * Avoid unpredictable timing of khugepaged collapse Semantics This call is independent of the system-wide THP sysfs settings, but will fail for memory marked VM_NOHUGEPAGE. If the ranges provided span multiple VMAs, the semantics of the collapse over each VMA is independent from the others. This implies a hugepage cannot cross a VMA boundary. If collapse of a given hugepage-aligned/sized region fails, the operation may continue to attempt collapsing the remainder of memory specified. The memory ranges provided must be page-aligned, but are not required to be hugepage-aligned. If the memory ranges are not hugepage-aligned, the start/end of the range will be clamped to the first/last hugepage-aligned address covered by said range. The memory ranges must span at least one hugepage-sized region. All non-resident pages covered by the range will first be swapped/faulted-in, before being internally copied onto a freshly allocated hugepage. Unmapped pages will have their data directly initialized to 0 in the new hugepage. However, for every eligible hugepage aligned/sized region to-be collapsed, at least one page must currently be backed by memory (a PMD covering the address range must already exist). Allocation for the new hugepage may enter direct reclaim and/or compaction, regardless of VMA flags. When the system has multiple NUMA nodes, the hugepage will be allocated from the node providing the most native pages. This operation operates on the current state of the specified process and makes no persistent changes or guarantees on how pages will be mapped, constructed, or faulted in the future Return Value If all hugepage-sized/aligned regions covered by the provided range were either successfully collapsed, or were already PMD-mapped THPs, this operation will be deemed successful. On success, process_madvise(2) returns the number of bytes advised, and madvise(2) returns 0. Else, -1 is returned and errno is set to indicate the error for the most-recently attempted hugepage collapse. Note that many failures might have occurred, since the operation may continue to collapse in the event a single hugepage-sized/aligned region fails. ENOMEM Memory allocation failed or VMA not found EBUSY Memcg charging failed EAGAIN Required resource temporarily unavailable. Try again might succeed. EINVAL Other error: No PMD found, subpage doesn't have Present bit set, "Special" page no backed by struct page, VMA incorrectly sized, address not page-aligned, ... Most notable here is ENOMEM and EBUSY (new to madvise) which are intended to provide the caller with actionable feedback so they may take an appropriate fallback measure. 
Use Cases An immediate user of this new functionality are malloc() implementations that manage memory in hugepage-sized chunks, but sometimes subrelease memory back to the system in native-sized chunks via MADV_DONTNEED; zapping the pmd. Later, when the memory is hot, the implementation could madvise(MADV_COLLAPSE) to re-back the memory by THPs to regain hugepage coverage and dTLB performance. TCMalloc is such an implementation that could benefit from this[2]. Only privately-mapped anon memory is supported for now, but additional support for file, shmem, and HugeTLB high-granularity mappings[2] is expected. File and tmpfs/shmem support would permit: * Backing executable text by THPs. Current support provided by CONFIG_READ_ONLY_THP_FOR_FS may take a long time on a large system which might impair services from serving at their full rated load after (re)starting. Tricks like mremap(2)'ing text onto anonymous memory to immediately realize iTLB performance prevents page sharing and demand paging, both of which increase steady state memory footprint. With MADV_COLLAPSE, we get the best of both worlds: Peak upfront performance and lower RAM footprints. * Backing guest memory by hugapages after the memory contents have been migrated in native-page-sized chunks to a new host, in a userfaultfd-based live-migration stack. [1] https://lore.kernel.org/linux-mm/d098c392-273a-36a4-1a29-59731cdf5d3d@google.com/ [2] https://github.com/google/tcmalloc/tree/master/tcmalloc Link: https://lkml.kernel.org/r/20220706235936.2197195-10-zokeefe@google.com Signed-off-by: Zach O'Keefe Suggested-by: David Rientjes Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/alpha/include/uapi/asm/mman.h | 2 + arch/mips/include/uapi/asm/mman.h | 2 + arch/parisc/include/uapi/asm/mman.h | 2 + arch/xtensa/include/uapi/asm/mman.h | 2 + include/linux/huge_mm.h | 14 ++- include/uapi/asm-generic/mman-common.h | 2 + mm/khugepaged.c | 118 ++++++++++++++++++- mm/madvise.c | 5 + tools/include/uapi/asm-generic/mman-common.h | 2 + 9 files changed, 146 insertions(+), 3 deletions(-) diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index 4aa996423b0d1..763929e814e9a 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h @@ -76,6 +76,8 @@ #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index 1be428663c102..c6e1fc77c9968 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h @@ -103,6 +103,8 @@ #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index a7ea3204a5faa..22133a6a506ef 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -70,6 +70,8 @@ #define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */ +#define MADV_COLLAPSE 73 /* Synchronous hugepage collapse */ + #define MADV_HWPOISON 100 /* poison a page for testing */ #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index 7966a58af472a..1ff0c858544fa 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -111,6 +111,8 @@ #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 00312fc251c1b..39193623442ef 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -218,6 +218,9 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); +int madvise_collapse(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, unsigned long end); void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next); spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); @@ -361,9 +364,16 @@ static inline void split_huge_pmd_address(struct vm_area_struct *vma, static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { - BUG(); - return 0; + return -EINVAL; } + +static inline int madvise_collapse(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, unsigned long end) +{ + return -EINVAL; +} + 
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index 6c1aa92a92e44..6ce1f1ceb432c 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -77,6 +77,8 @@ #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/mm/khugepaged.c b/mm/khugepaged.c index c7a09cc9a0e8e..2b2d832e44f26 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -976,7 +976,8 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, struct collapse_control *cc) { /* Only allocate from the target node */ - gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; + gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() : + GFP_TRANSHUGE) | __GFP_THISNODE; int node = khugepaged_find_target_node(cc); if (!khugepaged_alloc_page(hpage, gfp, node)) @@ -2356,3 +2357,118 @@ void khugepaged_min_free_kbytes_update(void) set_recommended_min_free_kbytes(); mutex_unlock(&khugepaged_mutex); } + +static int madvise_collapse_errno(enum scan_result r) +{ + /* + * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide + * actionable feedback to caller, so they may take an appropriate + * fallback measure depending on the nature of the failure. + */ + switch (r) { + case SCAN_ALLOC_HUGE_PAGE_FAIL: + return -ENOMEM; + case SCAN_CGROUP_CHARGE_FAIL: + return -EBUSY; + /* Resource temporary unavailable - trying again might succeed */ + case SCAN_PAGE_LOCK: + case SCAN_PAGE_LRU: + return -EAGAIN; + /* + * Other: Trying again likely not to succeed / error intrinsic to + * specified memory range. khugepaged likely won't be able to collapse + * either. 
+ */ + default: + return -EINVAL; + } +} + +int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, + unsigned long start, unsigned long end) +{ + struct collapse_control *cc; + struct mm_struct *mm = vma->vm_mm; + unsigned long hstart, hend, addr; + int thps = 0, last_fail = SCAN_FAIL; + bool mmap_locked = true; + + BUG_ON(vma->vm_start > start); + BUG_ON(vma->vm_end < end); + + cc = kmalloc(sizeof(*cc), GFP_KERNEL); + if (!cc) + return -ENOMEM; + cc->is_khugepaged = false; + cc->last_target_node = NUMA_NO_NODE; + + *prev = vma; + + /* TODO: Support file/shmem */ + if (!vma->anon_vma || !vma_is_anonymous(vma)) + return -EINVAL; + + hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = end & HPAGE_PMD_MASK; + + if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) + return -EINVAL; + + mmgrab(mm); + lru_add_drain_all(); + + for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) { + int result = SCAN_FAIL; + + if (!mmap_locked) { + cond_resched(); + mmap_read_lock(mm); + mmap_locked = true; + result = hugepage_vma_revalidate(mm, addr, &vma, cc); + if (result != SCAN_SUCCEED) { + last_fail = result; + goto out_nolock; + } + } + mmap_assert_locked(mm); + memset(cc->node_load, 0, sizeof(cc->node_load)); + result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, cc); + if (!mmap_locked) + *prev = NULL; /* Tell caller we dropped mmap_lock */ + + switch (result) { + case SCAN_SUCCEED: + case SCAN_PMD_MAPPED: + ++thps; + break; + /* Whitelisted set of results where continuing OK */ + case SCAN_PMD_NULL: + case SCAN_PTE_NON_PRESENT: + case SCAN_PTE_UFFD_WP: + case SCAN_PAGE_RO: + case SCAN_LACK_REFERENCED_PAGE: + case SCAN_PAGE_NULL: + case SCAN_PAGE_COUNT: + case SCAN_PAGE_LOCK: + case SCAN_PAGE_COMPOUND: + case SCAN_PAGE_LRU: + last_fail = result; + break; + default: + last_fail = result; + /* Other error, exit */ + goto out_maybelock; + } + } + +out_maybelock: + /* Caller expects us to hold mmap_lock on return */ + if (!mmap_locked) + mmap_read_lock(mm); +out_nolock: + mmap_assert_locked(mm); + mmdrop(mm); + + return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0 + : madvise_collapse_errno(last_fail); +} diff --git a/mm/madvise.c b/mm/madvise.c index 851fa4e134bc5..9f08e958ea861 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -59,6 +59,7 @@ static int madvise_need_mmap_write(int behavior) case MADV_FREE: case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: + case MADV_COLLAPSE: return 0; default: /* be safe, default to 1. list exceptions explicitly */ @@ -1057,6 +1058,8 @@ static int madvise_vma_behavior(struct vm_area_struct *vma, if (error) goto out; break; + case MADV_COLLAPSE: + return madvise_collapse(vma, prev, start, end); } anon_name = anon_vma_name(vma); @@ -1150,6 +1153,7 @@ madvise_behavior_valid(int behavior) #ifdef CONFIG_TRANSPARENT_HUGEPAGE case MADV_HUGEPAGE: case MADV_NOHUGEPAGE: + case MADV_COLLAPSE: #endif case MADV_DONTDUMP: case MADV_DODUMP: @@ -1339,6 +1343,7 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, * MADV_NOHUGEPAGE - mark the given range as not worth being backed by * transparent huge pages so the existing pages will not be * coalesced into THP and new pages will not be allocated as THP. + * MADV_COLLAPSE - synchronously coalesce pages into new THP. * MADV_DONTDUMP - the application wants to prevent pages in the given range * from being included in its core dump. * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. 
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index 6c1aa92a92e44..6ce1f1ceb432c 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -77,6 +77,8 @@ #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + /* compatibility flags */ #define MAP_FILE 0 From 4f31dd9bc1b61e28461630405c3e9e2e42c7f4ca Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:28 -0700 Subject: [PATCH 265/321] mm/khugepaged: rename prefix of shared collapse functions The following functions are shared between khugepaged and madvise collapse contexts. Replace the "khugepaged_" prefix with generic "hpage_collapse_" prefix in such cases: khugepaged_test_exit() -> hpage_collapse_test_exit() khugepaged_scan_abort() -> hpage_collapse_scan_abort() khugepaged_scan_pmd() -> hpage_collapse_scan_pmd() khugepaged_find_target_node() -> hpage_collapse_find_target_node() khugepaged_alloc_page() -> hpage_collapse_alloc_page() The kerenel ABI (e.g. huge_memory:mm_khugepaged_scan_pmd tracepoint) is unaltered. Link: https://lkml.kernel.org/r/20220706235936.2197195-11-zokeefe@google.com Signed-off-by: Zach O'Keefe Reviewed-by: Yang Shi Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/khugepaged.c | 68 +++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 2b2d832e44f26..e0d00180512c6 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -94,7 +94,7 @@ struct collapse_control { /* Num pages scanned per node */ int node_load[MAX_NUMNODES]; - /* Last target selected in khugepaged_find_target_node() */ + /* Last target selected in hpage_collapse_find_target_node() */ int last_target_node; }; @@ -438,7 +438,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); } -static inline int khugepaged_test_exit(struct mm_struct *mm) +static inline int hpage_collapse_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } @@ -453,7 +453,7 @@ void __khugepaged_enter(struct mm_struct *mm) return; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return; @@ -505,11 +505,10 @@ void __khugepaged_exit(struct mm_struct *mm) } else if (mm_slot) { /* * This is required to serialize against - * khugepaged_test_exit() (which is guaranteed to run - * under mmap sem read mode). Stop here (after we - * return all pagetables will be destroyed) until - * khugepaged has finished working on the pagetables - * under the mmap_lock. + * hpage_collapse_test_exit() (which is guaranteed to run + * under mmap sem read mode). 
Stop here (after we return all + * pagetables will be destroyed) until khugepaged has finished + * working on the pagetables under the mmap_lock. */ mmap_write_lock(mm); mmap_write_unlock(mm); @@ -754,13 +753,12 @@ static void khugepaged_alloc_sleep(void) remove_wait_queue(&khugepaged_wait, &wait); } - struct collapse_control khugepaged_collapse_control = { .is_khugepaged = true, .last_target_node = NUMA_NO_NODE, }; -static bool khugepaged_scan_abort(int nid, struct collapse_control *cc) +static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc) { int i; @@ -795,7 +793,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) } #ifdef CONFIG_NUMA -static int khugepaged_find_target_node(struct collapse_control *cc) +static int hpage_collapse_find_target_node(struct collapse_control *cc) { int nid, target_node = 0, max_value = 0; @@ -819,13 +817,13 @@ static int khugepaged_find_target_node(struct collapse_control *cc) return target_node; } #else -static int khugepaged_find_target_node(struct collapse_control *cc) +static int hpage_collapse_find_target_node(struct collapse_control *cc) { return 0; } #endif -static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) +static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node) { *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { @@ -850,7 +848,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, { struct vm_area_struct *vma; - if (unlikely(khugepaged_test_exit(mm))) + if (unlikely(hpage_collapse_test_exit(mm))) return SCAN_ANY_PROCESS; *vmap = vma = find_vma(mm, address); @@ -913,7 +911,7 @@ static int check_pmd_still_valid(struct mm_struct *mm, /* * Bring missing pages in from swap, to complete THP collapse. - * Only done if khugepaged_scan_pmd believes it is worthwhile. + * Only done if hpage_collapse_scan_pmd believes it is worthwhile. * * Called and returns without pte mapped or spinlocks held. * Note that if false is returned, mmap_lock will be released. @@ -978,9 +976,9 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, /* Only allocate from the target node */ gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() : GFP_TRANSHUGE) | __GFP_THISNODE; - int node = khugepaged_find_target_node(cc); + int node = hpage_collapse_find_target_node(cc); - if (!khugepaged_alloc_page(hpage, gfp, node)) + if (!hpage_collapse_alloc_page(hpage, gfp, node)) return SCAN_ALLOC_HUGE_PAGE_FAIL; if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp))) return SCAN_CGROUP_CHARGE_FAIL; @@ -1140,9 +1138,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, return result; } -static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, bool *mmap_locked, - struct collapse_control *cc) +static int hpage_collapse_scan_pmd(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, bool *mmap_locked, + struct collapse_control *cc) { pmd_t *pmd; pte_t *pte, *_pte; @@ -1234,7 +1233,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, * hit record. 
*/ node = page_to_nid(page); - if (khugepaged_scan_abort(node, cc)) { + if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; goto out_unmap; } @@ -1313,7 +1312,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) lockdep_assert_held(&khugepaged_mm_lock); - if (khugepaged_test_exit(mm)) { + if (hpage_collapse_test_exit(mm)) { /* free mm_slot */ hash_del(&mm_slot->hash); list_del(&mm_slot->mm_node); @@ -1486,7 +1485,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) if (!mmap_write_trylock(mm)) return; - if (unlikely(khugepaged_test_exit(mm))) + if (unlikely(hpage_collapse_test_exit(mm))) goto out; for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) @@ -1548,7 +1547,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * it'll always mapped in small page size for uffd-wp * registered ranges. */ - if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma)) + if (!hpage_collapse_test_exit(mm) && + !userfaultfd_wp(vma)) collapse_and_free_pmd(mm, vma, addr, pmd); mmap_write_unlock(mm); } else { @@ -1975,7 +1975,7 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file, } node = page_to_nid(page); - if (khugepaged_scan_abort(node, cc)) { + if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; break; } @@ -2069,7 +2069,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, goto breakouterloop_mmap_lock; progress++; - if (unlikely(khugepaged_test_exit(mm))) + if (unlikely(hpage_collapse_test_exit(mm))) goto breakouterloop; address = khugepaged_scan.address; @@ -2078,7 +2078,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, unsigned long hstart, hend; cond_resched(); - if (unlikely(khugepaged_test_exit(mm))) { + if (unlikely(hpage_collapse_test_exit(mm))) { progress++; break; } @@ -2099,7 +2099,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, bool mmap_locked = true; cond_resched(); - if (unlikely(khugepaged_test_exit(mm))) + if (unlikely(hpage_collapse_test_exit(mm))) goto breakouterloop; VM_BUG_ON(khugepaged_scan.address < hstart || @@ -2116,9 +2116,10 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, mmap_locked = false; fput(file); } else { - *result = khugepaged_scan_pmd(mm, vma, - khugepaged_scan.address, - &mmap_locked, cc); + *result = hpage_collapse_scan_pmd(mm, vma, + khugepaged_scan.address, + &mmap_locked, + cc); } if (*result == SCAN_SUCCEED) ++khugepaged_pages_collapsed; @@ -2148,7 +2149,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, * Release the current mm_slot if this mm is about to die, or * if we scanned all vmas of this mm. 
*/ - if (khugepaged_test_exit(mm) || !vma) { + if (hpage_collapse_test_exit(mm) || !vma) { /* * Make sure that if mm_users is reaching zero while * khugepaged runs here, khugepaged_exit will find @@ -2432,7 +2433,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, } mmap_assert_locked(mm); memset(cc->node_load, 0, sizeof(cc->node_load)); - result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, cc); + result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked, + cc); if (!mmap_locked) *prev = NULL; /* Tell caller we dropped mmap_lock */ From 260a843de8382d7a9d69f712a5d71f8838037c61 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:29 -0700 Subject: [PATCH 266/321] mm/madvise: add huge_memory:mm_madvise_collapse tracepoint Add a tracepoint to expose mm, address, and enum scan_result of each hugepage attempted to be collapsed by call to madvise(MADV_COLLAPSE). Link: https://lkml.kernel.org/r/20220706235936.2197195-12-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- include/trace/events/huge_memory.h | 22 ++++++++++++++++++++++ mm/khugepaged.c | 2 ++ 2 files changed, 24 insertions(+) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 55392bf30a034..38d339ffdb16b 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -167,5 +167,27 @@ TRACE_EVENT(mm_collapse_huge_page_swapin, __entry->ret) ); +TRACE_EVENT(mm_madvise_collapse, + + TP_PROTO(struct mm_struct *mm, unsigned long addr, int result), + + TP_ARGS(mm, addr, result), + + TP_STRUCT__entry(__field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(int, result) + ), + + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->result = result; + ), + + TP_printk("mm=%p addr=%#lx result=%s", + __entry->mm, + __entry->addr, + __print_symbolic(__entry->result, SCAN_STATUS)) +); + #endif /* __HUGE_MEMORY_H */ #include diff --git a/mm/khugepaged.c b/mm/khugepaged.c index e0d00180512c6..0207fc0a5b2af 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2438,6 +2438,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, if (!mmap_locked) *prev = NULL; /* Tell caller we dropped mmap_lock */ + trace_mm_madvise_collapse(mm, addr, result); + switch (result) { case SCAN_SUCCEED: case SCAN_PMD_MAPPED: From 66f3c6dfdf33703d3a6a31905bb88efca1a8773e Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:30 -0700 Subject: [PATCH 267/321] mm/madvise: add MADV_COLLAPSE to process_madvise() Allow MADV_COLLAPSE behavior for process_madvise(2) if caller has CAP_SYS_ADMIN or is requesting collapse of it's own memory. This is useful for the development of userspace agents that seek to optimize THP utilization system-wide by using userspace signals to prioritize what memory is most deserving of being THP-backed. 
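For illustration only, a minimal, hypothetical userspace sketch of such an agent follows; it is not part of this patch, assumes the MADV_COLLAPSE value used by the selftests (25) when libc headers do not define it, and drives pidfd_open(2)/process_madvise(2) through raw syscall numbers:

/*
 * Illustrative only -- not part of this patch.  A tiny "THP agent"
 * that asks the kernel to collapse a PMD-aligned range of another
 * process into huge pages via process_madvise(MADV_COLLAPSE).
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* same fallback value the selftests use */
#endif

int main(int argc, char **argv)
{
	struct iovec iov;
	int pidfd;
	ssize_t ret;

	if (argc != 4) {
		fprintf(stderr, "usage: %s <pid> <addr> <len>\n", argv[0]);
		return 1;
	}

	iov.iov_base = (void *)strtoul(argv[2], NULL, 0);
	iov.iov_len = strtoul(argv[3], NULL, 0);	/* PMD-size multiple */

	pidfd = syscall(SYS_pidfd_open, atoi(argv[1]), 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* Needs CAP_SYS_ADMIN unless pidfd refers to the caller itself. */
	ret = syscall(SYS_process_madvise, pidfd, &iov, 1, MADV_COLLAPSE, 0);
	if (ret < 0)
		perror("process_madvise(MADV_COLLAPSE)");
	else
		printf("advised %zd bytes\n", ret);

	close(pidfd);
	return ret < 0;
}

Collapsing memory of a process other than the caller additionally requires CAP_SYS_ADMIN, matching the behavior-validity check added by this patch.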
Link: https://lkml.kernel.org/r/20220706235936.2197195-13-zokeefe@google.com Signed-off-by: Zach O'Keefe Acked-by: David Rientjes Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/madvise.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 9f08e958ea861..6fb6b7160bdad 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1171,13 +1171,15 @@ madvise_behavior_valid(int behavior) } static bool -process_madvise_behavior_valid(int behavior) +process_madvise_behavior_valid(int behavior, struct task_struct *task) { switch (behavior) { case MADV_COLD: case MADV_PAGEOUT: case MADV_WILLNEED: return true; + case MADV_COLLAPSE: + return task == current || capable(CAP_SYS_ADMIN); default: return false; } @@ -1455,7 +1457,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, goto free_iov; } - if (!process_madvise_behavior_valid(behavior)) { + if (!process_madvise_behavior_valid(behavior, task)) { ret = -EINVAL; goto release_task; } From 2336493839ce5f6c5c75a8684bcf64dcf3d3524a Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:31 -0700 Subject: [PATCH 268/321] proc/smaps: add PMDMappable field to smaps Add PMDMappable field to smaps output which informs the user if memory in the VMA can be PMD-mapped by MADV_COLLAPSE. The distinction from THPeligible is needed for two reasons: 1) For THP, MADV_COLLAPSE is not coupled to THP sysfs controls, which THPeligible reports. 2) PMDMappable can also be used in HugeTLB fine-granularity mappings, which are independent from THP. Link: https://lkml.kernel.org/r/20220706235936.2197195-14-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 10 ++++++++-- fs/proc/task_mmu.c | 2 ++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 47e95dbc820d5..f207903a57a53 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -466,6 +466,7 @@ Memory Area, or VMA) there is a series of lines such as the following:: MMUPageSize: 4 kB Locked: 0 kB THPeligible: 0 + PMDMappable: 0 VmFlags: rd ex mr mw me dw The first of these lines shows the same information as is displayed for the @@ -518,9 +519,14 @@ replaced by copy-on-write) part of the underlying shmem object out on swap. does not take into account swapped out page of underlying shmem objects. 
"Locked" indicates whether the mapping is locked in memory or not. +"PMDMappable" indicates if the memory can be mapped by PMDs - 1 if true, 0 +otherwise. It just shows the current status. Note that this is memory +operable on explicitly by MADV_COLLAPSE. + "THPeligible" indicates whether the mapping is eligible for allocating THP -pages as well as the THP is PMD mappable or not - 1 if true, 0 otherwise. -It just shows the current status. +pages by the kernel, as well as the THP is PMD mappable or not - 1 if true, 0 +otherwise. It just shows the current status. Note this is memory the kernel can +transparently provide as THPs. "VmFlags" field deserves a separate description. This member represents the kernel flags associated with the particular virtual memory area in two letter diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f8cd58846a28b..29f2089456ba5 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -867,6 +867,8 @@ static int show_smap(struct seq_file *m, void *v) seq_printf(m, "THPeligible: %d\n", hugepage_vma_check(vma, vma->vm_flags, true, false, true)); + seq_printf(m, "PMDMappable: %d\n", + hugepage_vma_check(vma, vma->vm_flags, true, false, false)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); From 9b3adb2afced36243d67a18f1ef3ff9c210709da Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:32 -0700 Subject: [PATCH 269/321] selftests/vm: modularize collapse selftests Modularize the collapse action of khugepaged collapse selftests by introducing a struct collapse_context which specifies how to collapse a given memory range and the expected semantics of the collapse. This can be reused later to test other collapse contexts. Additionally, all tests have logic that checks if a collapse occurred via reading /proc/self/smaps, and report if this is different than expected. Move this logic into the per-context ->collapse() hook instead of repeating it in every test. Link: https://lkml.kernel.org/r/20220706235936.2197195-15-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/khugepaged.c | 251 +++++++++++------------- 1 file changed, 110 insertions(+), 141 deletions(-) diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c index 155120b67a165..0f1bee0eff24e 100644 --- a/tools/testing/selftests/vm/khugepaged.c +++ b/tools/testing/selftests/vm/khugepaged.c @@ -23,6 +23,11 @@ static int hpage_pmd_nr; #define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/" #define PID_SMAPS "/proc/self/smaps" +struct collapse_context { + void (*collapse)(const char *msg, char *p, bool expect); + bool enforce_pte_scan_limits; +}; + enum thp_enabled { THP_ALWAYS, THP_MADVISE, @@ -501,6 +506,21 @@ static bool wait_for_scan(const char *msg, char *p) return timeout == -1; } +static void khugepaged_collapse(const char *msg, char *p, bool expect) +{ + if (wait_for_scan(msg, p)) { + if (expect) + fail("Timeout"); + else + success("OK"); + return; + } else if (check_huge(p) == expect) { + success("OK"); + } else { + fail("Fail"); + } +} + static void alloc_at_fault(void) { struct settings settings = default_settings; @@ -528,53 +548,39 @@ static void alloc_at_fault(void) munmap(p, hpage_pmd_size); } -static void collapse_full(void) +static void collapse_full(struct collapse_context *c) { void *p; p = alloc_mapping(); fill_memory(p, 0, hpage_pmd_size); - if (wait_for_scan("Collapse fully populated PTE table", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse fully populated PTE table", p, true); validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); } -static void collapse_empty(void) +static void collapse_empty(struct collapse_context *c) { void *p; p = alloc_mapping(); - if (wait_for_scan("Do not collapse empty PTE table", p)) - fail("Timeout"); - else if (check_huge(p)) - fail("Fail"); - else - success("OK"); + c->collapse("Do not collapse empty PTE table", p, false); munmap(p, hpage_pmd_size); } -static void collapse_single_pte_entry(void) +static void collapse_single_pte_entry(struct collapse_context *c) { void *p; p = alloc_mapping(); fill_memory(p, 0, page_size); - if (wait_for_scan("Collapse PTE table with single PTE entry present", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse PTE table with single PTE entry present", p, + true); validate_memory(p, 0, page_size); munmap(p, hpage_pmd_size); } -static void collapse_max_ptes_none(void) +static void collapse_max_ptes_none(struct collapse_context *c) { int max_ptes_none = hpage_pmd_nr / 2; struct settings settings = default_settings; @@ -586,28 +592,22 @@ static void collapse_max_ptes_none(void) p = alloc_mapping(); fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); - if (wait_for_scan("Do not collapse with max_ptes_none exceeded", p)) - fail("Timeout"); - else if (check_huge(p)) - fail("Fail"); - else - success("OK"); + c->collapse("Maybe collapse with max_ptes_none exceeded", p, + !c->enforce_pte_scan_limits); validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); - fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); - if (wait_for_scan("Collapse with max_ptes_none PTEs empty", p)) - 
fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); - validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); + if (c->enforce_pte_scan_limits) { + fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); + c->collapse("Collapse with max_ptes_none PTEs empty", p, true); + validate_memory(p, 0, + (hpage_pmd_nr - max_ptes_none) * page_size); + } munmap(p, hpage_pmd_size); write_settings(&default_settings); } -static void collapse_swapin_single_pte(void) +static void collapse_swapin_single_pte(struct collapse_context *c) { void *p; p = alloc_mapping(); @@ -625,18 +625,13 @@ static void collapse_swapin_single_pte(void) goto out; } - if (wait_for_scan("Collapse with swapping in single PTE entry", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse with swapping in single PTE entry", p, true); validate_memory(p, 0, hpage_pmd_size); out: munmap(p, hpage_pmd_size); } -static void collapse_max_ptes_swap(void) +static void collapse_max_ptes_swap(struct collapse_context *c) { int max_ptes_swap = read_num("khugepaged/max_ptes_swap"); void *p; @@ -656,39 +651,34 @@ static void collapse_max_ptes_swap(void) goto out; } - if (wait_for_scan("Do not collapse with max_ptes_swap exceeded", p)) - fail("Timeout"); - else if (check_huge(p)) - fail("Fail"); - else - success("OK"); + c->collapse("Maybe collapse with max_ptes_swap exceeded", p, + !c->enforce_pte_scan_limits); validate_memory(p, 0, hpage_pmd_size); - fill_memory(p, 0, hpage_pmd_size); - printf("Swapout %d of %d pages...", max_ptes_swap, hpage_pmd_nr); - if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) { - perror("madvise(MADV_PAGEOUT)"); - exit(EXIT_FAILURE); - } - if (check_swap(p, max_ptes_swap * page_size)) { - success("OK"); - } else { - fail("Fail"); - goto out; - } + if (c->enforce_pte_scan_limits) { + fill_memory(p, 0, hpage_pmd_size); + printf("Swapout %d of %d pages...", max_ptes_swap, + hpage_pmd_nr); + if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) { + perror("madvise(MADV_PAGEOUT)"); + exit(EXIT_FAILURE); + } + if (check_swap(p, max_ptes_swap * page_size)) { + success("OK"); + } else { + fail("Fail"); + goto out; + } - if (wait_for_scan("Collapse with max_ptes_swap pages swapped out", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); - validate_memory(p, 0, hpage_pmd_size); + c->collapse("Collapse with max_ptes_swap pages swapped out", p, + true); + validate_memory(p, 0, hpage_pmd_size); + } out: munmap(p, hpage_pmd_size); } -static void collapse_single_pte_entry_compound(void) +static void collapse_single_pte_entry_compound(struct collapse_context *c) { void *p; @@ -710,17 +700,13 @@ static void collapse_single_pte_entry_compound(void) else fail("Fail"); - if (wait_for_scan("Collapse PTE table with single PTE mapping compound page", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse PTE table with single PTE mapping compound page", + p, true); validate_memory(p, 0, page_size); munmap(p, hpage_pmd_size); } -static void collapse_full_of_compound(void) +static void collapse_full_of_compound(struct collapse_context *c) { void *p; @@ -742,17 +728,12 @@ static void collapse_full_of_compound(void) else fail("Fail"); - if (wait_for_scan("Collapse PTE table full of compound pages", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse PTE table full of compound 
pages", p, true); validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); } -static void collapse_compound_extreme(void) +static void collapse_compound_extreme(struct collapse_context *c) { void *p; int i; @@ -798,18 +779,14 @@ static void collapse_compound_extreme(void) else fail("Fail"); - if (wait_for_scan("Collapse PTE table full of different compound pages", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse PTE table full of different compound pages", p, + true); validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); } -static void collapse_fork(void) +static void collapse_fork(struct collapse_context *c) { int wstatus; void *p; @@ -835,13 +812,8 @@ static void collapse_fork(void) fail("Fail"); fill_memory(p, page_size, 2 * page_size); - - if (wait_for_scan("Collapse PTE table with single page shared with parent process", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse PTE table with single page shared with parent process", + p, true); validate_memory(p, 0, page_size); munmap(p, hpage_pmd_size); @@ -860,7 +832,7 @@ static void collapse_fork(void) munmap(p, hpage_pmd_size); } -static void collapse_fork_compound(void) +static void collapse_fork_compound(struct collapse_context *c) { int wstatus; void *p; @@ -896,14 +868,10 @@ static void collapse_fork_compound(void) fill_memory(p, 0, page_size); write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1); - if (wait_for_scan("Collapse PTE table full of compound pages in child", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Collapse PTE table full of compound pages in child", + p, true); write_num("khugepaged/max_ptes_shared", - default_settings.khugepaged.max_ptes_shared); + default_settings.khugepaged.max_ptes_shared); validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); @@ -922,7 +890,7 @@ static void collapse_fork_compound(void) munmap(p, hpage_pmd_size); } -static void collapse_max_ptes_shared() +static void collapse_max_ptes_shared(struct collapse_context *c) { int max_ptes_shared = read_num("khugepaged/max_ptes_shared"); int wstatus; @@ -957,28 +925,22 @@ static void collapse_max_ptes_shared() else fail("Fail"); - if (wait_for_scan("Do not collapse with max_ptes_shared exceeded", p)) - fail("Timeout"); - else if (!check_huge(p)) - success("OK"); - else - fail("Fail"); - - printf("Trigger CoW on page %d of %d...", - hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr); - fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared) * page_size); - if (!check_huge(p)) - success("OK"); - else - fail("Fail"); - - - if (wait_for_scan("Collapse with max_ptes_shared PTEs shared", p)) - fail("Timeout"); - else if (check_huge(p)) - success("OK"); - else - fail("Fail"); + c->collapse("Maybe collapse with max_ptes_shared exceeded", p, + !c->enforce_pte_scan_limits); + + if (c->enforce_pte_scan_limits) { + printf("Trigger CoW on page %d of %d...", + hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr); + fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared) * + page_size); + if (!check_huge(p)) + success("OK"); + else + fail("Fail"); + + c->collapse("Collapse with max_ptes_shared PTEs shared", + p, true); + } validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); @@ -999,6 +961,8 @@ static void collapse_max_ptes_shared() int main(void) { + struct collapse_context c; + setbuf(stdout, NULL); page_size = getpagesize(); @@ -1014,18 +978,23 @@ int 
main(void) adjust_settings(); alloc_at_fault(); - collapse_full(); - collapse_empty(); - collapse_single_pte_entry(); - collapse_max_ptes_none(); - collapse_swapin_single_pte(); - collapse_max_ptes_swap(); - collapse_single_pte_entry_compound(); - collapse_full_of_compound(); - collapse_compound_extreme(); - collapse_fork(); - collapse_fork_compound(); - collapse_max_ptes_shared(); + + printf("\n*** Testing context: khugepaged ***\n"); + c.collapse = &khugepaged_collapse; + c.enforce_pte_scan_limits = true; + + collapse_full(&c); + collapse_empty(&c); + collapse_single_pte_entry(&c); + collapse_max_ptes_none(&c); + collapse_swapin_single_pte(&c); + collapse_max_ptes_swap(&c); + collapse_single_pte_entry_compound(&c); + collapse_full_of_compound(&c); + collapse_compound_extreme(&c); + collapse_fork(&c); + collapse_fork_compound(&c); + collapse_max_ptes_shared(&c); restore_settings(0); } From eb0d3a0545085e316fc4737e6873a9e5374c3e9c Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:33 -0700 Subject: [PATCH 270/321] selftests/vm: dedup hugepage allocation logic The code p = alloc_mapping(); printf("Allocate huge page..."); madvise(p, hpage_pmd_size, MADV_HUGEPAGE); fill_memory(p, 0, hpage_pmd_size); if (check_huge(p)) success("OK"); else fail("Fail"); Is repeated many times in different tests. Add a helper, alloc_hpage() to handle this. Link: https://lkml.kernel.org/r/20220706235936.2197195-16-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/khugepaged.c | 62 +++++++++---------------- 1 file changed, 23 insertions(+), 39 deletions(-) diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c index 0f1bee0eff24e..eb6f5bbacff13 100644 --- a/tools/testing/selftests/vm/khugepaged.c +++ b/tools/testing/selftests/vm/khugepaged.c @@ -461,6 +461,25 @@ static void fill_memory(int *p, unsigned long start, unsigned long end) p[i * page_size / sizeof(*p)] = i + 0xdead0000; } +/* + * Returns pmd-mapped hugepage in VMA marked VM_HUGEPAGE, filled with + * validate_memory()'able contents. 
+ */ +static void *alloc_hpage(void) +{ + void *p; + + p = alloc_mapping(); + printf("Allocate huge page..."); + madvise(p, hpage_pmd_size, MADV_HUGEPAGE); + fill_memory(p, 0, hpage_pmd_size); + if (check_huge(p)) + success("OK"); + else + fail("Fail"); + return p; +} + static void validate_memory(int *p, unsigned long start, unsigned long end) { int i; @@ -682,15 +701,7 @@ static void collapse_single_pte_entry_compound(struct collapse_context *c) { void *p; - p = alloc_mapping(); - - printf("Allocate huge page..."); - madvise(p, hpage_pmd_size, MADV_HUGEPAGE); - fill_memory(p, 0, hpage_pmd_size); - if (check_huge(p)) - success("OK"); - else - fail("Fail"); + p = alloc_hpage(); madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); printf("Split huge page leaving single PTE mapping compound page..."); @@ -710,16 +721,7 @@ static void collapse_full_of_compound(struct collapse_context *c) { void *p; - p = alloc_mapping(); - - printf("Allocate huge page..."); - madvise(p, hpage_pmd_size, MADV_HUGEPAGE); - fill_memory(p, 0, hpage_pmd_size); - if (check_huge(p)) - success("OK"); - else - fail("Fail"); - + p = alloc_hpage(); printf("Split huge page leaving single PTE page table full of compound pages..."); madvise(p, page_size, MADV_NOHUGEPAGE); madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); @@ -837,16 +839,7 @@ static void collapse_fork_compound(struct collapse_context *c) int wstatus; void *p; - p = alloc_mapping(); - - printf("Allocate huge page..."); - madvise(p, hpage_pmd_size, MADV_HUGEPAGE); - fill_memory(p, 0, hpage_pmd_size); - if (check_huge(p)) - success("OK"); - else - fail("Fail"); - + p = alloc_hpage(); printf("Share huge page over fork()..."); if (!fork()) { /* Do not touch settings on child exit */ @@ -896,16 +889,7 @@ static void collapse_max_ptes_shared(struct collapse_context *c) int wstatus; void *p; - p = alloc_mapping(); - - printf("Allocate huge page..."); - madvise(p, hpage_pmd_size, MADV_HUGEPAGE); - fill_memory(p, 0, hpage_pmd_size); - if (check_huge(p)) - success("OK"); - else - fail("Fail"); - + p = alloc_hpage(); printf("Share huge page over fork()..."); if (!fork()) { /* Do not touch settings on child exit */ From bce69e53e62298f83ec1194caeee98f8bcef1fb2 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:34 -0700 Subject: [PATCH 271/321] selftests/vm: add MADV_COLLAPSE collapse context to selftests Add madvise collapse context to hugepage collapse selftests. This context is tested with /sys/kernel/mm/transparent_hugepage/enabled set to "never" in order to avoid unwanted interaction with khugepaged during testing. Also, refactor updates to sysfs THP settings using a stack so that the THP settings from nested callers can be restored. Link: https://lkml.kernel.org/r/20220706235936.2197195-17-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/khugepaged.c | 171 +++++++++++++++++------- 1 file changed, 125 insertions(+), 46 deletions(-) diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c index eb6f5bbacff13..780f04440e154 100644 --- a/tools/testing/selftests/vm/khugepaged.c +++ b/tools/testing/selftests/vm/khugepaged.c @@ -14,6 +14,9 @@ #ifndef MADV_PAGEOUT #define MADV_PAGEOUT 21 #endif +#ifndef MADV_COLLAPSE +#define MADV_COLLAPSE 25 +#endif #define BASE_ADDR ((void *)(1UL << 30)) static unsigned long hpage_pmd_size; @@ -95,18 +98,6 @@ struct settings { struct khugepaged_settings khugepaged; }; -static struct settings default_settings = { - .thp_enabled = THP_MADVISE, - .thp_defrag = THP_DEFRAG_ALWAYS, - .shmem_enabled = SHMEM_NEVER, - .use_zero_page = 0, - .khugepaged = { - .defrag = 1, - .alloc_sleep_millisecs = 10, - .scan_sleep_millisecs = 10, - }, -}; - static struct settings saved_settings; static bool skip_settings_restore; @@ -284,6 +275,39 @@ static void write_settings(struct settings *settings) write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan); } +#define MAX_SETTINGS_DEPTH 4 +static struct settings settings_stack[MAX_SETTINGS_DEPTH]; +static int settings_index; + +static struct settings *current_settings(void) +{ + if (!settings_index) { + printf("Fail: No settings set"); + exit(EXIT_FAILURE); + } + return settings_stack + settings_index - 1; +} + +static void push_settings(struct settings *settings) +{ + if (settings_index >= MAX_SETTINGS_DEPTH) { + printf("Fail: Settings stack exceeded"); + exit(EXIT_FAILURE); + } + settings_stack[settings_index++] = *settings; + write_settings(current_settings()); +} + +static void pop_settings(void) +{ + if (settings_index <= 0) { + printf("Fail: Settings stack empty"); + exit(EXIT_FAILURE); + } + --settings_index; + write_settings(current_settings()); +} + static void restore_settings(int sig) { if (skip_settings_restore) @@ -327,14 +351,6 @@ static void save_settings(void) signal(SIGQUIT, restore_settings); } -static void adjust_settings(void) -{ - - printf("Adjust settings..."); - write_settings(&default_settings); - success("OK"); -} - #define MAX_LINE_LENGTH 500 static bool check_for_pattern(FILE *fp, char *pattern, char *buf) @@ -493,6 +509,38 @@ static void validate_memory(int *p, unsigned long start, unsigned long end) } } +static void madvise_collapse(const char *msg, char *p, bool expect) +{ + int ret; + struct settings settings = *current_settings(); + + printf("%s...", msg); + /* Sanity check */ + if (check_huge(p)) { + printf("Unexpected huge page\n"); + exit(EXIT_FAILURE); + } + + /* + * Prevent khugepaged interference and tests that MADV_COLLAPSE + * ignores /sys/kernel/mm/transparent_hugepage/enabled + */ + settings.thp_enabled = THP_NEVER; + push_settings(&settings); + + /* Clear VM_NOHUGEPAGE */ + madvise(p, hpage_pmd_size, MADV_HUGEPAGE); + ret = madvise(p, hpage_pmd_size, MADV_COLLAPSE); + if (((bool)ret) == expect) + fail("Fail: Bad return value"); + else if (check_huge(p) != expect) + fail("Fail: check_huge()"); + else + success("OK"); + + pop_settings(); +} + #define TICK 500000 static bool wait_for_scan(const char *msg, char *p) { @@ -542,11 +590,11 @@ static void 
khugepaged_collapse(const char *msg, char *p, bool expect) static void alloc_at_fault(void) { - struct settings settings = default_settings; + struct settings settings = *current_settings(); char *p; settings.thp_enabled = THP_ALWAYS; - write_settings(&settings); + push_settings(&settings); p = alloc_mapping(); *p = 1; @@ -556,7 +604,7 @@ static void alloc_at_fault(void) else fail("Fail"); - write_settings(&default_settings); + pop_settings(); madvise(p, page_size, MADV_DONTNEED); printf("Split huge PMD on MADV_DONTNEED..."); @@ -602,11 +650,11 @@ static void collapse_single_pte_entry(struct collapse_context *c) static void collapse_max_ptes_none(struct collapse_context *c) { int max_ptes_none = hpage_pmd_nr / 2; - struct settings settings = default_settings; + struct settings settings = *current_settings(); void *p; settings.khugepaged.max_ptes_none = max_ptes_none; - write_settings(&settings); + push_settings(&settings); p = alloc_mapping(); @@ -623,7 +671,7 @@ static void collapse_max_ptes_none(struct collapse_context *c) } munmap(p, hpage_pmd_size); - write_settings(&default_settings); + pop_settings(); } static void collapse_swapin_single_pte(struct collapse_context *c) @@ -703,7 +751,6 @@ static void collapse_single_pte_entry_compound(struct collapse_context *c) p = alloc_hpage(); madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); - printf("Split huge page leaving single PTE mapping compound page..."); madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED); if (!check_huge(p)) @@ -864,7 +911,7 @@ static void collapse_fork_compound(struct collapse_context *c) c->collapse("Collapse PTE table full of compound pages in child", p, true); write_num("khugepaged/max_ptes_shared", - default_settings.khugepaged.max_ptes_shared); + current_settings()->khugepaged.max_ptes_shared); validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); @@ -943,9 +990,21 @@ static void collapse_max_ptes_shared(struct collapse_context *c) munmap(p, hpage_pmd_size); } -int main(void) +int main(int argc, const char **argv) { struct collapse_context c; + struct settings default_settings = { + .thp_enabled = THP_MADVISE, + .thp_defrag = THP_DEFRAG_ALWAYS, + .shmem_enabled = SHMEM_NEVER, + .use_zero_page = 0, + .khugepaged = { + .defrag = 1, + .alloc_sleep_millisecs = 10, + .scan_sleep_millisecs = 10, + }, + }; + const char *tests = argc == 1 ? 
"all" : argv[1]; setbuf(stdout, NULL); @@ -959,26 +1018,46 @@ int main(void) default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8; save_settings(); - adjust_settings(); + push_settings(&default_settings); alloc_at_fault(); - printf("\n*** Testing context: khugepaged ***\n"); - c.collapse = &khugepaged_collapse; - c.enforce_pte_scan_limits = true; - - collapse_full(&c); - collapse_empty(&c); - collapse_single_pte_entry(&c); - collapse_max_ptes_none(&c); - collapse_swapin_single_pte(&c); - collapse_max_ptes_swap(&c); - collapse_single_pte_entry_compound(&c); - collapse_full_of_compound(&c); - collapse_compound_extreme(&c); - collapse_fork(&c); - collapse_fork_compound(&c); - collapse_max_ptes_shared(&c); + if (!strcmp(tests, "khugepaged") || !strcmp(tests, "all")) { + printf("\n*** Testing context: khugepaged ***\n"); + c.collapse = &khugepaged_collapse; + c.enforce_pte_scan_limits = true; + + collapse_full(&c); + collapse_empty(&c); + collapse_single_pte_entry(&c); + collapse_max_ptes_none(&c); + collapse_swapin_single_pte(&c); + collapse_max_ptes_swap(&c); + collapse_single_pte_entry_compound(&c); + collapse_full_of_compound(&c); + collapse_compound_extreme(&c); + collapse_fork(&c); + collapse_fork_compound(&c); + collapse_max_ptes_shared(&c); + } + if (!strcmp(tests, "madvise") || !strcmp(tests, "all")) { + printf("\n*** Testing context: madvise ***\n"); + c.collapse = &madvise_collapse; + c.enforce_pte_scan_limits = false; + + collapse_full(&c); + collapse_empty(&c); + collapse_single_pte_entry(&c); + collapse_max_ptes_none(&c); + collapse_swapin_single_pte(&c); + collapse_max_ptes_swap(&c); + collapse_single_pte_entry_compound(&c); + collapse_full_of_compound(&c); + collapse_compound_extreme(&c); + collapse_fork(&c); + collapse_fork_compound(&c); + collapse_max_ptes_shared(&c); + } restore_settings(0); } From ce1465bc3cf5f2a980c4346fa496c2061291b643 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:35 -0700 Subject: [PATCH 272/321] selftests/vm: add selftest to verify recollapse of THPs Add selftest specific to madvise collapse context that tests MADV_COLLAPSE is "successful" if a hugepage-aligned/sized region is already pmd-mapped. This test also verifies that MADV_COLLAPSE can collapse memory into THPs even in "madvise" THP mode and the memory isn't marked VM_HUGEPAGE. Link: https://lkml.kernel.org/r/20220706235936.2197195-18-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/khugepaged.c | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c index 780f04440e154..87cd0b99477f0 100644 --- a/tools/testing/selftests/vm/khugepaged.c +++ b/tools/testing/selftests/vm/khugepaged.c @@ -990,6 +990,36 @@ static void collapse_max_ptes_shared(struct collapse_context *c) munmap(p, hpage_pmd_size); } +static void madvise_collapse_existing_thps(void) +{ + void *p; + int err; + + p = alloc_mapping(); + fill_memory(p, 0, hpage_pmd_size); + + printf("Collapse fully populated PTE table..."); + /* + * Note that we don't set MADV_HUGEPAGE here, which + * also tests that VM_HUGEPAGE isn't required for + * MADV_COLLAPSE in "madvise" mode. + */ + err = madvise(p, hpage_pmd_size, MADV_COLLAPSE); + if (err == 0 && check_huge(p)) { + success("OK"); + printf("Re-collapse PMD-mapped hugepage"); + err = madvise(p, hpage_pmd_size, MADV_COLLAPSE); + if (err == 0 && check_huge(p)) + success("OK"); + else + fail("Fail"); + } else { + fail("Fail"); + } + validate_memory(p, 0, hpage_pmd_size); + munmap(p, hpage_pmd_size); +} + int main(int argc, const char **argv) { struct collapse_context c; @@ -1057,6 +1087,7 @@ int main(int argc, const char **argv) collapse_fork(&c); collapse_fork_compound(&c); collapse_max_ptes_shared(&c); + madvise_collapse_existing_thps(); } restore_settings(0); From 0e9f775fb0b4adf1a4ef50ea156ead09a0fc5194 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Wed, 6 Jul 2022 16:59:36 -0700 Subject: [PATCH 273/321] selftests/vm: add selftest to verify multi THP collapse Add support to allocate and verify collapse of multiple hugepage-sized regions into multiple THPs. Add "nr" argument to check_huge() that instructs check_huge() to check for exactly "nr_hpages" THPs. This has the added benefit of now being able to check for exactly 0 THPs, and so callsites that previously checked the negation of exactly 1 THP are now more correct. ->collapse struct collapse_context hook has been expanded with a "nr_hpages" argument to collapse "nr_hpages" hugepages. The collapse_full() test has been repurposed to collapse 4 THPs at once. It is expected more tests will want to test multi THP collapse (e.g. file/shmem). This is of particular benefit to madvise collapse context given that it may do many THP collapses during a single syscall. Link: https://lkml.kernel.org/r/20220706235936.2197195-19-zokeefe@google.com Signed-off-by: Zach O'Keefe Cc: Alex Shi Cc: Andrea Arcangeli Cc: Arnd Bergmann Cc: Axel Rasmussen Cc: Chris Kennelly Cc: Chris Zankel Cc: David Hildenbrand Cc: David Rientjes Cc: Helge Deller Cc: Hugh Dickins Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: "Kirill A. 
Shutemov" Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Miaohe Lin Cc: Michal Hocko Cc: Minchan Kim Cc: Pasha Tatashin Cc: Pavel Begunkov Cc: Peter Xu Cc: Rongwei Wang Cc: SeongJae Park Cc: Song Liu Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton --- tools/testing/selftests/vm/khugepaged.c | 140 ++++++++++++------------ 1 file changed, 73 insertions(+), 67 deletions(-) diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c index 87cd0b99477f0..b77b1e28cdb38 100644 --- a/tools/testing/selftests/vm/khugepaged.c +++ b/tools/testing/selftests/vm/khugepaged.c @@ -27,7 +27,7 @@ static int hpage_pmd_nr; #define PID_SMAPS "/proc/self/smaps" struct collapse_context { - void (*collapse)(const char *msg, char *p, bool expect); + void (*collapse)(const char *msg, char *p, int nr_hpages, bool expect); bool enforce_pte_scan_limits; }; @@ -362,7 +362,7 @@ static bool check_for_pattern(FILE *fp, char *pattern, char *buf) return false; } -static bool check_huge(void *addr) +static bool check_huge(void *addr, int nr_hpages) { bool thp = false; int ret; @@ -387,7 +387,7 @@ static bool check_huge(void *addr) goto err_out; ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "AnonHugePages:%10ld kB", - hpage_pmd_size >> 10); + nr_hpages * (hpage_pmd_size >> 10)); if (ret >= MAX_LINE_LENGTH) { printf("%s: Pattern is too long\n", __func__); exit(EXIT_FAILURE); @@ -455,12 +455,12 @@ static bool check_swap(void *addr, unsigned long size) return swap; } -static void *alloc_mapping(void) +static void *alloc_mapping(int nr) { void *p; - p = mmap(BASE_ADDR, hpage_pmd_size, PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + p = mmap(BASE_ADDR, nr * hpage_pmd_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (p != BASE_ADDR) { printf("Failed to allocate VMA at %p\n", BASE_ADDR); exit(EXIT_FAILURE); @@ -485,11 +485,11 @@ static void *alloc_hpage(void) { void *p; - p = alloc_mapping(); + p = alloc_mapping(1); printf("Allocate huge page..."); madvise(p, hpage_pmd_size, MADV_HUGEPAGE); fill_memory(p, 0, hpage_pmd_size); - if (check_huge(p)) + if (check_huge(p, 1)) success("OK"); else fail("Fail"); @@ -509,14 +509,15 @@ static void validate_memory(int *p, unsigned long start, unsigned long end) } } -static void madvise_collapse(const char *msg, char *p, bool expect) +static void madvise_collapse(const char *msg, char *p, int nr_hpages, + bool expect) { int ret; struct settings settings = *current_settings(); printf("%s...", msg); /* Sanity check */ - if (check_huge(p)) { + if (!check_huge(p, 0)) { printf("Unexpected huge page\n"); exit(EXIT_FAILURE); } @@ -529,11 +530,11 @@ static void madvise_collapse(const char *msg, char *p, bool expect) push_settings(&settings); /* Clear VM_NOHUGEPAGE */ - madvise(p, hpage_pmd_size, MADV_HUGEPAGE); - ret = madvise(p, hpage_pmd_size, MADV_COLLAPSE); + madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); + ret = madvise(p, nr_hpages * hpage_pmd_size, MADV_COLLAPSE); if (((bool)ret) == expect) fail("Fail: Bad return value"); - else if (check_huge(p) != expect) + else if (check_huge(p, nr_hpages) != expect) fail("Fail: check_huge()"); else success("OK"); @@ -542,25 +543,25 @@ static void madvise_collapse(const char *msg, char *p, bool expect) } #define TICK 500000 -static bool wait_for_scan(const char *msg, char *p) +static bool wait_for_scan(const char *msg, char *p, int nr_hpages) { int full_scans; int timeout = 6; /* 3 seconds */ /* Sanity check */ - if 
(check_huge(p)) { + if (!check_huge(p, 0)) { printf("Unexpected huge page\n"); exit(EXIT_FAILURE); } - madvise(p, hpage_pmd_size, MADV_HUGEPAGE); + madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); /* Wait until the second full_scan completed */ full_scans = read_num("khugepaged/full_scans") + 2; printf("%s...", msg); while (timeout--) { - if (check_huge(p)) + if (check_huge(p, nr_hpages)) break; if (read_num("khugepaged/full_scans") >= full_scans) break; @@ -568,20 +569,21 @@ static bool wait_for_scan(const char *msg, char *p) usleep(TICK); } - madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); + madvise(p, nr_hpages * hpage_pmd_size, MADV_NOHUGEPAGE); return timeout == -1; } -static void khugepaged_collapse(const char *msg, char *p, bool expect) +static void khugepaged_collapse(const char *msg, char *p, int nr_hpages, + bool expect) { - if (wait_for_scan(msg, p)) { + if (wait_for_scan(msg, p, nr_hpages)) { if (expect) fail("Timeout"); else success("OK"); return; - } else if (check_huge(p) == expect) { + } else if (check_huge(p, nr_hpages) == expect) { success("OK"); } else { fail("Fail"); @@ -596,10 +598,10 @@ static void alloc_at_fault(void) settings.thp_enabled = THP_ALWAYS; push_settings(&settings); - p = alloc_mapping(); + p = alloc_mapping(1); *p = 1; printf("Allocate huge page on fault..."); - if (check_huge(p)) + if (check_huge(p, 1)) success("OK"); else fail("Fail"); @@ -608,7 +610,7 @@ static void alloc_at_fault(void) madvise(p, page_size, MADV_DONTNEED); printf("Split huge PMD on MADV_DONTNEED..."); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); @@ -618,20 +620,23 @@ static void alloc_at_fault(void) static void collapse_full(struct collapse_context *c) { void *p; + int nr_hpages = 4; + unsigned long size = nr_hpages * hpage_pmd_size; - p = alloc_mapping(); - fill_memory(p, 0, hpage_pmd_size); - c->collapse("Collapse fully populated PTE table", p, true); - validate_memory(p, 0, hpage_pmd_size); - munmap(p, hpage_pmd_size); + p = alloc_mapping(nr_hpages); + fill_memory(p, 0, size); + c->collapse("Collapse multiple fully populated PTE table", p, nr_hpages, + true); + validate_memory(p, 0, size); + munmap(p, size); } static void collapse_empty(struct collapse_context *c) { void *p; - p = alloc_mapping(); - c->collapse("Do not collapse empty PTE table", p, false); + p = alloc_mapping(1); + c->collapse("Do not collapse empty PTE table", p, 1, false); munmap(p, hpage_pmd_size); } @@ -639,10 +644,10 @@ static void collapse_single_pte_entry(struct collapse_context *c) { void *p; - p = alloc_mapping(); + p = alloc_mapping(1); fill_memory(p, 0, page_size); c->collapse("Collapse PTE table with single PTE entry present", p, - true); + 1, true); validate_memory(p, 0, page_size); munmap(p, hpage_pmd_size); } @@ -656,16 +661,17 @@ static void collapse_max_ptes_none(struct collapse_context *c) settings.khugepaged.max_ptes_none = max_ptes_none; push_settings(&settings); - p = alloc_mapping(); + p = alloc_mapping(1); fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); - c->collapse("Maybe collapse with max_ptes_none exceeded", p, + c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1, !c->enforce_pte_scan_limits); validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); if (c->enforce_pte_scan_limits) { fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); - c->collapse("Collapse with max_ptes_none PTEs empty", p, true); + c->collapse("Collapse with max_ptes_none PTEs empty", p, 1, + true); validate_memory(p, 0, 
(hpage_pmd_nr - max_ptes_none) * page_size); } @@ -677,7 +683,7 @@ static void collapse_max_ptes_none(struct collapse_context *c) static void collapse_swapin_single_pte(struct collapse_context *c) { void *p; - p = alloc_mapping(); + p = alloc_mapping(1); fill_memory(p, 0, hpage_pmd_size); printf("Swapout one page..."); @@ -692,7 +698,7 @@ static void collapse_swapin_single_pte(struct collapse_context *c) goto out; } - c->collapse("Collapse with swapping in single PTE entry", p, true); + c->collapse("Collapse with swapping in single PTE entry", p, 1, true); validate_memory(p, 0, hpage_pmd_size); out: munmap(p, hpage_pmd_size); @@ -703,7 +709,7 @@ static void collapse_max_ptes_swap(struct collapse_context *c) int max_ptes_swap = read_num("khugepaged/max_ptes_swap"); void *p; - p = alloc_mapping(); + p = alloc_mapping(1); fill_memory(p, 0, hpage_pmd_size); printf("Swapout %d of %d pages...", max_ptes_swap + 1, hpage_pmd_nr); @@ -718,7 +724,7 @@ static void collapse_max_ptes_swap(struct collapse_context *c) goto out; } - c->collapse("Maybe collapse with max_ptes_swap exceeded", p, + c->collapse("Maybe collapse with max_ptes_swap exceeded", p, 1, !c->enforce_pte_scan_limits); validate_memory(p, 0, hpage_pmd_size); @@ -738,7 +744,7 @@ static void collapse_max_ptes_swap(struct collapse_context *c) } c->collapse("Collapse with max_ptes_swap pages swapped out", p, - true); + 1, true); validate_memory(p, 0, hpage_pmd_size); } out: @@ -753,13 +759,13 @@ static void collapse_single_pte_entry_compound(struct collapse_context *c) madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); printf("Split huge page leaving single PTE mapping compound page..."); madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); c->collapse("Collapse PTE table with single PTE mapping compound page", - p, true); + p, 1, true); validate_memory(p, 0, page_size); munmap(p, hpage_pmd_size); } @@ -772,12 +778,12 @@ static void collapse_full_of_compound(struct collapse_context *c) printf("Split huge page leaving single PTE page table full of compound pages..."); madvise(p, page_size, MADV_NOHUGEPAGE); madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); - c->collapse("Collapse PTE table full of compound pages", p, true); + c->collapse("Collapse PTE table full of compound pages", p, 1, true); validate_memory(p, 0, hpage_pmd_size); munmap(p, hpage_pmd_size); } @@ -787,14 +793,14 @@ static void collapse_compound_extreme(struct collapse_context *c) void *p; int i; - p = alloc_mapping(); + p = alloc_mapping(1); for (i = 0; i < hpage_pmd_nr; i++) { printf("\rConstruct PTE page table full of different PTE-mapped compound pages %3d/%d...", i + 1, hpage_pmd_nr); madvise(BASE_ADDR, hpage_pmd_size, MADV_HUGEPAGE); fill_memory(BASE_ADDR, 0, hpage_pmd_size); - if (!check_huge(BASE_ADDR)) { + if (!check_huge(BASE_ADDR, 1)) { printf("Failed to allocate huge page\n"); exit(EXIT_FAILURE); } @@ -823,12 +829,12 @@ static void collapse_compound_extreme(struct collapse_context *c) munmap(BASE_ADDR, hpage_pmd_size); fill_memory(p, 0, hpage_pmd_size); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); - c->collapse("Collapse PTE table full of different compound pages", p, + c->collapse("Collapse PTE table full of different compound pages", p, 1, true); validate_memory(p, 0, hpage_pmd_size); @@ -840,11 +846,11 @@ static void collapse_fork(struct collapse_context *c) int 
wstatus; void *p; - p = alloc_mapping(); + p = alloc_mapping(1); printf("Allocate small page..."); fill_memory(p, 0, page_size); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); @@ -855,14 +861,14 @@ static void collapse_fork(struct collapse_context *c) skip_settings_restore = true; exit_status = 0; - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); fill_memory(p, page_size, 2 * page_size); c->collapse("Collapse PTE table with single page shared with parent process", - p, true); + p, 1, true); validate_memory(p, 0, page_size); munmap(p, hpage_pmd_size); @@ -873,7 +879,7 @@ static void collapse_fork(struct collapse_context *c) exit_status += WEXITSTATUS(wstatus); printf("Check if parent still has small page..."); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); @@ -893,7 +899,7 @@ static void collapse_fork_compound(struct collapse_context *c) skip_settings_restore = true; exit_status = 0; - if (check_huge(p)) + if (check_huge(p, 1)) success("OK"); else fail("Fail"); @@ -901,7 +907,7 @@ static void collapse_fork_compound(struct collapse_context *c) printf("Split huge page PMD in child process..."); madvise(p, page_size, MADV_NOHUGEPAGE); madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); @@ -909,7 +915,7 @@ static void collapse_fork_compound(struct collapse_context *c) write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1); c->collapse("Collapse PTE table full of compound pages in child", - p, true); + p, 1, true); write_num("khugepaged/max_ptes_shared", current_settings()->khugepaged.max_ptes_shared); @@ -922,7 +928,7 @@ static void collapse_fork_compound(struct collapse_context *c) exit_status += WEXITSTATUS(wstatus); printf("Check if parent still has huge page..."); - if (check_huge(p)) + if (check_huge(p, 1)) success("OK"); else fail("Fail"); @@ -943,7 +949,7 @@ static void collapse_max_ptes_shared(struct collapse_context *c) skip_settings_restore = true; exit_status = 0; - if (check_huge(p)) + if (check_huge(p, 1)) success("OK"); else fail("Fail"); @@ -951,26 +957,26 @@ static void collapse_max_ptes_shared(struct collapse_context *c) printf("Trigger CoW on page %d of %d...", hpage_pmd_nr - max_ptes_shared - 1, hpage_pmd_nr); fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); c->collapse("Maybe collapse with max_ptes_shared exceeded", p, - !c->enforce_pte_scan_limits); + 1, !c->enforce_pte_scan_limits); if (c->enforce_pte_scan_limits) { printf("Trigger CoW on page %d of %d...", hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr); fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared) * page_size); - if (!check_huge(p)) + if (check_huge(p, 0)) success("OK"); else fail("Fail"); c->collapse("Collapse with max_ptes_shared PTEs shared", - p, true); + p, 1, true); } validate_memory(p, 0, hpage_pmd_size); @@ -982,7 +988,7 @@ static void collapse_max_ptes_shared(struct collapse_context *c) exit_status += WEXITSTATUS(wstatus); printf("Check if parent still has huge page..."); - if (check_huge(p)) + if (check_huge(p, 1)) success("OK"); else fail("Fail"); @@ -995,7 +1001,7 @@ static void madvise_collapse_existing_thps(void) void *p; int err; - p = alloc_mapping(); + p = alloc_mapping(1); fill_memory(p, 0, hpage_pmd_size); printf("Collapse fully populated PTE table..."); @@ -1005,11 +1011,11 @@ static void madvise_collapse_existing_thps(void) * 
MADV_COLLAPSE in "madvise" mode. */ err = madvise(p, hpage_pmd_size, MADV_COLLAPSE); - if (err == 0 && check_huge(p)) { + if (err == 0 && check_huge(p, 1)) { success("OK"); printf("Re-collapse PMD-mapped hugepage"); err = madvise(p, hpage_pmd_size, MADV_COLLAPSE); - if (err == 0 && check_huge(p)) + if (err == 0 && check_huge(p, 1)) success("OK"); else fail("Fail"); From 72eafe9e69e37d0fecf7265957b3a5eb7870b1eb Mon Sep 17 00:00:00 2001 From: Gang He Date: Fri, 8 Jul 2022 20:02:34 -0700 Subject: [PATCH 274/321] ocfs2: reflink deadlock when clone file to the same directory simultaneously Running reflink from multiple nodes simultaneously to clone a file to the same directory probably triggers a deadlock issue. For example, there is a three node ocfs2 cluster, each node mounts the ocfs2 file system to /mnt/shared, and run the reflink command from each node repeatedly, like reflink "/mnt/shared/test" \ "/mnt/shared/.snapshots/test.`date +%m%d%H%M%S`.`hostname`" then, reflink command process will be hung on each node, and you can't list this file system directory. The problematic reflink command process is blocked at one node, task:reflink state:D stack: 0 pid: 1283 ppid: 4154 Call Trace: __schedule+0x2fd/0x750 schedule+0x2f/0xa0 schedule_timeout+0x1cc/0x310 ? ocfs2_control_cfu+0x50/0x50 [ocfs2_stack_user] ? 0xffffffffc0e3e000 wait_for_completion+0xba/0x140 ? wake_up_q+0xa0/0xa0 __ocfs2_cluster_lock.isra.41+0x3b5/0x820 [ocfs2] ? ocfs2_inode_lock_full_nested+0x1fc/0x960 [ocfs2] ocfs2_inode_lock_full_nested+0x1fc/0x960 [ocfs2] ocfs2_init_security_and_acl+0xbe/0x1d0 [ocfs2] ocfs2_reflink+0x436/0x4c0 [ocfs2] ? ocfs2_reflink_ioctl+0x2ca/0x360 [ocfs2] ocfs2_reflink_ioctl+0x2ca/0x360 [ocfs2] ocfs2_ioctl+0x25e/0x670 [ocfs2] do_vfs_ioctl+0xa0/0x680 ksys_ioctl+0x70/0x80 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x5b/0x1e0 The other reflink command processes are blocked at other nodes, task:reflink state:D stack: 0 pid:29759 ppid: 4088 Call Trace: __schedule+0x2fd/0x750 schedule+0x2f/0xa0 schedule_timeout+0x1cc/0x310 ? ocfs2_control_cfu+0x50/0x50 [ocfs2_stack_user] ? 0xffffffffc0b19000 wait_for_completion+0xba/0x140 ? wake_up_q+0xa0/0xa0 __ocfs2_cluster_lock.isra.41+0x3b5/0x820 [ocfs2] ? ocfs2_inode_lock_full_nested+0x1fc/0x960 [ocfs2] ocfs2_inode_lock_full_nested+0x1fc/0x960 [ocfs2] ocfs2_mv_orphaned_inode_to_new+0x87/0x7e0 [ocfs2] ocfs2_reflink+0x335/0x4c0 [ocfs2] ? ocfs2_reflink_ioctl+0x2ca/0x360 [ocfs2] ocfs2_reflink_ioctl+0x2ca/0x360 [ocfs2] ocfs2_ioctl+0x25e/0x670 [ocfs2] do_vfs_ioctl+0xa0/0x680 ksys_ioctl+0x70/0x80 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x5b/0x1e0 or task:reflink state:D stack: 0 pid:18465 ppid: 4156 Call Trace: __schedule+0x302/0x940 ? usleep_range+0x80/0x80 schedule+0x46/0xb0 schedule_timeout+0xff/0x140 ? ocfs2_control_cfu+0x50/0x50 [ocfs2_stack_user] ? 0xffffffffc0c3b000 __wait_for_common+0xb9/0x170 __ocfs2_cluster_lock.constprop.0+0x1d6/0x860 [ocfs2] ? ocfs2_wait_for_recovery+0x49/0xd0 [ocfs2] ? ocfs2_inode_lock_full_nested+0x30f/0xa50 [ocfs2] ocfs2_inode_lock_full_nested+0x30f/0xa50 [ocfs2] ocfs2_inode_lock_tracker+0xf2/0x2b0 [ocfs2] ? dput+0x32/0x2f0 ocfs2_permission+0x45/0xe0 [ocfs2] inode_permission+0xcc/0x170 link_path_walk.part.0.constprop.0+0x2a2/0x380 ? path_init+0x2c1/0x3f0 path_parentat+0x3c/0x90 filename_parentat+0xc1/0x1d0 ? filename_lookup+0x138/0x1c0 filename_create+0x43/0x160 ocfs2_reflink_ioctl+0xe6/0x380 [ocfs2] ocfs2_ioctl+0x1ea/0x2c0 [ocfs2] ? 
do_sys_openat2+0x81/0x150 __x64_sys_ioctl+0x82/0xb0 do_syscall_64+0x61/0xb0 The deadlock is caused by multiple acquiring the destination directory inode dlm lock in ocfs2_reflink function, we should acquire this directory inode dlm lock at the beginning, and hold this dlm lock until end of the function. Link: https://lkml.kernel.org/r/20210729110230.18983-1-ghe@suse.com Signed-off-by: Gang He Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Joseph Qi Cc: Changwei Ge Cc: Gang He Cc: Jun Piao Signed-off-by: Andrew Morton --- fs/ocfs2/namei.c | 32 +++++++++++++------------------- fs/ocfs2/namei.h | 2 ++ fs/ocfs2/refcounttree.c | 15 +++++++++++---- fs/ocfs2/xattr.c | 12 +----------- fs/ocfs2/xattr.h | 1 + 5 files changed, 28 insertions(+), 34 deletions(-) diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index c75fd54b91854..e3dd30dd3547f 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -2489,6 +2489,7 @@ static int ocfs2_prep_new_orphaned_file(struct inode *dir, } int ocfs2_create_inode_in_orphan(struct inode *dir, + struct buffer_head **dir_bh, int mode, struct inode **new_inode) { @@ -2597,13 +2598,16 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, brelse(new_di_bh); - if (!status) - *new_inode = inode; - ocfs2_free_dir_lookup_result(&orphan_insert); - ocfs2_inode_unlock(dir, 1); - brelse(parent_di_bh); + if (!status) { + *new_inode = inode; + *dir_bh = parent_di_bh; + } else { + ocfs2_inode_unlock(dir, 1); + brelse(parent_di_bh); + } + return status; } @@ -2760,11 +2764,11 @@ int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb, } int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, + struct buffer_head *dir_bh, struct inode *inode, struct dentry *dentry) { int status = 0; - struct buffer_head *parent_di_bh = NULL; handle_t *handle = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_dinode *dir_di, *di; @@ -2778,14 +2782,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)OCFS2_I(inode)->ip_blkno); - status = ocfs2_inode_lock(dir, &parent_di_bh, 1); - if (status < 0) { - if (status != -ENOENT) - mlog_errno(status); - return status; - } - - dir_di = (struct ocfs2_dinode *) parent_di_bh->b_data; + dir_di = (struct ocfs2_dinode *) dir_bh->b_data; if (!dir_di->i_links_count) { /* can't make a file in a deleted directory. */ status = -ENOENT; @@ -2798,7 +2795,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, goto leave; /* get a spot inside the dir. 
*/ - status = ocfs2_prepare_dir_for_insert(osb, dir, parent_di_bh, + status = ocfs2_prepare_dir_for_insert(osb, dir, dir_bh, dentry->d_name.name, dentry->d_name.len, &lookup); if (status < 0) { @@ -2862,7 +2859,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, ocfs2_journal_dirty(handle, di_bh); status = ocfs2_add_entry(handle, dentry, inode, - OCFS2_I(inode)->ip_blkno, parent_di_bh, + OCFS2_I(inode)->ip_blkno, dir_bh, &lookup); if (status < 0) { mlog_errno(status); @@ -2886,10 +2883,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, iput(orphan_dir_inode); leave: - ocfs2_inode_unlock(dir, 1); - brelse(di_bh); - brelse(parent_di_bh); brelse(orphan_dir_bh); ocfs2_free_dir_lookup_result(&lookup); diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h index 9cc891eb874e0..03a2c526e2c1b 100644 --- a/fs/ocfs2/namei.h +++ b/fs/ocfs2/namei.h @@ -24,6 +24,7 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, struct buffer_head *orphan_dir_bh, bool dio); int ocfs2_create_inode_in_orphan(struct inode *dir, + struct buffer_head **dir_bh, int mode, struct inode **new_inode); int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb, @@ -32,6 +33,7 @@ int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *di_bh, int update_isize, loff_t end); int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, + struct buffer_head *dir_bh, struct inode *new_inode, struct dentry *new_dentry); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index e04358a46b680..00ce8fe7e3237 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4252,7 +4252,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, { int error, had_lock; struct inode *inode = d_inode(old_dentry); - struct buffer_head *old_bh = NULL; + struct buffer_head *old_bh = NULL, *dir_bh = NULL; struct inode *new_orphan_inode = NULL; struct ocfs2_lock_holder oh; @@ -4260,7 +4260,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, return -EOPNOTSUPP; - error = ocfs2_create_inode_in_orphan(dir, inode->i_mode, + error = ocfs2_create_inode_in_orphan(dir, &dir_bh, inode->i_mode, &new_orphan_inode); if (error) { mlog_errno(error); @@ -4306,13 +4306,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, /* If the security isn't preserved, we need to re-initialize them. */ if (!preserve) { - error = ocfs2_init_security_and_acl(dir, new_orphan_inode, + error = ocfs2_init_security_and_acl(dir, dir_bh, + new_orphan_inode, &new_dentry->d_name); if (error) mlog_errno(error); } if (!error) { - error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, + error = ocfs2_mv_orphaned_inode_to_new(dir, dir_bh, + new_orphan_inode, new_dentry); if (error) mlog_errno(error); @@ -4330,6 +4332,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, iput(new_orphan_inode); } + if (dir_bh) { + ocfs2_inode_unlock(dir, 1); + brelse(dir_bh); + } + return error; } diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 95d0611c5fc7d..3f23e3a5018ce 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -7203,16 +7203,13 @@ int ocfs2_reflink_xattrs(struct inode *old_inode, /* * Initialize security and acl for a already created inode. * Used for reflink a non-preserve-security file. - * - * It uses common api like ocfs2_xattr_set, so the caller - * must not hold any lock expect i_rwsem. 
*/ int ocfs2_init_security_and_acl(struct inode *dir, + struct buffer_head *dir_bh, struct inode *inode, const struct qstr *qstr) { int ret = 0; - struct buffer_head *dir_bh = NULL; ret = ocfs2_init_security_get(inode, dir, qstr, NULL); if (ret) { @@ -7220,17 +7217,10 @@ int ocfs2_init_security_and_acl(struct inode *dir, goto leave; } - ret = ocfs2_inode_lock(dir, &dir_bh, 0); - if (ret) { - mlog_errno(ret); - goto leave; - } ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL); if (ret) mlog_errno(ret); - ocfs2_inode_unlock(dir, 0); - brelse(dir_bh); leave: return ret; } diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 00308b57f64f1..b27fd8ba00196 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -83,6 +83,7 @@ int ocfs2_reflink_xattrs(struct inode *old_inode, struct buffer_head *new_bh, bool preserve_security); int ocfs2_init_security_and_acl(struct inode *dir, + struct buffer_head *dir_bh, struct inode *inode, const struct qstr *qstr); #endif /* OCFS2_XATTR_H */ From 4c0c3c9f29cdda32a6f47d06cab599b32b5be0b2 Mon Sep 17 00:00:00 2001 From: Wangyan Date: Fri, 8 Jul 2022 20:02:34 -0700 Subject: [PATCH 275/321] ocfs2: clear links count in ocfs2_mknod() if an error occurs In this condition, the inode can not be wiped when error happened. ocfs2_mkdir() ->ocfs2_mknod() ->ocfs2_mknod_locked() ->__ocfs2_mknod_locked() ->ocfs2_set_links_count() // i_links_count is 2 -> ... // an error accrue, goto roll_back or leave. ->ocfs2_commit_trans() ->iput(inode) ->evict() ->ocfs2_evict_inode() ->ocfs2_delete_inode() ->ocfs2_inode_lock() ->ocfs2_inode_lock_update() ->ocfs2_refresh_inode() ->set_nlink(); // inode->i_nlink is 2 now. /* if wipe is 0, it will goto bail_unlock_inode */ ->ocfs2_query_inode_wipe() ->if (inode->i_nlink) return; // wipe is 0. /* inode can not be wiped */ ->ocfs2_wipe_inode() So, we need clear links before the transaction committed. 
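For illustration, a condensed sketch of the resulting error path in ocfs2_mknod()/ocfs2_symlink() (simplified from the diff below, not a literal excerpt):

leave:
	if (handle) {
		/*
		 * Zero the on-disk link count inside the still-open
		 * transaction, so a later iput() sees i_nlink == 0 and
		 * ocfs2_query_inode_wipe() allows the orphan to be wiped.
		 */
		if (status < 0 && new_fe_bh != NULL)
			ocfs2_set_links_count((struct ocfs2_dinode *)
					      new_fe_bh->b_data, 0);
		ocfs2_commit_trans(osb, handle);
	}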
Link: http://lkml.kernel.org/r/d8147c41-fb2b-bdf7-b660-1f3c8448c33f@huawei.com Signed-off-by: Yan Wang Reviewed-by: Jun Piao Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Joseph Qi Cc: Changwei Ge Cc: Gang He Cc: Signed-off-by: Andrew Morton --- fs/ocfs2/namei.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index e3dd30dd3547f..ea27e63ec278f 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -453,8 +453,12 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns, leave: if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && new_fe_bh != NULL) + ocfs2_set_links_count((struct ocfs2_dinode *) + new_fe_bh->b_data, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) @@ -598,6 +602,8 @@ static int __ocfs2_mknod_locked(struct inode *dir, leave: if (status < 0) { if (*new_fe_bh) { + if (fe) + ocfs2_set_links_count(fe, 0); brelse(*new_fe_bh); *new_fe_bh = NULL; } @@ -2027,8 +2033,12 @@ static int ocfs2_symlink(struct user_namespace *mnt_userns, ocfs2_clusters_to_bytes(osb->sb, 1)); if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && new_fe_bh != NULL) + ocfs2_set_links_count((struct ocfs2_dinode *) + new_fe_bh->b_data, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) From 0428b1b03a16818522be92b7348add358dfb3cfb Mon Sep 17 00:00:00 2001 From: Wangyan Date: Fri, 8 Jul 2022 20:02:34 -0700 Subject: [PATCH 276/321] ocfs2: fix ocfs2 corrupt when iputting an inode In this condition, it will cause an bug on error. ocfs2_mkdir() ->ocfs2_mknod() ->ocfs2_mknod_locked() ->__ocfs2_mknod_locked() //Assume inode->i_generation is genN. ->inode->i_generation = osb->s_next_generation++; // The inode lockres has been initialized. ->ocfs2_populate_inode() ->ocfs2_create_new_inode_locks() ->An error happened, returned value is non-zero // free the start_bit x in bg_blkno ->ocfs2_free_suballoc_bits() ->... /* Another process execute mkdir success in this place, and it occupied the start_bit x in bg_blkno which has been freed before. Its inode->i_generation is genN + 1 */ ->iput(inode) ->evict() ->ocfs2_evict_inode() ->ocfs2_delete_inode() ->ocfs2_inode_lock() ->ocfs2_inode_lock_update() /* Bug on here, genN != genN + 1 */ ->mlog_bug_on_msg(inode->i_generation != le32_to_cpu(fe->i_generation)) So, we need not to reclaim the inode when the inode->ip_inode_lockres has been initialized. It will be freed in iput(). 
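Put differently, a simplified sketch of the check added below (the elided arguments are as in the existing call):

	status = __ocfs2_mknod_locked(...);
	if (status < 0 &&
	    !(OCFS2_I(inode)->ip_inode_lockres.l_flags & OCFS2_LOCK_INITIALIZED)) {
		/* lock resource never initialized: safe to hand the bit back now */
		tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
					       inode_ac->ac_bh, suballoc_bit,
					       bg_blkno, 1);
	}
	/* otherwise keep the bit allocated; iput() -> evict() reclaims the inode */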
Link: http://lkml.kernel.org/r/ef080ca3-5d74-e276-17a1-d9e7c7e662c9@huawei.com Fixes: b1529a41f777 ("ocfs2: should reclaim the inode if '__ocfs2_mknod_locked' returns an error") Signed-off-by: Yan Wang Reviewed-by: Jun Piao Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Joseph Qi Cc: Changwei Ge Cc: Gang He Cc: Signed-off-by: Andrew Morton --- fs/ocfs2/namei.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index ea27e63ec278f..7d7f2b8f0554e 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -640,7 +640,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, parent_fe_bh, handle, inode_ac, fe_blkno, suballoc_loc, suballoc_bit); - if (status < 0) { + if (status < 0 && !(OCFS2_I(inode)->ip_inode_lockres.l_flags & + OCFS2_LOCK_INITIALIZED)) { u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit); int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode, inode_ac->ac_bh, suballoc_bit, bg_blkno, 1); From 4dfb80fa3a5a1693a9136082239aee05acb88d13 Mon Sep 17 00:00:00 2001 From: Dan Moulding Date: Fri, 8 Jul 2022 20:02:34 -0700 Subject: [PATCH 277/321] init: add "hostname" kernel parameter The gethostname system call returns the hostname for the current machine. However, the kernel has no mechanism to initially set the current machine's name in such a way as to guarantee that the first userspace process to call gethostname will receive a meaningful result. It relies on some unspecified userspace process to first call sethostname before gethostname can produce a meaningful name. Traditionally the machine's hostname is set from userspace by the init system. The init system, in turn, often relies on a configuration file (say, /etc/hostname) to provide the value that it will supply in the call to sethostname. Consequently, the file system containing /etc/hostname usually must be available before the hostname will be set. There may, however, be earlier userspace processes that could call gethostname before the file system containing /etc/hostname is mounted. Such a process will get some other, likely meaningless, name from gethostname (such as "(none)", "localhost", or "darkstar"). A real-world example where this can happen, and lead to undesirable results, is with mdadm. When assembling arrays, mdadm distinguishes between "local" arrays and "foreign" arrays. A local array is one that properly belongs to the current machine, and a foreign array is one that is (possibly temporarily) attached to the current machine, but properly belongs to some other machine. To determine if an array is local or foreign, mdadm may compare the "homehost" recorded on the array with the current hostname. If mdadm is run before the root file system is mounted, perhaps because the root file system itself resides on an md-raid array, then /etc/hostname isn't yet available and the init system will not yet have called sethostname, causing mdadm to incorrectly conclude that all of the local arrays are foreign. Solving this problem *could* be delegated to the init system. It could be left up to the init system (including any init system that starts within an initramfs, if one is in use) to ensure that sethostname is called before any other userspace process could possibly call gethostname. However, it may not always be obvious which processes could call gethostname (for example, udev itself might not call gethostname, but it could via udev rules invoke processes that do). 
Additionally, the init system has to ensure that the hostname configuration value is stored in some place where it will be readily accessible during early boot. Unfortunately, every init system will attempt to (or has already attempted to) solve this problem in a different, possibly incorrect, way. This makes getting consistently working configurations harder for users. I believe it is better for the kernel to provide the means by which the hostname may be set early, rather than making this a problem for the init system to solve. The option to set the hostname during early startup, via a kernel parameter, provides a simple, reliable way to solve this problem. It also could make system configuration easier for some embedded systems. Link: https://lkml.kernel.org/r/20220505180651.22849-2-dmoulding@me.com Signed-off-by: Dan Moulding Cc: Thomas Gleixner Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/kernel-parameters.txt | 13 +++++++++++++ init/version.c | 17 +++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 2522b11e593f2..5a430286a11db 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1665,6 +1665,19 @@ Valid parameters: "on", "off" Default: "on" + hostname= [KNL] Set the hostname (aka UTS nodename). + Format: + This allows setting the system's hostname during early + startup. This sets the name returned by gethostname. + Using this parameter to set the hostname makes it + possible to ensure the hostname is correctly set before + any userspace processes run, avoiding the possibility + that a process may call gethostname before the hostname + has been explicitly set, resulting in the calling + process getting an incorrect result. The string must + not exceed the maximum allowed hostname length (usually + 64 characters) and will be truncated otherwise. + hlt [BUGS=ARM,SH] hpet= [X86-32,HPET] option to control HPET usage diff --git a/init/version.c b/init/version.c index 1a356f5493e85..66d237d5629c6 100644 --- a/init/version.c +++ b/init/version.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include #include @@ -35,6 +37,21 @@ struct uts_namespace init_uts_ns = { }; EXPORT_SYMBOL_GPL(init_uts_ns); +static int __init early_hostname(char *arg) +{ + size_t bufsize = sizeof(init_uts_ns.name.nodename); + size_t maxlen = bufsize - 1; + + strncpy(init_uts_ns.name.nodename, arg, bufsize); + if (strlen(arg) > maxlen) { + pr_warn("hostname parameter exceeds %zd characters and will be truncated", + maxlen); + init_uts_ns.name.nodename[maxlen] = '\0'; + } + return 0; +} +early_param("hostname", early_hostname); + /* FIXED STRINGS! Don't touch! 
*/ const char linux_banner[] = "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" From f42c9a42eba342083f322acf6706d567cca3830e Mon Sep 17 00:00:00 2001 From: Dan Moulding Date: Fri, 8 Jul 2022 20:02:34 -0700 Subject: [PATCH 278/321] init-add-hostname-kernel-parameter-v2 Link: https://lkml.kernel.org/r/20220506060310.7495-2-dmoulding@me.com Signed-off-by: Dan Moulding Signed-off-by: Andrew Morton --- Documentation/admin-guide/kernel-parameters.txt | 4 ++-- init/version.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 5a430286a11db..2c9c0229b7707 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1665,6 +1665,8 @@ Valid parameters: "on", "off" Default: "on" + hlt [BUGS=ARM,SH] + hostname= [KNL] Set the hostname (aka UTS nodename). Format: This allows setting the system's hostname during early @@ -1678,8 +1680,6 @@ not exceed the maximum allowed hostname length (usually 64 characters) and will be truncated otherwise. - hlt [BUGS=ARM,SH] - hpet= [X86-32,HPET] option to control HPET usage Format: { enable (default) | disable | force | verbose } diff --git a/init/version.c b/init/version.c index 66d237d5629c6..b7f9559d417c7 100644 --- a/init/version.c +++ b/init/version.c @@ -41,12 +41,12 @@ static int __init early_hostname(char *arg) { size_t bufsize = sizeof(init_uts_ns.name.nodename); size_t maxlen = bufsize - 1; + size_t arglen; - strncpy(init_uts_ns.name.nodename, arg, bufsize); - if (strlen(arg) > maxlen) { + arglen = strlcpy(init_uts_ns.name.nodename, arg, bufsize); + if (arglen > maxlen) { pr_warn("hostname parameter exceeds %zd characters and will be truncated", maxlen); - init_uts_ns.name.nodename[maxlen] = '\0'; } return 0; } From 17339b3962a3e4b3cf9b86cede3c5e2f202d6552 Mon Sep 17 00:00:00 2001 From: Andrew Halaney Date: Fri, 8 Jul 2022 20:02:35 -0700 Subject: [PATCH 279/321] init/main.c: silence some -Wunused-parameter warnings There are a bunch of callbacks with unused arguments, go ahead and silence those so "make KCFLAGS=-W init/main.o" is a little quieter. 
Here's a little sample: init/main.c:182:43: warning: unused parameter 'str' [-Wunused-parameter] static int __init set_reset_devices(char *str) Link: https://lkml.kernel.org/r/20210519162341.1275452-1-ahalaney@redhat.com Signed-off-by: Andrew Halaney Cc: Rasmus Villemoes Signed-off-by: Andrew Morton --- init/main.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/init/main.c b/init/main.c index 0ee39cdcfcac9..0f452ae3b20f5 100644 --- a/init/main.c +++ b/init/main.c @@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(static_key_initialized); unsigned int reset_devices; EXPORT_SYMBOL(reset_devices); -static int __init set_reset_devices(char *str) +static int __init set_reset_devices(char *str __always_unused) { reset_devices = 1; return 1; @@ -231,13 +231,13 @@ static bool __init obsolete_checksetup(char *line) unsigned long loops_per_jiffy = (1<<12); EXPORT_SYMBOL(loops_per_jiffy); -static int __init debug_kernel(char *str) +static int __init debug_kernel(char *str __always_unused) { console_loglevel = CONSOLE_LOGLEVEL_DEBUG; return 0; } -static int __init quiet_kernel(char *str) +static int __init quiet_kernel(char *str __always_unused) { console_loglevel = CONSOLE_LOGLEVEL_QUIET; return 0; @@ -474,7 +474,7 @@ static void __init setup_boot_config(void) get_boot_config_from_initrd(NULL); } -static int __init warn_bootconfig(char *str) +static int __init warn_bootconfig(char *str __always_unused) { pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n"); return 0; @@ -503,7 +503,8 @@ static void __init repair_env_string(char *param, char *val) /* Anything after -- gets handed straight to init. */ static int __init set_init_arg(char *param, char *val, - const char *unused, void *arg) + const char *unused __always_unused, + void *arg __always_unused) { unsigned int i; @@ -528,7 +529,8 @@ static int __init set_init_arg(char *param, char *val, * unused parameters (modprobe will find them in /proc/cmdline). */ static int __init unknown_bootoption(char *param, char *val, - const char *unused, void *arg) + const char *unused __always_unused, + void *arg __always_unused) { size_t len = strlen(param); @@ -728,7 +730,8 @@ noinline void __ref rest_init(void) /* Check for early params. */ static int __init do_early_param(char *param, char *val, - const char *unused, void *arg) + const char *unused __always_unused, + void *arg __always_unused) { const struct obs_kernel_param *p; @@ -1347,8 +1350,10 @@ static const char *initcall_level_names[] __initdata = { "late", }; -static int __init ignore_unknown_bootoption(char *param, char *val, - const char *unused, void *arg) +static int __init ignore_unknown_bootoption(char *param __always_unused, + char *val __always_unused, + const char *unused __always_unused, + void *arg __always_unused) { return 0; } @@ -1487,7 +1492,7 @@ void __weak free_initmem(void) free_initmem_default(POISON_FREE_INITMEM); } -static int __ref kernel_init(void *unused) +static int __ref kernel_init(void *unused __always_unused) { int ret; From a507fc49e898ebbb91b37b759e86711fc0e32097 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Wed, 1 Jun 2022 22:02:43 +0530 Subject: [PATCH 280/321] resource: re-factor page_is_ram() Presently page_is_ram() relies on walk_system_ram_range() that performs a walk on kernel iomem resources hierarchy with a dummy callback __is_ram(). Before calling find_next_iomem_res(), walk_system_ram_range() does some book-keeping which can be avoided for page_is_ram() use-case. 
Hence this patch proposes to update page_is_ram() to directly call find_next_iomem_res() with minimal book-keeping needed. To avoid allocating a 'struct resource' the patch also updates find_next_iomem_res() to not return -EINVAL in case 'res == NULL'. Instead our 'struct resource *res' is only populated when its not NULL. Link: https://lkml.kernel.org/r/20220601163243.3806231-1-vaibhav@linux.ibm.com Signed-off-by: Vaibhav Jain Cc: Vaibhav Jain Cc: "Aneesh Kumar K . V" Cc: David Hildenbrand Cc: Dan Williams Cc: Miaohe Lin Cc: Muchun Song Signed-off-by: Andrew Morton --- kernel/resource.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/kernel/resource.c b/kernel/resource.c index 34eaee179689a..ecf6b9a50adc1 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -311,7 +311,7 @@ EXPORT_SYMBOL(release_resource); * * If a resource is found, returns 0 and @*res is overwritten with the part * of the resource that's within [@start..@end]; if none is found, returns - * -ENODEV. Returns -EINVAL for invalid parameters. + * -ENODEV. * * @start: start address of the resource searched for * @end: end address of same resource @@ -328,9 +328,6 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end, { struct resource *p; - if (!res) - return -EINVAL; - if (start >= end) return -EINVAL; @@ -356,7 +353,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end, break; } - if (p) { + if (p && res) { /* copy data */ *res = (struct resource) { .start = max(start, p->start), @@ -474,18 +471,18 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, return ret; } -static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) -{ - return 1; -} - /* * This generic page_is_ram() returns true if specified address is * registered as System RAM in iomem_resource list. */ int __weak page_is_ram(unsigned long pfn) { - return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; + const resource_size_t pfn_res = PFN_PHYS(pfn); + + return find_next_iomem_res(pfn_res, + pfn_res + 1, + IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY, + IORES_DESC_NONE, NULL) == 0; } EXPORT_SYMBOL_GPL(page_is_ram); From eb86eb28601080191224be430674db7fe85834ff Mon Sep 17 00:00:00 2001 From: wuchi Date: Sat, 11 Jun 2022 21:06:34 +0800 Subject: [PATCH 281/321] lib/debugobjects: fix stat count and optimize debug_objects_mem_init. 1. Var debug_objects_allocated tracks valid kmem_cache_alloc calls, so track it in debug_objects_replace_static_objects. Do similar things in object_cpu_offline. 2. In debug_objects_mem_init, there is no need to call function cpuhp_setup_state_nocalls when debug_objects_enabled = 0 (out of memory). 
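For the first point, a condensed view of the accounting now done when a CPU goes offline (simplified from the diff below):

	/* the CPU is dead, so its percpu free objects go back to the slab cache */
	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used       -= percpu_pool->obj_free;	/* no longer in use */
	debug_objects_freed += percpu_pool->obj_free;	/* now counted as freed */
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	percpu_pool->obj_free = 0;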
Link: https://lkml.kernel.org/r/20220611130634.99741-1-wuchi.zero@gmail.com Signed-off-by: wuchi Cc: Thomas Gleixner Cc: Christoph Hellwig Cc: Kees Cook Cc: Waiman Long Signed-off-by: Andrew Morton --- lib/debugobjects.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 337d797a71416..6f8e5dd1dcd0c 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -437,6 +437,7 @@ static int object_cpu_offline(unsigned int cpu) struct debug_percpu_free *percpu_pool; struct hlist_node *tmp; struct debug_obj *obj; + unsigned long flags; /* Remote access is safe as the CPU is dead already */ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); @@ -444,6 +445,12 @@ static int object_cpu_offline(unsigned int cpu) hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } + + raw_spin_lock_irqsave(&pool_lock, flags); + obj_pool_used -= percpu_pool->obj_free; + debug_objects_freed += percpu_pool->obj_free; + raw_spin_unlock_irqrestore(&pool_lock, flags); + percpu_pool->obj_free = 0; return 0; @@ -1318,6 +1325,8 @@ static int __init debug_objects_replace_static_objects(void) hlist_add_head(&obj->node, &objects); } + debug_objects_allocated += i; + /* * debug_objects_mem_init() is now called early that only one CPU is up * and interrupts have been disabled, so it is safe to replace the @@ -1386,6 +1395,7 @@ void __init debug_objects_mem_init(void) debug_objects_enabled = 0; kmem_cache_destroy(obj_cache); pr_warn("out of memory.\n"); + return; } else debug_objects_selftest(); From 3212b70a011856cf9007932844befb9db33aedd1 Mon Sep 17 00:00:00 2001 From: wuchi Date: Sat, 18 Jun 2022 16:25:21 +0800 Subject: [PATCH 282/321] lib/lru_cache: fix error free handing in lc_create When kmem_cache_alloc in function lc_create returns null, we will free the memory already allocated. The loop of kmem_cache_free is wrong, especially: i = 0 ==> do wrong loop i > 0 ==> do not free element[0] Link: https://lkml.kernel.org/r/20220618082521.7082-1-wuchi.zero@gmail.com Signed-off-by: wuchi Cc: Philipp Reisner Cc: Lars Ellenberg Cc: Christoph Bhmwalder Signed-off-by: Andrew Morton --- lib/lru_cache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/lru_cache.c b/lib/lru_cache.c index 52313acbfa628..dc35464216d3c 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c @@ -147,8 +147,8 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, return lc; /* else: could not allocate all elements, give up */ - for (i--; i; i--) { - void *p = element[i]; + while (i) { + void *p = element[--i]; kmem_cache_free(cache, p - e_off); } kfree(lc); From 1322ca5c3b4a564034093be74a4dd16bdab6ff91 Mon Sep 17 00:00:00 2001 From: wuchi Date: Sun, 19 Jun 2022 15:46:41 +0800 Subject: [PATCH 283/321] net, lib/once: remove {net_}get_random_once_wait macro DO_ONCE(func, ...) will call func with spinlock which acquired by spin_lock_irqsave in __do_once_start. But the get_random_once_wait will sleep in get_random_bytes_wait -> wait_for_random_bytes. Fortunately, there is no place to use {net_}get_random_once_wait, so we could remove them simply. Link: https://lkml.kernel.org/r/20220619074641.40916-1-wuchi.zero@gmail.com Signed-off-by: wuchi Acked-by: Jakub Kicinski Cc: David S. 
Miller Cc: Eric Dumazet Cc: Paolo Abeni Signed-off-by: Andrew Morton --- include/linux/net.h | 2 -- include/linux/once.h | 2 -- 2 files changed, 4 deletions(-) diff --git a/include/linux/net.h b/include/linux/net.h index 12093f4db50c4..8613772a1f580 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -303,8 +303,6 @@ do { \ #define net_get_random_once(buf, nbytes) \ get_random_once((buf), (nbytes)) -#define net_get_random_once_wait(buf, nbytes) \ - get_random_once_wait((buf), (nbytes)) /* * E.g. XFS meta- & log-data is in slab pages, or bcache meta diff --git a/include/linux/once.h b/include/linux/once.h index f54523052bbcb..b14d8b309d52b 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -54,7 +54,5 @@ void __do_once_done(bool *done, struct static_key_true *once_key, #define get_random_once(buf, nbytes) \ DO_ONCE(get_random_bytes, (buf), (nbytes)) -#define get_random_once_wait(buf, nbytes) \ - DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ #endif /* _LINUX_ONCE_H */ From 4a6e0a7ad1cde5f54b9b5d8e71b2688f553c258e Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 20 Jun 2022 17:02:49 +0200 Subject: [PATCH 284/321] lib/stackdepot: replace CONFIG_STACK_HASH_ORDER with automatic sizing As Linus explained [1], setting the stackdepot hash table size as a config option is suboptimal, especially as stackdepot becomes a dependency of less "expert" subsystems than initially (e.g. DRM, networking, SLUB_DEBUG): : (a) it introduces a new compile-time question that isn't sane to ask : a regular user, but is now exposed to regular users. : (b) this by default uses 1MB of memory for a feature that didn't in : the past, so now if you have small machines you need to make sure you : make a special kernel config for them. Ideally we would employ rhashtable for fully automatic resizing, which should be feasible for many of the new users, but problematic for the original users with restricted context that call __stack_depot_save() with can_alloc == false, i.e. KASAN. However we can easily remove the config option and scale the hash table automatically with system memory. The STACK_HASH_MASK constant becomes stack_hash_mask variable and is used only in one mask operation, so the overhead should be negligible to none. For early allocation we can employ the existing alloc_large_system_hash() function and perform similar scaling for the late allocation. The existing limits of the config option (between 4k and 1M buckets) are preserved, and scaling factor is set to one bucket per 16kB memory so on 64bit the max 1M buckets (8MB memory) is achieved with 16GB system, while a 1GB system will use 512kB. Because KASAN is reported to need the maximum number of buckets even with smaller amounts of memory [2], set it as such when kasan_enabled(). If needed, the automatic scaling could be complemented with a boot-time kernel parameter, but it feels pointless to add it without a specific use case. 
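To make the scaling concrete, the implied arithmetic is roughly the following ("mem_bytes" is only shorthand here; the code derives the figure from nr_free_buffer_pages()):

	/* one hash bucket per 16 KiB of memory, clamped to [4k, 1M] buckets */
	entries = mem_bytes >> 14;			/* STACK_HASH_SCALE = 14 */
	entries = clamp(entries, 1UL << 12, 1UL << 20);
	/*
	 * 16 GiB system: 2^34 >> 14 = 2^20 buckets * 8 bytes = 8 MiB table
	 *  1 GiB system: 2^30 >> 14 = 2^16 buckets * 8 bytes = 512 KiB table
	 * (64-bit bucket pointers assumed)
	 */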
[1] https://lore.kernel.org/all/CAHk-=wjC5nS+fnf6EzRD9yQRJApAhxx7gRB87ZV+pAWo9oVrTg@mail.gmail.com/ [2] https://lore.kernel.org/all/CACT4Y+Y4GZfXOru2z5tFPzFdaSUd+GFc6KVL=bsa0+1m197cQQ@mail.gmail.com/ Link: https://lkml.kernel.org/r/20220620150249.16814-1-vbabka@suse.cz Signed-off-by: Vlastimil Babka Reported-by: Linus Torvalds Acked-by: Dmitry Vyukov Cc: Marco Elver Cc: Alexander Potapenko Cc: Andrey Konovalov Signed-off-by: Andrew Morton --- lib/Kconfig | 9 -------- lib/stackdepot.c | 59 ++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 49 insertions(+), 19 deletions(-) diff --git a/lib/Kconfig b/lib/Kconfig index eaaad4d85bf24..986ea474836c3 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -685,15 +685,6 @@ config STACKDEPOT_ALWAYS_INIT bool select STACKDEPOT -config STACK_HASH_ORDER - int "stack depot hash size (12 => 4KB, 20 => 1024KB)" - range 12 20 - default 20 - depends on STACKDEPOT - help - Select the hash size as a power of 2 for the stackdepot hash table. - Choose a lower value to reduce the memory impact. - config REF_TRACKER bool depends on STACKTRACE_SUPPORT diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 5ca0d086ef4a3..e73fda23388d8 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -32,6 +32,7 @@ #include #include #include +#include #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) @@ -145,10 +146,16 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc) return stack; } -#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER) -#define STACK_HASH_MASK (STACK_HASH_SIZE - 1) +/* one hash table bucket entry per 16kB of memory */ +#define STACK_HASH_SCALE 14 +/* limited between 4k and 1M buckets */ +#define STACK_HASH_ORDER_MIN 12 +#define STACK_HASH_ORDER_MAX 20 #define STACK_HASH_SEED 0x9747b28c +static unsigned int stack_hash_order; +static unsigned int stack_hash_mask; + static bool stack_depot_disable; static struct stack_record **stack_table; @@ -175,7 +182,7 @@ void __init stack_depot_want_early_init(void) int __init stack_depot_early_init(void) { - size_t size; + unsigned long entries = 0; /* This is supposed to be called only once, from mm_init() */ if (WARN_ON(__stack_depot_early_init_passed)) @@ -183,13 +190,23 @@ int __init stack_depot_early_init(void) __stack_depot_early_init_passed = true; + if (kasan_enabled() && !stack_hash_order) + stack_hash_order = STACK_HASH_ORDER_MAX; + if (!__stack_depot_want_early_init || stack_depot_disable) return 0; - size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); - pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n", - size); - stack_table = memblock_alloc(size, SMP_CACHE_BYTES); + if (stack_hash_order) + entries = 1UL << stack_hash_order; + stack_table = alloc_large_system_hash("stackdepot", + sizeof(struct stack_record *), + entries, + STACK_HASH_SCALE, + HASH_EARLY | HASH_ZERO, + NULL, + &stack_hash_mask, + 1UL << STACK_HASH_ORDER_MIN, + 1UL << STACK_HASH_ORDER_MAX); if (!stack_table) { pr_err("Stack Depot hash table allocation failed, disabling\n"); @@ -207,13 +224,35 @@ int stack_depot_init(void) mutex_lock(&stack_depot_init_mutex); if (!stack_depot_disable && !stack_table) { - pr_info("Stack Depot allocating hash table with kvcalloc\n"); - stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL); + unsigned long entries; + int scale = STACK_HASH_SCALE; + + if (stack_hash_order) { + entries = 1UL << stack_hash_order; + } else { + entries = nr_free_buffer_pages(); + entries = 
roundup_pow_of_two(entries); + + if (scale > PAGE_SHIFT) + entries >>= (scale - PAGE_SHIFT); + else + entries <<= (PAGE_SHIFT - scale); + } + + if (entries < 1UL << STACK_HASH_ORDER_MIN) + entries = 1UL << STACK_HASH_ORDER_MIN; + if (entries > 1UL << STACK_HASH_ORDER_MAX) + entries = 1UL << STACK_HASH_ORDER_MAX; + + pr_info("Stack Depot allocating hash table of %lu entries with kvcalloc\n", + entries); + stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL); if (!stack_table) { pr_err("Stack Depot hash table allocation failed, disabling\n"); stack_depot_disable = true; ret = -ENOMEM; } + stack_hash_mask = entries - 1; } mutex_unlock(&stack_depot_init_mutex); return ret; @@ -386,7 +425,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, goto fast_exit; hash = hash_stack(entries, nr_entries); - bucket = &stack_table[hash & STACK_HASH_MASK]; + bucket = &stack_table[hash & stack_hash_mask]; /* * Fast path: look the stack trace up without locking. From c1ca2ce0a6015c3079ae28670bba81086d9d082e Mon Sep 17 00:00:00 2001 From: wuchi Date: Mon, 20 Jun 2022 18:02:44 +0800 Subject: [PATCH 285/321] lib/error-inject: traverse list with mutex Traversing list without mutex in get_injectable_error_type will race with the following code: list_del_init(&ent->list) kfree(ent) in module_unload_ei_list. So fix that. Link: https://lkml.kernel.org/r/20220620100244.82896-1-wuchi.zero@gmail.com Signed-off-by: wuchi Cc: Masami Hiramatsu (Google) Cc: Martin KaFai Lau Cc: Song Liu Cc: Yonghong Song Cc: John Fastabend Cc: KP Singh Signed-off-by: Andrew Morton --- lib/error-inject.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/error-inject.c b/lib/error-inject.c index 4a4f1278c4191..1afca1b1cdead 100644 --- a/lib/error-inject.c +++ b/lib/error-inject.c @@ -40,12 +40,18 @@ bool within_error_injection_list(unsigned long addr) int get_injectable_error_type(unsigned long addr) { struct ei_entry *ent; + int ei_type = EI_ETYPE_NONE; + mutex_lock(&ei_mutex); list_for_each_entry(ent, &error_injection_list, list) { - if (addr >= ent->start_addr && addr < ent->end_addr) - return ent->etype; + if (addr >= ent->start_addr && addr < ent->end_addr) { + ei_type = ent->etype; + break; + } } - return EI_ETYPE_NONE; + mutex_unlock(&ei_mutex); + + return ei_type; } /* From 81cc7f0d7ed027c4bd857b43a4f81901625e545a Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Fri, 24 Jun 2022 16:14:12 +0200 Subject: [PATCH 286/321] compiler-gcc.h: remove ancient workaround for gcc PR 58670 The workaround for 'asm goto' miscompilation introduces a compiler barrier quirk that inhibits many useful compiler optimizations. For example, __try_cmpxchg_user compiles to: 11375: 41 8b 4d 00 mov 0x0(%r13),%ecx 11379: 41 8b 02 mov (%r10),%eax 1137c: f0 0f b1 0a lock cmpxchg %ecx,(%rdx) 11380: 0f 94 c2 sete %dl 11383: 84 d2 test %dl,%dl 11385: 75 c4 jne 1134b <...> 11387: 41 89 02 mov %eax,(%r10) where the barrier inhibits flags propagation from asm when compiled with gcc-12. When the mentioned quirk is removed, the following code is generated: 11553: 41 8b 4d 00 mov 0x0(%r13),%ecx 11557: 41 8b 02 mov (%r10),%eax 1155a: f0 0f b1 0a lock cmpxchg %ecx,(%rdx) 1155e: 74 c9 je 11529 <...> 11560: 41 89 02 mov %eax,(%r10) The refered compiler bug: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 was fixed for gcc-4.8.2. Current minimum required version of GCC is version 5.1 which has the above 'asm goto' miscompilation fixed, so remove the workaround. 
Link: https://lkml.kernel.org/r/20220624141412.72274-1-ubizjak@gmail.com Signed-off-by: Uros Bizjak Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/compiler-gcc.h | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index a0c55eeaeaf16..9b157b71036f1 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -66,17 +66,6 @@ __builtin_unreachable(); \ } while (0) -/* - * GCC 'asm goto' miscompiles certain code sequences: - * - * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 - * - * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. - * - * (asm goto is automatically volatile - the naming reflects this.) - */ -#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) - #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ From 9b54d9c4227a836afb2e78ed0fb0fd655bbc2215 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 24 Jun 2022 08:30:04 +0300 Subject: [PATCH 287/321] kfifo: fix kfifo_to_user() return type The kfifo_to_user() macro is supposed to return zero for success or negative error codes. Unfortunately, there is a signedness bug so it returns unsigned int. This only affects callers which try to save the result in ssize_t and as far as I can see the only place which does that is line6_hwdep_read(). TL;DR: s/_uint/_int/. Link: https://lkml.kernel.org/r/YrVL3OJVLlNhIMFs@kili Fixes: 144ecf310eb5 ("kfifo: fix kfifo_alloc() to return a signed int value") Signed-off-by: Dan Carpenter Cc: Stefani Seibold Cc: Randy Dunlap Signed-off-by: Andrew Morton --- include/linux/kfifo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 86249476b57f4..0b35a41440ff1 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -688,7 +688,7 @@ __kfifo_uint_must_check_helper( \ * writer, you don't need extra locking to use these macro. */ #define kfifo_to_user(fifo, to, len, copied) \ -__kfifo_uint_must_check_helper( \ +__kfifo_int_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ void __user *__to = (to); \ From 9209a48e6ca15244b3c0a09e6e3170e05b2b64df Mon Sep 17 00:00:00 2001 From: wuchi Date: Sat, 25 Jun 2022 21:53:24 +0800 Subject: [PATCH 288/321] lib/radix-tree: remove unused argument of insert_entries insert_entries() doesn't use the 'bool replace' argument, and the function is only used locally, remove the argument. The historical context of the unused argument is as follow: 2: commit <3a08cd52c37c79> (radix tree: Remove multiorder support) Remove the code related to macro CONFIG_RADIX_TREE_MULTIORDER to convert to the xArray. Without the macro, there is no need to retain the argument. 1: commit <175542f575723e> (radix-tree: add radix_tree_join) Add insert_entries(..., bool replace) function, depending on the macro CONFIG_RADIX_TREE_MULTIORDER definition, the implementation is different. Notice that the implementation without the macro doesn't use the argument. 
[Matthew Wilcox: add historical context for argument] Link: https://lkml.kernel.org/r/20220625135324.72574-1-wuchi.zero@gmail.com Signed-off-by: wuchi Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- lib/radix-tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index b3afafe46fffb..3c78e1e8b2ad6 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -677,7 +677,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) } static inline int insert_entries(struct radix_tree_node *node, - void __rcu **slot, void *item, bool replace) + void __rcu **slot, void *item) { if (*slot) return -EEXIST; @@ -711,7 +711,7 @@ int radix_tree_insert(struct radix_tree_root *root, unsigned long index, if (error) return error; - error = insert_entries(node, slot, item, false); + error = insert_entries(node, slot, item); if (error < 0) return error; From 6ddf592ff11d41c644e580a384a1c2823a2025ed Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 27 Jun 2022 11:02:45 +0800 Subject: [PATCH 289/321] lib: make LZ4_decompress_safe_forceExtDict() static LZ4_decompress_safe_forceExtDict() is only used in lib/lz4/lz4_decompress.c, make it static to fix the build warning about "no previous prototype" [1]. [1] https://lore.kernel.org/lkml/202206260948.akgsho1q-lkp@intel.com/ Link: https://lkml.kernel.org/r/1656298965-8698-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Tiezhu Yang Reported-by: kernel test robot Signed-off-by: Andrew Morton --- lib/lz4/lz4_decompress.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index fd1728d94babb..59fe69a638000 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -507,9 +507,9 @@ static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest, (BYTE *)dest - prefixSize, NULL, 0); } -int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, - int compressedSize, int maxOutputSize, - const void *dictStart, size_t dictSize) +static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + const void *dictStart, size_t dictSize) { return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, From 26dcd2c3c43a5e0d4cd9488fe75327505d2f1c7d Mon Sep 17 00:00:00 2001 From: wuchi Date: Wed, 29 Jun 2022 11:02:41 +0800 Subject: [PATCH 290/321] lib/scatterlist: use matched parameter type when calling __sg_free_table() commit 4635873c561a ("scsi: lib/sg_pool.c: improve APIs for allocating sg pool") changeed @(bool)skip_first_chunk of __sg_free_table() to @(unsigned int)nents_first_chunk, so use unsigend int type instead of bool type (false -> 0) when calling the function in sg_free_append_table() and sg_free_table(). 
Link: https://lkml.kernel.org/r/20220629030241.84559-1-wuchi.zero@gmail.com Signed-off-by: wuchi Reviewed-by: Ming Lei Cc: Maor Gottlieb Cc: Christoph Hellwig Signed-off-by: Andrew Morton --- lib/scatterlist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/scatterlist.c b/lib/scatterlist.c index d5e82e4a57ad0..c8c3d675845c3 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -240,7 +240,7 @@ EXPORT_SYMBOL(__sg_free_table); **/ void sg_free_append_table(struct sg_append_table *table) { - __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree, + __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree, table->total_nents); } EXPORT_SYMBOL(sg_free_append_table); @@ -253,7 +253,7 @@ EXPORT_SYMBOL(sg_free_append_table); **/ void sg_free_table(struct sg_table *table) { - __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree, + __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree, table->orig_nents); } EXPORT_SYMBOL(sg_free_table); From b8d255d6d408b4a311b2faa6ed4a115606345ea2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 4 Jul 2022 22:53:25 +0100 Subject: [PATCH 291/321] lib/ts_bm.c: remove redundant store to variable consumed after addition There is no need to store the result of the addition back to variable consumed after the addition. The store is redundant, replace += with just + Cleans up clang scan build warning: lib/ts_bm.c:83:11: warning: Although the value stored to 'consumed' is used in the enclosing expression, the value is never actually read from 'consumed' [deadcode.DeadStores] Link: https://lkml.kernel.org/r/20220704215325.600993-1-colin.i.king@gmail.com Signed-off-by: Colin Ian King Signed-off-by: Andrew Morton --- lib/ts_bm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 4cf250031f0f0..1f2234221dd11 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -80,7 +80,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) /* London calling... */ DEBUGP("found!\n"); - return consumed += (shift-(bm->patlen-1)); + return consumed + (shift-(bm->patlen-1)); next: bs = bm->bad_shift[text[shift-i]]; From 16a8e26dcd5ae43cd5ab98fdc166e9b73468c23c Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 17 Jun 2022 16:38:09 +0800 Subject: [PATCH 292/321] Revert "squashfs: provide backing_dev_info in order to disable read-ahead" Patch series "Implement readahead for squashfs", v7. Commit 9eec1d897139("squashfs: provide backing_dev_info in order to disable read-ahead") mitigates the performance drop issue for squashfs by closing readahead for it. This series implements readahead callback for squashfs. This patch (of 4): This reverts 9eec1d897139e5 ("squashfs: provide backing_dev_info in order to disable read-ahead"). Revert closing the readahead to squashfs since the readahead callback for squashfs is implemented. 
Link: https://lkml.kernel.org/r/20220617083810.337573-1-hsinyi@chromium.org Link: https://lkml.kernel.org/r/20220617083810.337573-2-hsinyi@chromium.org Signed-off-by: Hsin-Yi Wang Suggested-by: Xiongwei Song Cc: Phillip Lougher Cc: Matthew Wilcox Cc: Marek Szyprowski Cc: Zheng Liang Cc: Zhang Yi Cc: Hou Tao Cc: Miao Xie Cc: kernel test robot Signed-off-by: Andrew Morton --- fs/squashfs/super.c | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 6d594ba2ed28f..32565dafa7f3b 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "squashfs_fs.h" #include "squashfs_fs_sb.h" @@ -113,24 +112,6 @@ static const struct squashfs_decompressor *supported_squashfs_filesystem( return decompressor; } -static int squashfs_bdi_init(struct super_block *sb) -{ - int err; - unsigned int major = MAJOR(sb->s_dev); - unsigned int minor = MINOR(sb->s_dev); - - bdi_put(sb->s_bdi); - sb->s_bdi = &noop_backing_dev_info; - - err = super_setup_bdi_name(sb, "squashfs_%u_%u", major, minor); - if (err) - return err; - - sb->s_bdi->ra_pages = 0; - sb->s_bdi->io_pages = 0; - - return 0; -} static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) { @@ -146,20 +127,6 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) TRACE("Entered squashfs_fill_superblock\n"); - /* - * squashfs provides 'backing_dev_info' in order to disable read-ahead. For - * squashfs, I/O is not deferred, it is done immediately in read_folio, - * which means the user would always have to wait their own I/O. So the effect - * of readahead is very weak for squashfs. squashfs_bdi_init will set - * sb->s_bdi->ra_pages and sb->s_bdi->io_pages to 0 and close readahead for - * squashfs. - */ - err = squashfs_bdi_init(sb); - if (err) { - errorf(fc, "squashfs init bdi failed"); - return err; - } - sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL); if (sb->s_fs_info == NULL) { ERROR("Failed to allocate squashfs_sb_info\n"); From 613471e83e8a67034067553b69eac712f6f671ea Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Fri, 17 Jun 2022 16:38:11 +0800 Subject: [PATCH 293/321] squashfs: always build "file direct" version of page actor Squashfs_readahead uses the "file direct" version of the page actor, and so build it unconditionally. 
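For a sense of the geometry involved, the per-request arithmetic looks roughly like this (illustrative numbers, assuming the common 128 KiB block size on 4 KiB pages):

	shift     = msblk->block_log - PAGE_SHIFT;	/* 17 - 12 = 5 */
	max_pages = 1UL << shift;			/* 32 pages per datablock */
	mask      = (1UL << msblk->block_log) - 1;	/* align request to a datablock */
	index     = page->index >> shift;		/* datablock covering this page */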
Link: https://lkml.kernel.org/r/20220617083810.337573-3-hsinyi@chromium.org Signed-off-by: Phillip Lougher Signed-off-by: Hsin-Yi Wang Reported-by: kernel test robot Cc: Hou Tao Cc: Marek Szyprowski Cc: Matthew Wilcox Cc: Miao Xie Cc: Xiongwei Song Cc: Zhang Yi Cc: Zheng Liang Signed-off-by: Andrew Morton --- fs/squashfs/Makefile | 4 ++-- fs/squashfs/page_actor.h | 46 ---------------------------------------- 2 files changed, 2 insertions(+), 48 deletions(-) diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile index 7bd9b8b856d0b..477c89a519ee8 100644 --- a/fs/squashfs/Makefile +++ b/fs/squashfs/Makefile @@ -5,9 +5,9 @@ obj-$(CONFIG_SQUASHFS) += squashfs.o squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o -squashfs-y += namei.o super.o symlink.o decompressor.o +squashfs-y += namei.o super.o symlink.o decompressor.o page_actor.o squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o -squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o +squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h index 37523c54256fa..24841d28bc0fb 100644 --- a/fs/squashfs/page_actor.h +++ b/fs/squashfs/page_actor.h @@ -6,51 +6,6 @@ * Phillip Lougher */ -#ifndef CONFIG_SQUASHFS_FILE_DIRECT -struct squashfs_page_actor { - void **page; - int pages; - int length; - int next_page; -}; - -static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page, - int pages, int length) -{ - struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); - - if (actor == NULL) - return NULL; - - actor->length = length ? : pages * PAGE_SIZE; - actor->page = page; - actor->pages = pages; - actor->next_page = 0; - return actor; -} - -static inline void *squashfs_first_page(struct squashfs_page_actor *actor) -{ - actor->next_page = 1; - return actor->page[0]; -} - -static inline void *squashfs_next_page(struct squashfs_page_actor *actor) -{ - return actor->next_page == actor->pages ? NULL : - actor->page[actor->next_page++]; -} - -static inline void squashfs_finish_page(struct squashfs_page_actor *actor) -{ - /* empty */ -} - -static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor) -{ - /* empty */ -} -#else struct squashfs_page_actor { union { void **buffer; @@ -91,4 +46,3 @@ static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor) actor->alloc_buffer = 0; } #endif -#endif From 84467ed96af1841245a727f4ea89e1fb3da84f1f Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 17 Jun 2022 16:38:13 +0800 Subject: [PATCH 294/321] squashfs: implement readahead Implement readahead callback for squashfs. It will read datablocks which cover pages in readahead request. For a few cases it will not mark page as uptodate, including: - file end is 0. - zero filled blocks. - current batch of pages isn't in the same datablock. - decompressor error. Otherwise pages will be marked as uptodate. The unhandled pages will be updated by readpage later. 
Link: https://lkml.kernel.org/r/20220617083810.337573-4-hsinyi@chromium.org Signed-off-by: Hsin-Yi Wang Suggested-by: Matthew Wilcox Reported-by: Matthew Wilcox Reported-by: Phillip Lougher Reported-by: Xiongwei Song Reported-by: Andrew Morton Cc: Hou Tao Cc: kernel test robot Cc: Marek Szyprowski Cc: Miao Xie Cc: Zhang Yi Cc: Zheng Liang Signed-off-by: Andrew Morton --- fs/squashfs/file.c | 92 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 91 insertions(+), 1 deletion(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index a8e495d8eb860..128ebe9aded87 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -39,6 +39,7 @@ #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" +#include "page_actor.h" /* * Locate cache slot in range [offset, index] for specified inode. If @@ -495,7 +496,96 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) return 0; } +static void squashfs_readahead(struct readahead_control *ractl) +{ + struct inode *inode = ractl->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + size_t mask = (1UL << msblk->block_log) - 1; + unsigned short shift = msblk->block_log - PAGE_SHIFT; + loff_t start = readahead_pos(ractl) & ~mask; + size_t len = readahead_length(ractl) + readahead_pos(ractl) - start; + struct squashfs_page_actor *actor; + unsigned int nr_pages = 0; + struct page **pages; + int i, file_end = i_size_read(inode) >> msblk->block_log; + unsigned int max_pages = 1UL << shift; + + readahead_expand(ractl, start, (len | mask) + 1); + + if (file_end == 0) + return; + + pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL); + if (!pages) + return; + + for (;;) { + pgoff_t index; + int res, bsize; + u64 block = 0; + unsigned int expected; + + nr_pages = __readahead_batch(ractl, pages, max_pages); + if (!nr_pages) + break; + + if (readahead_pos(ractl) >= i_size_read(inode)) + goto skip_pages; + + index = pages[0]->index >> shift; + if ((pages[nr_pages - 1]->index >> shift) != index) + goto skip_pages; + + expected = index == file_end ? + (i_size_read(inode) & (msblk->block_size - 1)) : + msblk->block_size; + + bsize = read_blocklist(inode, index, &block); + if (bsize == 0) + goto skip_pages; + + actor = squashfs_page_actor_init_special(msblk, pages, nr_pages, + expected); + if (!actor) + goto skip_pages; + + res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor); + + kfree(actor); + + if (res == expected) { + int bytes; + + /* Last page (if present) may have trailing bytes not filled */ + bytes = res % PAGE_SIZE; + if (pages[nr_pages - 1]->index == file_end && bytes) + memzero_page(pages[nr_pages - 1], bytes, + PAGE_SIZE - bytes); + + for (i = 0; i < nr_pages; i++) { + flush_dcache_page(pages[i]); + SetPageUptodate(pages[i]); + } + } + + for (i = 0; i < nr_pages; i++) { + unlock_page(pages[i]); + put_page(pages[i]); + } + } + + kfree(pages); + return; + +skip_pages: + for (i = 0; i < nr_pages; i++) { + unlock_page(pages[i]); + put_page(pages[i]); + } + kfree(pages); +} const struct address_space_operations squashfs_aops = { - .read_folio = squashfs_read_folio + .read_folio = squashfs_read_folio, + .readahead = squashfs_readahead }; From f4c0b837bb9e022a2283811b159a47e0d7c7b2a7 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Fri, 17 Jun 2022 16:38:15 +0800 Subject: [PATCH 295/321] squashfs: support reading fragments in readahead call Add a function which can be used to read fragments in the readahead call. 
This function is necessary because filesystems built with the -tailends (or -always-use-fragments) option may have fragments present which cannot be currently handled. Link: https://lkml.kernel.org/r/20220617083810.337573-5-hsinyi@chromium.org Signed-off-by: Phillip Lougher Signed-off-by: Hsin-Yi Wang Cc: Hou Tao Cc: kernel test robot Cc: Marek Szyprowski Cc: Matthew Wilcox Cc: Miao Xie Cc: Xiongwei Song Cc: Zhang Yi Cc: Zheng Liang Signed-off-by: Andrew Morton --- fs/squashfs/file.c | 47 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 128ebe9aded87..7ff0b03cceab0 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -496,6 +496,41 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) return 0; } +static int squashfs_readahead_fragment(struct page **page, + unsigned int pages, unsigned int expected) +{ + struct inode *inode = page[0]->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; + + if (buffer->error) + goto out; + + expected += squashfs_i(inode)->fragment_offset; + + for (n = 0; n < pages; n++) { + unsigned int base = (page[n]->index & mask) << PAGE_SHIFT; + unsigned int offset = base + squashfs_i(inode)->fragment_offset; + + if (expected > offset) { + unsigned int avail = min_t(unsigned int, expected - + offset, PAGE_SIZE); + + squashfs_fill_page(page[n], buffer, offset, avail); + } + + unlock_page(page[n]); + put_page(page[n]); + } + +out: + squashfs_cache_put(buffer); + return buffer->error; +} + static void squashfs_readahead(struct readahead_control *ractl) { struct inode *inode = ractl->mapping->host; @@ -512,9 +547,6 @@ static void squashfs_readahead(struct readahead_control *ractl) readahead_expand(ractl, start, (len | mask) + 1); - if (file_end == 0) - return; - pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL); if (!pages) return; @@ -540,6 +572,15 @@ static void squashfs_readahead(struct readahead_control *ractl) (i_size_read(inode) & (msblk->block_size - 1)) : msblk->block_size; + if (index == file_end && squashfs_i(inode)->fragment_block != + SQUASHFS_INVALID_BLK) { + res = squashfs_readahead_fragment(pages, nr_pages, + expected); + if (res) + goto skip_pages; + continue; + } + bsize = read_blocklist(inode, index, &block); if (bsize == 0) goto skip_pages; From 78ba47eab5d4c3cd47e03ac273db486667c857af Mon Sep 17 00:00:00 2001 From: Stephen Brennan Date: Mon, 16 May 2022 17:05:07 -0700 Subject: [PATCH 296/321] kallsyms: move declarations to internal header Patch series "Expose kallsyms data in vmcoreinfo note". The kernel can be configured to contain a lot of introspection or debugging information built-in, such as ORC for unwinding stack traces, BTF for type information, and of course kallsyms. Debuggers could use this information to navigate a core dump or live system, but they need to be able to find it. This patch series adds the necessary symbols into vmcoreinfo, which would allow a debugger to find and interpret the kallsyms table. Using the kallsyms data, the debugger can then lookup any symbol, allowing it to find ORC, BTF, or any other useful data. This would allow a live kernel, or core dump, to be debugged without any DWARF debuginfo. 
This is useful for many cases: the debuginfo may not have been generated, or you may not want to deploy the large files everywhere you need them. I've demonstrated a proof of concept for this at LSF/MM+BPF during a lighting talk. Using a work-in-progress branch of the drgn debugger, and an extended set of BTF generated by a patched version of dwarves, I've been able to open a core dump without any DWARF info and do basic tasks such as enumerating slab caches, block devices, tasks, and doing backtraces. I hope this series can be a first step toward a new possibility of "DWARFless debugging". Related discussion around the BTF side of this: https://lore.kernel.org/bpf/586a6288-704a-f7a7-b256-e18a675927df@oracle.com/T/#u Some work-in-progress branches using this feature: https://github.com/brenns10/dwarves/tree/remove_percpu_restriction_1 https://github.com/brenns10/drgn/tree/kallsyms_plus_btf This patch (of 2): To include kallsyms data in the vmcoreinfo note, we must make the symbol declarations visible outside of kallsyms.c. Move these to a new internal header file. Link: https://lkml.kernel.org/r/20220517000508.777145-1-stephen.s.brennan@oracle.com Link: https://lkml.kernel.org/r/20220517000508.777145-2-stephen.s.brennan@oracle.com Signed-off-by: Stephen Brennan Acked-by: Baoquan He Cc: Nick Desaulniers Cc: Dave Young Cc: Kees Cook Cc: Jiri Olsa Cc: Stephen Boyd Cc: Bixuan Cui Cc: David Vernet Cc: Vivek Goyal Cc: Sami Tolvanen Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton --- kernel/kallsyms.c | 23 +---------------------- kernel/kallsyms_internal.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 22 deletions(-) create mode 100644 kernel/kallsyms_internal.h diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index fbdf8d3279aca..510fba0ba5b4a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -31,28 +31,7 @@ #include #include -/* - * These will be re-linked against their real values - * during the second link stage. - */ -extern const unsigned long kallsyms_addresses[] __weak; -extern const int kallsyms_offsets[] __weak; -extern const u8 kallsyms_names[] __weak; - -/* - * Tell the compiler that the count isn't in the small data section if the arch - * has one (eg: FRV). - */ -extern const unsigned int kallsyms_num_syms -__section(".rodata") __attribute__((weak)); - -extern const unsigned long kallsyms_relative_base -__section(".rodata") __attribute__((weak)); - -extern const char kallsyms_token_table[] __weak; -extern const u16 kallsyms_token_index[] __weak; - -extern const unsigned int kallsyms_markers[] __weak; +#include "kallsyms_internal.h" /* * Expand a compressed symbol data into the resulting uncompressed string, diff --git a/kernel/kallsyms_internal.h b/kernel/kallsyms_internal.h new file mode 100644 index 0000000000000..2d0c6f2f0243a --- /dev/null +++ b/kernel/kallsyms_internal.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef LINUX_KALLSYMS_INTERNAL_H_ +#define LINUX_KALLSYMS_INTERNAL_H_ + +#include + +/* + * These will be re-linked against their real values + * during the second link stage. + */ +extern const unsigned long kallsyms_addresses[] __weak; +extern const int kallsyms_offsets[] __weak; +extern const u8 kallsyms_names[] __weak; + +/* + * Tell the compiler that the count isn't in the small data section if the arch + * has one (eg: FRV). 
+ */ +extern const unsigned int kallsyms_num_syms +__section(".rodata") __attribute__((weak)); + +extern const unsigned long kallsyms_relative_base +__section(".rodata") __attribute__((weak)); + +extern const char kallsyms_token_table[] __weak; +extern const u16 kallsyms_token_index[] __weak; + +extern const unsigned int kallsyms_markers[] __weak; + +#endif // LINUX_KALLSYMS_INTERNAL_H_ From 4b6fea8f98fa0be6d63167501435c466ffe95c76 Mon Sep 17 00:00:00 2001 From: Stephen Brennan Date: Mon, 16 May 2022 17:05:08 -0700 Subject: [PATCH 297/321] vmcoreinfo: include kallsyms symbols The internal kallsyms tables contain information which could be quite useful to a debugging tool in the absence of other debuginfo. If kallsyms is enabled, then a debugging tool could parse it and use it as a fallback symbol table. Combined with BTF data, live & post-mortem debuggers can support basic operations without needing a large DWARF debuginfo file available. As many as five symbols are necessary to properly parse kallsyms names and addresses. Add these to the vmcoreinfo note. CONFIG_KALLSYMS_ABSOLUTE_PERCPU does impact the computation of symbol addresses. However, a debugger can infer this configuration value by comparing the address of _stext in the vmcoreinfo with the address computed via kallsyms. So there's no need to include information about this config value in the vmcoreinfo note. To verify that we're still well below the maximum of 4096 bytes, I created a script[1] to compute a rough upper bound on the possible size of vmcoreinfo. On v5.18-rc7, the script reports 3106 bytes, and with this patch, the maximum become 3370 bytes. [1]: https://github.com/brenns10/kernel_stuff/blob/master/vmcoreinfosize/ Link: https://lkml.kernel.org/r/20220517000508.777145-3-stephen.s.brennan@oracle.com Signed-off-by: Stephen Brennan Acked-by: Baoquan He Cc: Bixuan Cui Cc: Dave Young Cc: David Vernet Cc: "Eric W. Biederman" Cc: Jiri Olsa Cc: Kees Cook Cc: Nick Desaulniers Cc: Sami Tolvanen Cc: Stephen Boyd Cc: Vivek Goyal Signed-off-by: Andrew Morton --- kernel/crash_core.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 71122e01623cc..f64d35e284119 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -15,6 +15,8 @@ #include +#include "kallsyms_internal.h" + /* vmcoreinfo stuff */ unsigned char *vmcoreinfo_data; size_t vmcoreinfo_size; @@ -480,6 +482,18 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE); #endif +#ifdef CONFIG_KALLSYMS + VMCOREINFO_SYMBOL(kallsyms_names); + VMCOREINFO_SYMBOL(kallsyms_token_table); + VMCOREINFO_SYMBOL(kallsyms_token_index); +#ifdef CONFIG_KALLSYMS_BASE_RELATIVE + VMCOREINFO_SYMBOL(kallsyms_offsets); + VMCOREINFO_SYMBOL(kallsyms_relative_base); +#else + VMCOREINFO_SYMBOL(kallsyms_addresses); +#endif /* CONFIG_KALLSYMS_BASE_RELATIVE */ +#endif /* CONFIG_KALLSYMS */ + arch_crash_save_vmcoreinfo(); update_vmcoreinfo_note(); From 5018658be07cd6ece7e50ea2edf347e8b4121752 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 15 Jun 2022 14:22:06 +0300 Subject: [PATCH 298/321] proc: delete unused includes Those aren't necessary after seq files won. 
Link: https://lkml.kernel.org/r/YqnA3mS7KBt8Z4If@localhost.localdomain Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton --- fs/proc/array.c | 1 - fs/proc/inode.c | 2 -- fs/proc/kmsg.c | 1 - fs/proc/nommu.c | 1 - fs/proc/proc_net.c | 3 --- fs/proc/proc_tty.c | 2 -- fs/proc/root.c | 3 --- fs/proc/vmcore.c | 1 - 8 files changed, 14 deletions(-) diff --git a/fs/proc/array.c b/fs/proc/array.c index eb815759842ce..65fa603422e04 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -69,7 +69,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 73aeb4e6d32e5..fd40d60169b5a 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -26,8 +26,6 @@ #include #include -#include - #include "internal.h" static void proc_evict_inode(struct inode *inode) diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c index b38ad552887fb..592e6dc7c1102 100644 --- a/fs/proc/kmsg.c +++ b/fs/proc/kmsg.c @@ -15,7 +15,6 @@ #include #include -#include #include extern wait_queue_head_t log_wait; diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 13452b32e2bd5..4d3493579458f 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include "internal.h" diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 913e5acefbb66..bbce6fbe779c8 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -8,9 +8,6 @@ * * proc net directory handling functions */ - -#include - #include #include #include diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c index c69ff191e5d8f..5c6a5ceab2f1b 100644 --- a/fs/proc/proc_tty.c +++ b/fs/proc/proc_tty.c @@ -4,8 +4,6 @@ * * Copyright 1997, Theodore Ts'o */ - -#include #include #include #include diff --git a/fs/proc/root.c b/fs/proc/root.c index c7e3b1350ef84..5a7d15d197f8e 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -6,9 +6,6 @@ * * proc root directory handling functions */ - -#include - #include #include #include diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 4eaeb645e7596..f2aa86c421f2d 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include From 48c49d5ef7051962df9d0ac7f3d5db6fe0a2f4b7 Mon Sep 17 00:00:00 2001 From: Tao Liu Date: Mon, 27 Jun 2022 15:44:41 +0800 Subject: [PATCH 299/321] kdump: round up the total memory size to 128M for crashkernel reservation The total memory size we get in kernel is usually slightly less than the actual memory size because BIOS/firmware will reserve some memory region. So it won't export all memory as usable. E.g, on my x86_64 kvm guest with 1G memory, the total_mem value shows: UEFI boot with ovmf: 0x3faef000 Legacy boot kvm guest: 0x3ff7ec00 When specifying crashkernel=1G-2G:128M, if we have a 1G memory machine, we get total size 1023M from firmware. Then it will not fall into 1G-2G, thus no memory reserved. User will never know this, it is hard to let user know the exact total value in kernel. One way is to use dmi/smbios to get physical memory size, but it's not reliable as well. According to Prarit hardware vendors sometimes screw this up. Thus round up total size to 128M to work around this problem. This patch is a resend of [1] and rebased onto v5.19-rc2, and the original credit goes to Dave Young. 
[1]: http://lists.infradead.org/pipermail/kexec/2018-April/020568.html Link: https://lkml.kernel.org/r/20220627074440.187222-1-ltao@redhat.com Signed-off-by: Tao Liu Acked-by: Baoquan He Cc: Dave Young Signed-off-by: Andrew Morton --- kernel/crash_core.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/kernel/crash_core.c b/kernel/crash_core.c index f64d35e284119..07b26df453a97 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -45,6 +46,15 @@ static int __init parse_crashkernel_mem(char *cmdline, unsigned long long *crash_base) { char *cur = cmdline, *tmp; + unsigned long long total_mem = system_ram; + + /* + * Firmware sometimes reserves some memory regions for its own use, + * so the system memory size is less than the actual physical memory + * size. Work around this by rounding up the total size to 128M, + * which is enough for most test cases. + */ + total_mem = roundup(total_mem, SZ_128M); /* for each entry of the comma-separated list */ do { @@ -89,13 +99,13 @@ static int __init parse_crashkernel_mem(char *cmdline, return -EINVAL; } cur = tmp; - if (size >= system_ram) { + if (size >= total_mem) { pr_warn("crashkernel: invalid size\n"); return -EINVAL; } /* match ? */ - if (system_ram >= start && system_ram < end) { + if (total_mem >= start && total_mem < end) { *crash_size = size; break; } From c14379fe16f56a027d367cdc061c795320f0af39 Mon Sep 17 00:00:00 2001 From: Yu Zhe Date: Tue, 28 Jun 2022 10:12:51 +0800 Subject: [PATCH 300/321] ipc/mqueue: remove unnecessary (void*) conversion Remove unnecessary void* type casting. Link: https://lkml.kernel.org/r/20220628021251.17197-1-yuzhe@nfschina.com Signed-off-by: Yu Zhe Signed-off-by: Andrew Morton --- ipc/mqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 12ad7860bb88f..f98de32aeea17 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -489,7 +489,7 @@ static struct vfsmount *mq_create_mount(struct ipc_namespace *ns) static void init_once(void *foo) { - struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; + struct mqueue_inode_info *p = foo; inode_init_once(&p->vfs_inode); } From dcd8aef03059391239362e91c197d3f2a825f63a Mon Sep 17 00:00:00 2001 From: Benjamin Segall Date: Wed, 15 Jun 2022 14:24:23 -0700 Subject: [PATCH 301/321] epoll: autoremove wakers even more aggressively If a process is killed or otherwise exits while having active network connections and many threads waiting on epoll_wait, the threads will all be woken immediately, but not removed from ep->wq. Then when network traffic scans ep->wq in wake_up, every wakeup attempt will fail, and will not remove the entries from the list. This means that the cost of the wakeup attempt is far higher than usual, does not decrease, and this also competes with the dying threads trying to actually make progress and remove themselves from the wq. Handle this by removing visited epoll wq entries unconditionally, rather than only when the wakeup succeeds - the structure of ep_poll means that the only potential loss is the timed_out->eavail heuristic, which now can race and result in a redundant ep_send_events attempt. (But only when incoming data and a timeout actually race, not on every timeout) Shakeel added: : We are seeing this issue in production with real workloads and it has : caused hard lockups. 
Particularly network heavy workloads with a lot : of threads in epoll_wait() can easily trigger this issue if they get : killed (oom-killed in our case). Link: https://lkml.kernel.org/r/xm26fsjotqda.fsf@google.com Signed-off-by: Ben Segall Cc: Alexander Viro Cc: Linus Torvalds Cc: Shakeel Butt Cc: Eric Dumazet Cc: Roman Penyaev Cc: Jason Baron Cc: Khazhismel Kumykov Cc: Heiher Signed-off-by: Andrew Morton --- fs/eventpoll.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index e2daa940ebce7..8b56b94e2f56f 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1747,6 +1747,21 @@ static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms) return to; } +/* + * autoremove_wake_function, but remove even on failure to wake up, because we + * know that default_wake_function/ttwu will only fail if the thread is already + * woken, and in that case the ep_poll loop will remove the entry anyways, not + * try to reuse it. + */ +static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry, + unsigned int mode, int sync, void *key) +{ + int ret = default_wake_function(wq_entry, mode, sync, key); + + list_del_init(&wq_entry->entry); + return ret; +} + /** * ep_poll - Retrieves ready events, and delivers them to the caller-supplied * event buffer. @@ -1828,8 +1843,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, * normal wakeup path no need to call __remove_wait_queue() * explicitly, thus ep->lock is not taken, which halts the * event delivery. + * + * In fact, we now use an even more aggressive function that + * unconditionally removes, because we don't reuse the wait + * entry between loop iterations. This lets us also avoid the + * performance issue if a process is killed, causing all of its + * threads to wake up without being removed normally. */ init_wait(&wait); + wait.func = ep_autoremove_wake_function; write_lock_irq(&ep->lock); /* From acc32b06569b47f0c1bdcbfccc4f229c0f5acaad Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 1 Jul 2022 14:35:12 +0300 Subject: [PATCH 302/321] scripts/bloat-o-meter: switch argument parsing to using argparse This will facilitate further extension to the arguments the script takes. As an added benefit it also produces saner usage output, where mutual exclusivity of the c|d|t parameters is clearly visible: ./scripts/bloat-o-meter -h usage: bloat-o-meter [-h] [-c | -d | -t] file1 file2 Simple script used to compare the symbol sizes of 2 object files positional arguments: file1 First file to compare file2 Second file to compare optional arguments: -h, --help show this help message and exit -c categorize output based on symbol type -d Show delta of Data Section -t Show delta of text Section Link: https://lkml.kernel.org/r/20220701113513.1938008-1-nborisov@suse.com Signed-off-by: Nikolay Borisov Signed-off-by: Andrew Morton --- scripts/bloat-o-meter | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter index 4dd6a804ce41b..2a360118710e5 100755 --- a/scripts/bloat-o-meter +++ b/scripts/bloat-o-meter @@ -7,18 +7,20 @@ # This software may be used and distributed according to the terms # of the GNU General Public License, incorporated herein by reference. 
-import sys, os, re +import sys, os, re, argparse from signal import signal, SIGPIPE, SIG_DFL signal(SIGPIPE, SIG_DFL) -if len(sys.argv) < 3: - sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0]) - sys.stderr.write("The options are:\n") - sys.stderr.write("-c categorize output based on symbol type\n") - sys.stderr.write("-d Show delta of Data Section\n") - sys.stderr.write("-t Show delta of text Section\n") - sys.exit(-1) +parser = argparse.ArgumentParser(description="Simple script used to compare the symbol sizes of 2 object files") +group = parser.add_mutually_exclusive_group() +group.add_argument('-c', help='categorize output based on symbol type', action='store_true') +group.add_argument('-d', help='Show delta of Data Section', action='store_true') +group.add_argument('-t', help='Show delta of text Section', action='store_true') +parser.add_argument('file1', help='First file to compare') +parser.add_argument('file2', help='Second file to compare') + +args = parser.parse_args() re_NUMBER = re.compile(r'\.[0-9]+') @@ -77,9 +79,9 @@ def calc(oldfile, newfile, format): delta.reverse() return grow, shrink, add, remove, up, down, delta, old, new, otot, ntot -def print_result(symboltype, symbolformat, argc): +def print_result(symboltype, symbolformat): grow, shrink, add, remove, up, down, delta, old, new, otot, ntot = \ - calc(sys.argv[argc - 1], sys.argv[argc], symbolformat) + calc(args.file1, args.file2, symbolformat) print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \ (add, remove, grow, shrink, up, -down, up-down)) @@ -93,13 +95,13 @@ def print_result(symboltype, symbolformat, argc): percent = 0 print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent)) -if sys.argv[1] == "-c": - print_result("Function", "tT", 3) - print_result("Data", "dDbB", 3) - print_result("RO Data", "rR", 3) -elif sys.argv[1] == "-d": - print_result("Data", "dDbBrR", 3) -elif sys.argv[1] == "-t": - print_result("Function", "tT", 3) +if args.c: + print_result("Function", "tT") + print_result("Data", "dDbB") + print_result("RO Data", "rR") +elif args.d: + print_result("Data", "dDbBrR") +elif args.t: + print_result("Function", "tT") else: - print_result("Function", "tTdDbBrR", 2) + print_result("Function", "tTdDbBrR") From db1b7dcb2a203e5266c098eac31e77d3380fb9f9 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 1 Jul 2022 14:35:13 +0300 Subject: [PATCH 303/321] scripts/bloat-o-meter: add -p argument When doing cross platform development on a machine sometimes it might be useful to invoke bloat-o-meter for files which haven't been build with the native toolchain. In cases when the host nm doesn't support the target one then a toolchain-specific nm could be used. 
Add this ability by adding the -p allowing invocations as: ./scripts/bloat-o-meter -p riscv64-unknown-linux-gnu- file1.o file2.o Link: https://lkml.kernel.org/r/20220701113513.1938008-2-nborisov@suse.com Signed-off-by: Nikolay Borisov Signed-off-by: Andrew Morton --- scripts/bloat-o-meter | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter index 2a360118710e5..f9553f60a14a8 100755 --- a/scripts/bloat-o-meter +++ b/scripts/bloat-o-meter @@ -17,6 +17,7 @@ group = parser.add_mutually_exclusive_group() group.add_argument('-c', help='categorize output based on symbol type', action='store_true') group.add_argument('-d', help='Show delta of Data Section', action='store_true') group.add_argument('-t', help='Show delta of text Section', action='store_true') +parser.add_argument('-p', dest='prefix', help='Arch prefix for the tool being used. Useful in cross build scenarios') parser.add_argument('file1', help='First file to compare') parser.add_argument('file2', help='Second file to compare') @@ -26,7 +27,11 @@ re_NUMBER = re.compile(r'\.[0-9]+') def getsizes(file, format): sym = {} - with os.popen("nm --size-sort " + file) as f: + nm = "nm" + if args.prefix: + nm = "{}nm".format(args.prefix) + + with os.popen("{} --size-sort {}".format(nm, file)) as f: for line in f: if line.startswith("\n") or ":" in line: continue From 857ca7aa3fd08dab10803634aceb47d19564da04 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Fri, 1 Jul 2022 13:04:04 +0530 Subject: [PATCH 304/321] kexec_file: drop weak attribute from functions As requested (http://lkml.kernel.org/r/87ee0q7b92.fsf@email.froward.int.ebiederm.org), this series converts weak functions in kexec to use the #ifdef approach. Quoting the 3e35142ef99fe ("kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]") changelog: : Since commit d1bcae833b32f1 ("ELF: Don't generate unused section symbols") : [1], binutils (v2.36+) started dropping section symbols that it thought : were unused. This isn't an issue in general, but with kexec_file.c, gcc : is placing kexec_arch_apply_relocations[_add] into a separate : .text.unlikely section and the section symbol ".text.unlikely" is being : dropped. Due to this, recordmcount is unable to find a non-weak symbol in : .text.unlikely to generate a relocation record against. This patch (of 2); Drop __weak attribute from functions in kexec_file.c: - arch_kexec_kernel_image_probe() - arch_kimage_file_post_load_cleanup() - arch_kexec_kernel_image_load() - arch_kexec_locate_mem_hole() - arch_kexec_kernel_verify_sig() arch_kexec_kernel_image_load() calls into kexec_image_load_default(), so drop the static attribute for the latter. arch_kexec_kernel_verify_sig() is not overridden by any architecture, so drop the __weak attribute. Link: https://lkml.kernel.org/r/cover.1656659357.git.naveen.n.rao@linux.vnet.ibm.com Link: https://lkml.kernel.org/r/2cd7ca1fe4d6bb6ca38e3283c717878388ed6788.1656659357.git.naveen.n.rao@linux.vnet.ibm.com Signed-off-by: Naveen N. 
Rao Suggested-by: Eric Biederman Signed-off-by: Andrew Morton --- arch/arm64/include/asm/kexec.h | 4 ++- arch/powerpc/include/asm/kexec.h | 9 +++++++ arch/s390/include/asm/kexec.h | 3 +++ arch/x86/include/asm/kexec.h | 6 +++++ include/linux/kexec.h | 44 +++++++++++++++++++++++++++----- kernel/kexec_file.c | 35 ++----------------------- 6 files changed, 61 insertions(+), 40 deletions(-) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 9839bfc163d71..78d272b26ebd1 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -115,7 +115,9 @@ extern const struct kexec_file_ops kexec_image_ops; struct kimage; -extern int arch_kimage_file_post_load_cleanup(struct kimage *image); +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + extern int load_other_segments(struct kimage *image, unsigned long kernel_load_addr, unsigned long kernel_size, char *initrd, unsigned long initrd_len, diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 2aefe14e14422..1e5e9b6ec78d9 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -120,6 +120,15 @@ int setup_purgatory(struct kimage *image, const void *slave_code, #ifdef CONFIG_PPC64 struct kexec_buf; +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len); +#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + +int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); +#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole + int load_crashdump_segments_ppc64(struct kimage *image, struct kexec_buf *kbuf); int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 649ecdcc87345..8886aadc11a3a 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -92,5 +92,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup #endif #endif /*_S390_KEXEC_H */ diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 6ad8d946cd3eb..5ec359c1b50cb 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -193,6 +193,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +void *arch_kexec_kernel_image_load(struct kimage *image); +#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup #endif #endif diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ce6536f1d2699..5e0bc3f9eac3e 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -188,21 +188,53 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, void *buf, unsigned int size, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, 
const char *name); +void *kexec_image_load_default(struct kimage *image); + +#ifndef arch_kexec_kernel_image_probe +static inline int +arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) +{ + return kexec_image_probe_default(image, buf, buf_len); +} +#endif + +#ifndef arch_kimage_file_post_load_cleanup +static inline int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + return kexec_image_post_load_cleanup_default(image); +} +#endif + +#ifndef arch_kexec_kernel_image_load +static inline void *arch_kexec_kernel_image_load(struct kimage *image) +{ + return kexec_image_load_default(image); +} +#endif -/* Architectures may override the below functions */ -int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void *arch_kexec_kernel_image_load(struct kimage *image); -int arch_kimage_file_post_load_cleanup(struct kimage *image); #ifdef CONFIG_KEXEC_SIG int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len); #endif -int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); +#ifndef arch_kexec_locate_mem_hole +/** + * arch_kexec_locate_mem_hole - Find free memory to place the segments. + * @kbuf: Parameters for the memory search. + * + * On success, kbuf->mem will have the start address of the memory region found. + * + * Return: 0 on success, negative errno on error. + */ +static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + return kexec_locate_mem_hole(kbuf); +} +#endif + /* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 9b2839775c837..66e4ce29fc696 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -56,14 +56,7 @@ int kexec_image_probe_default(struct kimage *image, void *buf, return ret; } -/* Architectures can provide this probe function */ -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len) -{ - return kexec_image_probe_default(image, buf, buf_len); -} - -static void *kexec_image_load_default(struct kimage *image) +void *kexec_image_load_default(struct kimage *image) { if (!image->fops || !image->fops->load) return ERR_PTR(-ENOEXEC); @@ -74,11 +67,6 @@ static void *kexec_image_load_default(struct kimage *image) image->cmdline_buf_len); } -void * __weak arch_kexec_kernel_image_load(struct kimage *image) -{ - return kexec_image_load_default(image); -} - int kexec_image_post_load_cleanup_default(struct kimage *image) { if (!image->fops || !image->fops->cleanup) @@ -87,11 +75,6 @@ int kexec_image_post_load_cleanup_default(struct kimage *image) return image->fops->cleanup(image->image_loader_data); } -int __weak arch_kimage_file_post_load_cleanup(struct kimage *image) -{ - return kexec_image_post_load_cleanup_default(image); -} - #ifdef CONFIG_KEXEC_SIG static int kexec_image_verify_sig_default(struct kimage *image, void *buf, unsigned long buf_len) @@ -104,8 +87,7 @@ static int kexec_image_verify_sig_default(struct kimage *image, void *buf, return image->fops->verify_sig(buf, buf_len); } -int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len) +int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len) { return kexec_image_verify_sig_default(image, buf, buf_len); } @@ -616,19 +598,6 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf) 
return ret == 1 ? 0 : -EADDRNOTAVAIL; } -/** - * arch_kexec_locate_mem_hole - Find free memory to place the segments. - * @kbuf: Parameters for the memory search. - * - * On success, kbuf->mem will have the start address of the memory region found. - * - * Return: 0 on success, negative errno on error. - */ -int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) -{ - return kexec_locate_mem_hole(kbuf); -} - /** * kexec_add_buffer - place a buffer in a kexec segment * @kbuf: Buffer contents and memory parameters. From c171d3e0249cb795cdebe3eb6e030fbe1c22c075 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Fri, 1 Jul 2022 13:04:05 +0530 Subject: [PATCH 305/321] kexec: drop weak attribute from functions Drop __weak attribute from functions in kexec_core.c: - machine_kexec_post_load() - arch_kexec_protect_crashkres() - arch_kexec_unprotect_crashkres() - crash_free_reserved_phys_range() Link: https://lkml.kernel.org/r/c0f6219e03cb399d166d518ab505095218a902dd.1656659357.git.naveen.n.rao@linux.vnet.ibm.com Signed-off-by: Naveen N. Rao Suggested-by: Eric Biederman Signed-off-by: Andrew Morton --- arch/arm64/include/asm/kexec.h | 16 ++++++++++++++-- arch/powerpc/include/asm/kexec.h | 5 +++++ arch/s390/include/asm/kexec.h | 11 +++++++++++ arch/x86/include/asm/kexec.h | 6 ++++++ include/linux/kexec.h | 32 ++++++++++++++++++++++++++++---- kernel/kexec_core.c | 27 --------------------------- 6 files changed, 64 insertions(+), 33 deletions(-) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 78d272b26ebd1..559bfae267153 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -84,16 +84,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs, extern bool crash_is_nosave(unsigned long pfn); extern void crash_prepare_suspend(void); extern void crash_post_resume(void); + +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range #else static inline bool crash_is_nosave(unsigned long pfn) {return false; } static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +struct kimage; + #if defined(CONFIG_KEXEC_CORE) void cpu_soft_restart(unsigned long el2_switch, unsigned long entry, unsigned long arg0, unsigned long arg1, unsigned long arg2); + +int machine_kexec_post_load(struct kimage *image); +#define machine_kexec_post_load machine_kexec_post_load + +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres #endif #define ARCH_HAS_KIMAGE_ARCH @@ -113,8 +127,6 @@ struct kimage_arch { #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; -struct kimage; - int arch_kimage_file_post_load_cleanup(struct kimage *image); #define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 1e5e9b6ec78d9..d6f4edfe4737b 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -98,6 +98,11 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co void kexec_copy_flush(struct kimage *image); +#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS) +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range 
crash_free_reserved_phys_range +#endif + #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_elf64_ops; diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 8886aadc11a3a..1bd08eb56d5fc 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -85,6 +85,17 @@ struct kimage_arch { extern const struct kexec_file_ops s390_kexec_image_ops; extern const struct kexec_file_ops s390_kexec_elf_ops; +#ifdef CONFIG_CRASH_DUMP +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range + +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres +#endif + #ifdef CONFIG_KEXEC_FILE struct purgatory_info; int arch_kexec_apply_relocations_add(struct purgatory_info *pi, diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 5ec359c1b50cb..a3760ca796aa2 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -186,6 +186,12 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages); #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres + #ifdef CONFIG_KEXEC_FILE struct purgatory_info; int arch_kexec_apply_relocations_add(struct purgatory_info *pi, diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 5e0bc3f9eac3e..c358c51c10708 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -390,7 +390,10 @@ extern void machine_kexec_cleanup(struct kimage *image); extern int kernel_kexec(void); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); -int machine_kexec_post_load(struct kimage *image); + +#ifndef machine_kexec_post_load +static inline int machine_kexec_post_load(struct kimage *image) { return 0; } +#endif extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); @@ -423,10 +426,21 @@ extern bool kexec_in_progress; int crash_shrink_memory(unsigned long new_size); size_t crash_get_memory_size(void); -void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); -void arch_kexec_protect_crashkres(void); -void arch_kexec_unprotect_crashkres(void); +#ifndef arch_kexec_protect_crashkres +/* + * Protection mechanism for crashkernel reserved memory after + * the kdump kernel is loaded. 
+ * + * Provide an empty default implementation here -- architecture + * code may override this + */ +static inline void arch_kexec_protect_crashkres(void) { } +#endif + +#ifndef arch_kexec_unprotect_crashkres +static inline void arch_kexec_unprotect_crashkres(void) { } +#endif #ifndef page_to_boot_pfn static inline unsigned long page_to_boot_pfn(struct page *page) @@ -456,6 +470,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys) } #endif +#ifndef crash_free_reserved_phys_range +static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) + free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT)); +} +#endif + static inline unsigned long virt_to_boot_phys(void *addr) { return phys_to_boot_phys(__pa((unsigned long)addr)); diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 4d34c78334ce4..acd029b307e42 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -591,11 +591,6 @@ static void kimage_free_extra_pages(struct kimage *image) } -int __weak machine_kexec_post_load(struct kimage *image) -{ - return 0; -} - void kimage_terminate(struct kimage *image) { if (*image->entry != 0) @@ -1020,15 +1015,6 @@ size_t crash_get_memory_size(void) return size; } -void __weak crash_free_reserved_phys_range(unsigned long begin, - unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) - free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT)); -} - int crash_shrink_memory(unsigned long new_size) { int ret = 0; @@ -1225,16 +1211,3 @@ int kernel_kexec(void) mutex_unlock(&kexec_mutex); return error; } - -/* - * Protection mechanism for crashkernel reserved memory after - * the kdump kernel is loaded. - * - * Provide an empty default implementation here -- architecture - * code may override this - */ -void __weak arch_kexec_protect_crashkres(void) -{} - -void __weak arch_kexec_unprotect_crashkres(void) -{} From 85c632e2d87d6e2da593e18bf4f88fdeaaa17236 Mon Sep 17 00:00:00 2001 From: Sander Vanheule Date: Sat, 2 Jul 2022 18:08:24 +0200 Subject: [PATCH 306/321] x86/cacheinfo: move shared cache map definitions Patch series "cpumask: Fix invalid uniprocessor assumptions", v4. On uniprocessor builds, it is currently assumed that any cpumask will contain the single CPU: cpu0. This assumption is used to provide optimised implementations. The current assumption also appears to be wrong, by ignoring the fact that users can provide empty cpumasks. This can result in bugs as explained in [1] - for_each_cpu() will run one iteration of the loop even when passed an empty cpumask. This series introduces some basic tests, and updates the optimisations for uniprocessor builds. The x86 patch was written after the kernel test robot [2] ran into a failed build. I have tried to list the files potentially affected by the changes to cpumask.h, in an attempt to find any other cases that fail on !SMP. I've gone through some of the files manually, and ran a few cross builds, but nothing else popped up. I (build) checked about half of the potientally affected files, but I do not have the resources to do them all. I hope we can fix other issues if/when they pop up later. 
[1] https://lore.kernel.org/all/20220530082552.46113-1-sander@svanheule.net/ [2] https://lore.kernel.org/all/202206060858.wA0FOzRy-lkp@intel.com/ This patch (of 5): The maps to keep track of shared caches between CPUs on SMP systems are declared in asm/smp.h, among them specifically cpu_llc_shared_map. These maps are externally defined in cpu/smpboot.c. The latter is only compiled on CONFIG_SMP=y, which means the declared extern symbols from asm/smp.h do not have a corresponding definition on uniprocessor builds. The inline cpu_llc_shared_mask() function from asm/smp.h refers to the map declaration mentioned above. This function is referenced in cacheinfo.c inside for_each_cpu() loop macros, to provide cpumask for the loop. On uniprocessor builds, the symbol for the cpu_llc_shared_map does not exist. However, the current implementation of for_each_cpu() also (wrongly) ignores the provided mask. By sheer luck, the compiler thus optimises out this unused reference to cpu_llc_shared_map, and the linker therefore does not require the cpu_llc_shared_mask to actually exist on uniprocessor builds. Only on SMP bulids does smpboot.o exist to provide the required symbols. To no longer rely on compiler optimisations for successful uniprocessor builds, move the definitions of cpu_llc_shared_map and cpu_l2c_shared_map from smpboot.c to cacheinfo.c. Link: https://lkml.kernel.org/r/cover.1656777646.git.sander@svanheule.net Link: https://lkml.kernel.org/r/e8167ddb570f56744a3dc12c2149a660a324d969.1656777646.git.sander@svanheule.net Signed-off-by: Sander Vanheule Cc: Andy Shevchenko Cc: Marco Elver Cc: Greg Kroah-Hartman Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Valentin Schneider Cc: Yury Norov Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton --- arch/x86/kernel/cpu/cacheinfo.c | 6 ++++++ arch/x86/kernel/smpboot.c | 4 ---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index fe98a1465be6a..66556833d7af5 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -29,6 +29,12 @@ #define LVL_3 4 #define LVL_TRACE 5 +/* Shared last level cache maps */ +DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); + +/* Shared L2 cache maps */ +DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); + struct _cache_table { unsigned char descriptor; char cache_type; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5e7f9532a10d0..f24227bc3220a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -95,10 +95,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map); EXPORT_PER_CPU_SYMBOL(cpu_die_map); -DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); - -DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); - /* Per CPU bogomips and other parameters */ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); From e73e6f4e756b692de600ca67857c14a957698308 Mon Sep 17 00:00:00 2001 From: Sander Vanheule Date: Sat, 2 Jul 2022 18:08:25 +0200 Subject: [PATCH 307/321] cpumask: Fix invalid uniprocessor mask assumption On uniprocessor builds, any CPU mask is assumed to contain exactly one CPU (cpu0). This assumption ignores the existence of empty masks, resulting in incorrect behaviour. 
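To make the problem concrete, here is an illustrative sketch (not part of the
change; empty_mask_demo() is just a placeholder name).  The old uniprocessor
definition

	#define for_each_cpu(cpu, mask) \
		for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)

never looks at the mask, so iterating over an explicitly cleared mask still
runs the loop body once:

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	static void empty_mask_demo(void)
	{
		cpumask_t mask;
		int cpu;

		cpumask_clear(&mask);
		for_each_cpu(cpu, &mask)
			pr_info("cpu%d\n", cpu);
	}

On an NR_CPUS == 1 build with the old macro this prints "cpu0" once even
though the mask is empty; with the generic implementation used unconditionally
the loop body is never entered for an empty mask.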
cpumask_first_zero(), cpumask_next_zero(), and for_each_cpu_not() don't provide behaviour matching the assumption that a UP mask is always "1", and instead provide behaviour matching the empty mask. Drop the incorrectly optimised code and use the generic implementations in all cases. Link: https://lkml.kernel.org/r/86bf3f005abba2d92120ddd0809235cab4f759a6.1656777646.git.sander@svanheule.net Signed-off-by: Sander Vanheule Suggested-by: Yury Norov Cc: Andy Shevchenko Cc: Borislav Petkov Cc: Dave Hansen Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Marco Elver Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Valentin Schneider Signed-off-by: Andrew Morton --- include/linux/cpumask.h | 99 ++++++++--------------------------------- lib/Makefile | 3 +- lib/cpumask.c | 2 + 3 files changed, 22 insertions(+), 82 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index fe29ac7cc469c..7fbef41b3093f 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -116,85 +116,6 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu) return cpu; } -#if NR_CPUS == 1 -/* Uniprocessor. Assume all masks are "1". */ -static inline unsigned int cpumask_first(const struct cpumask *srcp) -{ - return 0; -} - -static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) -{ - return 0; -} - -static inline unsigned int cpumask_first_and(const struct cpumask *srcp1, - const struct cpumask *srcp2) -{ - return 0; -} - -static inline unsigned int cpumask_last(const struct cpumask *srcp) -{ - return 0; -} - -/* Valid inputs for n are -1 and 0. */ -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) -{ - return n+1; -} - -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) -{ - return n+1; -} - -static inline unsigned int cpumask_next_and(int n, - const struct cpumask *srcp, - const struct cpumask *andp) -{ - return n+1; -} - -static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, - int start, bool wrap) -{ - /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */ - return (wrap && n == 0); -} - -/* cpu must be a valid cpu, ie 0, so there's no other choice. 
*/ -static inline unsigned int cpumask_any_but(const struct cpumask *mask, - unsigned int cpu) -{ - return 1; -} - -static inline unsigned int cpumask_local_spread(unsigned int i, int node) -{ - return 0; -} - -static inline int cpumask_any_and_distribute(const struct cpumask *src1p, - const struct cpumask *src2p) { - return cpumask_first_and(src1p, src2p); -} - -static inline int cpumask_any_distribute(const struct cpumask *srcp) -{ - return cpumask_first(srcp); -} - -#define for_each_cpu(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#define for_each_cpu_not(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#define for_each_cpu_wrap(cpu, mask, start) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) -#define for_each_cpu_and(cpu, mask1, mask2) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2) -#else /** * cpumask_first - get the first cpu in a cpumask * @srcp: the cpumask pointer @@ -260,10 +181,29 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu); + +#if NR_CPUS == 1 +/* Uniprocessor: there is only one valid CPU */ +static inline unsigned int cpumask_local_spread(unsigned int i, int node) +{ + return 0; +} + +static inline int cpumask_any_and_distribute(const struct cpumask *src1p, + const struct cpumask *src2p) { + return cpumask_first_and(src1p, src2p); +} + +static inline int cpumask_any_distribute(const struct cpumask *srcp) +{ + return cpumask_first(srcp); +} +#else unsigned int cpumask_local_spread(unsigned int i, int node); int cpumask_any_and_distribute(const struct cpumask *src1p, const struct cpumask *src2p); int cpumask_any_distribute(const struct cpumask *srcp); +#endif /* NR_CPUS */ /** * for_each_cpu - iterate over every cpu in a mask @@ -324,7 +264,6 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool for ((cpu) = -1; \ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ (cpu) < nr_cpu_ids;) -#endif /* SMP */ #define CPU_BITS_NONE \ { \ diff --git a/lib/Makefile b/lib/Makefile index f99bf61f8bbc6..bcc7e8ea0cde5 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -34,10 +34,9 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \ - buildid.o + buildid.o cpumask.o lib-$(CONFIG_PRINTK) += dump_stack.o -lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o klist.o obj-y += lockref.o diff --git a/lib/cpumask.c b/lib/cpumask.c index a971a82d2f436..b9728513a4d40 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -192,6 +192,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask) } #endif +#if NR_CPUS > 1 /** * cpumask_local_spread - select the i'th cpu with local numa cpu's first * @i: index number @@ -279,3 +280,4 @@ int cpumask_any_distribute(const struct cpumask *srcp) return next; } EXPORT_SYMBOL(cpumask_any_distribute); +#endif /* NR_CPUS */ From 4bda9a614c0e5b6b69723a1d47bf678cd4ef76ef Mon Sep 17 00:00:00 2001 From: Sander Vanheule Date: Sat, 2 Jul 2022 18:08:26 +0200 Subject: [PATCH 308/321] lib/test: introduce cpumask KUnit test suite Add a basic suite of tests for cpumask, providing some tests for empty and completely filled cpumasks. 
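As a usage note (an illustrative sketch, not part of the patch): the suite
depends on KUNIT and is selected by default when KUNIT_ALL_TESTS is enabled;
otherwise it can be enabled explicitly, e.g. in a KUnit config fragment:

	CONFIG_KUNIT=y
	CONFIG_TEST_CPUMASK=y

The tests then run at boot, or at module load time if built as a module.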
Link: https://lkml.kernel.org/r/c96980ec35c3bd23f17c3374bf42c22971545e85.1656777646.git.sander@svanheule.net Signed-off-by: Sander Vanheule Reviewed-by: Andy Shevchenko Suggested-by: Yury Norov Cc: Borislav Petkov Cc: Dave Hansen Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Marco Elver Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Valentin Schneider Signed-off-by: Andrew Morton --- lib/Kconfig.debug | 9 +++ lib/Makefile | 1 + lib/test_cpumask.c | 138 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 148 insertions(+) create mode 100644 lib/test_cpumask.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2e24db4bff192..04aaa20d50f98 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2021,6 +2021,15 @@ config LKDTM Documentation on how to use the module can be found in Documentation/fault-injection/provoke-crashes.rst +config TEST_CPUMASK + tristate "cpumask tests" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable to turn on cpumask tests, running at boot or module load time. + + If unsure, say N. + config TEST_LIST_SORT tristate "Linked list sorting test" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/lib/Makefile b/lib/Makefile index bcc7e8ea0cde5..de3e47453fe8e 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -99,6 +99,7 @@ obj-$(CONFIG_TEST_HMM) += test_hmm.o obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o +obj-$(CONFIG_TEST_CPUMASK) += test_cpumask.o CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE) obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o # diff --git a/lib/test_cpumask.c b/lib/test_cpumask.c new file mode 100644 index 0000000000000..a31a1622f1f6e --- /dev/null +++ b/lib/test_cpumask.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for cpumask. 
+ * + * Author: Sander Vanheule + */ + +#include +#include +#include + +#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \ + do { \ + const cpumask_t *m = (mask); \ + int mask_weight = cpumask_weight(m); \ + int cpu, iter = 0; \ + for_each_cpu(cpu, m) \ + iter++; \ + KUNIT_EXPECT_EQ((test), mask_weight, iter); \ + } while (0) + +#define EXPECT_FOR_EACH_CPU_NOT_EQ(test, mask) \ + do { \ + const cpumask_t *m = (mask); \ + int mask_weight = cpumask_weight(m); \ + int cpu, iter = 0; \ + for_each_cpu_not(cpu, m) \ + iter++; \ + KUNIT_EXPECT_EQ((test), nr_cpu_ids - mask_weight, iter); \ + } while (0) + +#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \ + do { \ + const cpumask_t *m = (mask); \ + int mask_weight = cpumask_weight(m); \ + int cpu, iter = 0; \ + for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \ + iter++; \ + KUNIT_EXPECT_EQ((test), mask_weight, iter); \ + } while (0) + +#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \ + do { \ + int mask_weight = num_##name##_cpus(); \ + int cpu, iter = 0; \ + for_each_##name##_cpu(cpu) \ + iter++; \ + KUNIT_EXPECT_EQ((test), mask_weight, iter); \ + } while (0) + +static cpumask_t mask_empty; +static cpumask_t mask_all; + +static void test_cpumask_weight(struct kunit *test) +{ + KUNIT_EXPECT_TRUE(test, cpumask_empty(&mask_empty)); + KUNIT_EXPECT_TRUE(test, cpumask_full(cpu_possible_mask)); + KUNIT_EXPECT_TRUE(test, cpumask_full(&mask_all)); + + KUNIT_EXPECT_EQ(test, 0, cpumask_weight(&mask_empty)); + KUNIT_EXPECT_EQ(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask)); + KUNIT_EXPECT_EQ(test, nr_cpumask_bits, cpumask_weight(&mask_all)); +} + +static void test_cpumask_first(struct kunit *test) +{ + KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_first(&mask_empty)); + KUNIT_EXPECT_EQ(test, 0, cpumask_first(cpu_possible_mask)); + + KUNIT_EXPECT_EQ(test, 0, cpumask_first_zero(&mask_empty)); + KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask)); +} + +static void test_cpumask_last(struct kunit *test) +{ + KUNIT_EXPECT_LE(test, nr_cpumask_bits, cpumask_last(&mask_empty)); + KUNIT_EXPECT_EQ(test, nr_cpumask_bits - 1, cpumask_last(cpu_possible_mask)); +} + +static void test_cpumask_next(struct kunit *test) +{ + KUNIT_EXPECT_EQ(test, 0, cpumask_next_zero(-1, &mask_empty)); + KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask)); + + KUNIT_EXPECT_LE(test, nr_cpu_ids, cpumask_next(-1, &mask_empty)); + KUNIT_EXPECT_EQ(test, 0, cpumask_next(-1, cpu_possible_mask)); +} + +static void test_cpumask_iterators(struct kunit *test) +{ + EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty); + EXPECT_FOR_EACH_CPU_NOT_EQ(test, &mask_empty); + EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty); + + EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask); + EXPECT_FOR_EACH_CPU_NOT_EQ(test, cpu_possible_mask); + EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask); +} + +static void test_cpumask_iterators_builtin(struct kunit *test) +{ + EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible); + + /* Ensure the dynamic masks are stable while running the tests */ + cpu_hotplug_disable(); + + EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online); + EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present); + + cpu_hotplug_enable(); +} + +static int test_cpumask_init(struct kunit *test) +{ + cpumask_clear(&mask_empty); + cpumask_setall(&mask_all); + + return 0; +} + +static struct kunit_case test_cpumask_cases[] = { + KUNIT_CASE(test_cpumask_weight), + KUNIT_CASE(test_cpumask_first), + KUNIT_CASE(test_cpumask_last), + KUNIT_CASE(test_cpumask_next), + KUNIT_CASE(test_cpumask_iterators), + 
KUNIT_CASE(test_cpumask_iterators_builtin), + {} +}; + +static struct kunit_suite test_cpumask_suite = { + .name = "cpumask", + .init = test_cpumask_init, + .test_cases = test_cpumask_cases, +}; +kunit_test_suite(test_cpumask_suite); + +MODULE_LICENSE("GPL"); From 8f44d65f7a0998277f964769435e37fb26577d58 Mon Sep 17 00:00:00 2001 From: Sander Vanheule Date: Sat, 2 Jul 2022 18:08:27 +0200 Subject: [PATCH 309/321] cpumask: add UP optimised for_each_*_cpu versions On uniprocessor builds, the following loops will always run over a mask that contains one enabled CPU (cpu0): - for_each_possible_cpu - for_each_online_cpu - for_each_present_cpu Provide uniprocessor-specific macros for these loops, that always run exactly once. Link: https://lkml.kernel.org/r/3a92869b902a075b97be5d1452c9c6badbbff0df.1656777646.git.sander@svanheule.net Signed-off-by: Sander Vanheule Acked-by: Yury Norov Cc: Andy Shevchenko Cc: Borislav Petkov Cc: Dave Hansen Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Marco Elver Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Valentin Schneider Signed-off-by: Andrew Morton --- include/linux/cpumask.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 7fbef41b3093f..6c5b4ee000f2e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -750,9 +750,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); /* First bits of cpu_bit_bitmap are in fact unset. */ #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) +#if NR_CPUS == 1 +/* Uniprocessor: the possible/online/present masks are always "1" */ +#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) +#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) +#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) +#else #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) +#endif /* Wrappers for arch boot code to manipulate normally-constant masks */ void init_cpu_present(const struct cpumask *src); From 6959acb94f474189ecad3705f7d3fad6d158e2f1 Mon Sep 17 00:00:00 2001 From: Sander Vanheule Date: Sat, 2 Jul 2022 18:08:28 +0200 Subject: [PATCH 310/321] cpumask: update cpumask_next_wrap() signature The extern specifier is not needed for this declaration, so drop it. The function also depends only on the input parameters, and has no side effects, so it can be marked __pure like other functions in cpumask.h. Link: https://lkml.kernel.org/r/72ab755695b74bb5fbaa756ae4c0edd708d172f1.1656777646.git.sander@svanheule.net Signed-off-by: Sander Vanheule Reviewed-by: Andy Shevchenko Cc: Borislav Petkov Cc: Dave Hansen Cc: Greg Kroah-Hartman Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Marco Elver Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Valentin Schneider Cc: Yury Norov Signed-off-by: Andrew Morton --- include/linux/cpumask.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 6c5b4ee000f2e..523857884ae44 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -229,7 +229,7 @@ int cpumask_any_distribute(const struct cpumask *srcp); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) -extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); +int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); /** * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location From db40a728324366fd708b00cfdb77da6e0ae5e5de Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 22 Jun 2022 14:46:31 +0900 Subject: [PATCH 311/321] lib/smp_processor_id: fix imbalanced instrumentation_end() call Currently instrumentation_end() won't be called if printk_ratelimit() returned false. Link: https://lkml.kernel.org/r/a636d8e0-ad32-5888-acac-671f7f553bb3@I-love.SAKURA.ne.jp Fixes: 126f21f0e8d46e2c ("lib/smp_processor_id: Move it into noinstr section") Signed-off-by: Tetsuo Handa Cc: Thomas Gleixner Cc: Alexandre Chartre Cc: Peter Zijlstra Signed-off-by: Andrew Morton --- lib/smp_processor_id.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 046ac6297c781..a2bb7738c373c 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -47,9 +47,9 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2) printk("caller is %pS\n", __builtin_return_address(0)); dump_stack(); - instrumentation_end(); out_enable: + instrumentation_end(); preempt_enable_no_resched_notrace(); out: return this_cpu; From 6313260f9ebaa29f8f1b3f3e7124ad003770c9db Mon Sep 17 00:00:00 2001 From: Mark-PK Tsai Date: Fri, 8 Jul 2022 21:19:47 +0800 Subject: [PATCH 312/321] lib: devres: use numa aware allocation Allocate device resource from local node memory when the numa locality of the device is specified. 
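The conversion pattern, shown as a before/after sketch of the first hunk below
(no new API is introduced -- devres_alloc_node() and dev_to_node() already
exist; if the device has no node set, dev_to_node() returns NUMA_NO_NODE and
the allocation behaves as before):

	/* before: the devres entry may be allocated on any node */
	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);

	/* after: the allocation is directed to the device's NUMA node */
	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));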
Link: https://lkml.kernel.org/r/20220708131952.14500-1-mark-pk.tsai@mediatek.com Signed-off-by: Mark-PK Tsai Cc: Matthias Brugger Cc: YJ Chiang Cc: Hans de Goede Cc: Thomas Zimmermann Cc: Zhen Lei Cc: Jacob Keller Signed-off-by: Andrew Morton --- lib/devres.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/devres.c b/lib/devres.c index 14664bbb48757..55eb07e80cbb3 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -29,7 +29,8 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset, { void __iomem **ptr, *addr = NULL; - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); + ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL, + dev_to_node(dev)); if (!ptr) return NULL; @@ -292,7 +293,8 @@ void __iomem *devm_ioport_map(struct device *dev, unsigned long port, { void __iomem **ptr, *addr; - ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL); + ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL, + dev_to_node(dev)); if (!ptr) return NULL; @@ -366,7 +368,8 @@ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev) if (dr) return dr->table; - new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL); + new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL, + dev_to_node(&pdev->dev)); if (!new_dr) return NULL; dr = devres_get(&pdev->dev, new_dr, NULL, NULL); @@ -548,7 +551,8 @@ int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long int *mtrr; int ret; - mtrr = devres_alloc(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL); + mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL, + dev_to_node(dev)); if (!mtrr) return -ENOMEM; @@ -593,7 +597,8 @@ int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start, struct arch_io_reserve_memtype_wc_devres *dr; int ret; - dr = devres_alloc(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL); + dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL, + dev_to_node(dev)); if (!dr) return -ENOMEM; From 2f4134ac6dedd80b3e4585a7144a373b41eacd53 Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 8 Jul 2022 12:48:44 +0800 Subject: [PATCH 313/321] panic: taint kernel if tests are run Most in-kernel tests (such as KUnit tests) are not supposed to run on production systems: they may do deliberately illegal things to trigger errors, and have security implications (for example, KUnit assertions will often deliberately leak kernel addresses). Add a new taint type, TAINT_TEST to signal that a test has been run. This will be printed as 'N' (originally for kuNit, as every other sensible letter was taken.) This should discourage people from running these tests on production systems, and to make it easier to tell if tests have been run accidentally (by loading the wrong configuration, etc.) Link: https://lkml.kernel.org/r/20220708044847.531566-1-davidgow@google.com Signed-off-by: David Gow Acked-by: Luis Chamberlain Reviewed-by: Brendan Higgins Cc: Andy Shevchenko Cc: Jonathan Corbet Cc: Kees Cook Cc: Shuah Khan Cc: Greg Kroah-Hartman Cc: Masahiro Yamada Cc: Nathan Chancellor Cc: Guilherme G. 
Piccoli Cc: Sebastian Reichel Cc: John Ogness Cc: Daniel Latypov Cc: Jani Nikula Cc: Lucas De Marchi Cc: Aaron Tomlin Cc: Michal Marek Cc: Nick Desaulniers Signed-off-by: Andrew Morton --- Documentation/admin-guide/tainted-kernels.rst | 1 + include/linux/panic.h | 3 ++- kernel/panic.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst index ceeed7b0798da..7d80e8c307d14 100644 --- a/Documentation/admin-guide/tainted-kernels.rst +++ b/Documentation/admin-guide/tainted-kernels.rst @@ -100,6 +100,7 @@ Bit Log Number Reason that got the kernel tainted 15 _/K 32768 kernel has been live patched 16 _/X 65536 auxiliary taint, defined for and used by distros 17 _/T 131072 kernel was built with the struct randomization plugin + 18 _/N 262144 an in-kernel test has been run === === ====== ======================================================== Note: The character ``_`` is representing a blank in this table to make reading diff --git a/include/linux/panic.h b/include/linux/panic.h index e71161da69c4b..c7759b3f20452 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -68,7 +68,8 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) #define TAINT_LIVEPATCH 15 #define TAINT_AUX 16 #define TAINT_RANDSTRUCT 17 -#define TAINT_FLAGS_COUNT 18 +#define TAINT_TEST 18 +#define TAINT_FLAGS_COUNT 19 #define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) struct taint_flag { diff --git a/kernel/panic.c b/kernel/panic.c index a3308af28a213..c6eb8f8db0c05 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -428,6 +428,7 @@ const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = { [ TAINT_LIVEPATCH ] = { 'K', ' ', true }, [ TAINT_AUX ] = { 'X', ' ', true }, [ TAINT_RANDSTRUCT ] = { 'T', ' ', true }, + [ TAINT_TEST ] = { 'N', ' ', true }, }; /** From 277fda9c5ef32c887546b17c527d986461b3c888 Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 8 Jul 2022 12:48:45 +0800 Subject: [PATCH 314/321] module: panic: taint the kernel when selftest modules load Taint the kernel with TAINT_TEST whenever a test module loads, by adding a new "TEST" module property, and setting it for all modules in the tools/testing directory. This property can also be set manually, for tests which live outside the tools/testing directory with: MODULE_INFO(test, "Y"); Link: https://lkml.kernel.org/r/20220708044847.531566-2-davidgow@google.com Signed-off-by: David Gow Reviewed-by: Luis Chamberlain Reviewed-by: Aaron Tomlin Acked-by: Brendan Higgins Cc: Andy Shevchenko Cc: Daniel Latypov Cc: Greg Kroah-Hartman Cc: Guilherme G. 
Piccoli Cc: Jani Nikula Cc: John Ogness Cc: Jonathan Corbet Cc: Kees Cook Cc: Lucas De Marchi Cc: Masahiro Yamada Cc: Michal Marek Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Sebastian Reichel Cc: Shuah Khan Signed-off-by: Andrew Morton --- kernel/module/main.c | 7 +++++++ scripts/mod/modpost.c | 3 +++ 2 files changed, 10 insertions(+) diff --git a/kernel/module/main.c b/kernel/module/main.c index fed58d30725de..4723f13167095 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1988,6 +1988,13 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) /* Set up license info based on the info section */ set_license(mod, get_modinfo(info, "license")); + if (get_modinfo(info, "test")) { + if (!test_taint(TAINT_TEST)) + pr_warn("%s: loading test module taints kernel.\n", + mod->name); + add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); + } + return 0; } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 620dc8c4c8140..29474cee10b1c 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -2191,6 +2191,9 @@ static void add_header(struct buffer *b, struct module *mod) if (strstarts(mod->name, "drivers/staging")) buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n"); + + if (strstarts(mod->name, "tools/testing")) + buf_printf(b, "\nMODULE_INFO(test, \"Y\");\n"); } static void add_exported_symbols(struct buffer *buf, struct module *mod) From 7267b339e29fbd03dbabeac30b4d4354e75ff8bb Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 8 Jul 2022 12:48:46 +0800 Subject: [PATCH 315/321] kunit: taint the kernel when KUnit tests are run Make KUnit trigger the new TAINT_TEST taint when any KUnit test is run. Due to KUnit tests not being intended to run on production systems, and potentially causing problems (or security issues like leaking kernel addresses), the kernel's state should not be considered safe for production use after KUnit tests are run. This both marks KUnit modules as test modules using MODULE_INFO() and manually taints the kernel when tests are run (which catches builtin tests). Link: https://lkml.kernel.org/r/20220708044847.531566-3-davidgow@google.com Signed-off-by: David Gow Acked-by: Luis Chamberlain Tested-by: Daniel Latypov Reviewed-by: Brendan Higgins Cc: Aaron Tomlin Cc: Andy Shevchenko Cc: Greg Kroah-Hartman Cc: Guilherme G. Piccoli Cc: Jani Nikula Cc: John Ogness Cc: Jonathan Corbet Cc: Kees Cook Cc: Lucas De Marchi Cc: Masahiro Yamada Cc: Michal Marek Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Sebastian Reichel Cc: Shuah Khan Signed-off-by: Andrew Morton --- include/kunit/test.h | 3 ++- lib/kunit/test.c | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/include/kunit/test.h b/include/kunit/test.h index 8ffcd7de96070..ccae848720dc2 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -277,7 +277,8 @@ static inline int kunit_run_all_tests(void) { \ return __kunit_test_suites_exit(__suites); \ } \ - module_exit(kunit_test_suites_exit) + module_exit(kunit_test_suites_exit) \ + MODULE_INFO(test, "Y"); #else #define kunit_test_suites_for_module(__suites) #endif /* MODULE */ diff --git a/lib/kunit/test.c b/lib/kunit/test.c index a5053a07409ff..8b11552dc215e 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -501,6 +502,9 @@ int kunit_run_tests(struct kunit_suite *suite) struct kunit_result_stats suite_stats = { 0 }; struct kunit_result_stats total_stats = { 0 }; + /* Taint the kernel so we know we've run tests. 
*/ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); + if (suite->suite_init) { suite->suite_init_err = suite->suite_init(suite); if (suite->suite_init_err) { From 3c8efdfdabc56ecd036ac409561860aeb8b5e1cc Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 8 Jul 2022 12:48:47 +0800 Subject: [PATCH 316/321] selftest: taint kernel when test module loaded Make any kselftest test module (using the kselftest_module framework) taint the kernel with TAINT_TEST on module load. Also mark the module as a test module using MODULE_INFO(test, "Y") so that other tools can tell this is a test module. We can't rely solely on this, though, as these test modules are also often built-in. Finally, update the kselftest documentation to mention that the kernel should be tainted, and how to do so manually (as below). Note that several selftests use kernel modules which are not based on the kselftest_module framework, and so will not automatically taint the kernel. This can be done in two ways: - Moving the module to the tools/testing directory. All modules under this directory will taint the kernel. - Adding the 'test' module property with: MODULE_INFO(test, "Y") Similarly, selftests which do not load modules into the kernel generally should not taint the kernel (or possibly should only do so on failure), as it's assumed that testing from user-space should be safe. Regardless, they can write to /proc/sys/kernel/tainted if required. Link: https://lkml.kernel.org/r/20220708044847.531566-4-davidgow@google.com Signed-off-by: David Gow Reviewed-by: Luis Chamberlain Acked-by: Brendan Higgins Cc: Aaron Tomlin Cc: Andy Shevchenko Cc: Daniel Latypov Cc: Greg Kroah-Hartman Cc: Guilherme G. Piccoli Cc: Jani Nikula Cc: John Ogness Cc: Jonathan Corbet Cc: Kees Cook Cc: Lucas De Marchi Cc: Masahiro Yamada Cc: Michal Marek Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Sebastian Reichel Cc: Shuah Khan Signed-off-by: Andrew Morton --- Documentation/dev-tools/kselftest.rst | 9 +++++++++ tools/testing/selftests/kselftest_module.h | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst index a833ecf12fbc1..1096a98335509 100644 --- a/Documentation/dev-tools/kselftest.rst +++ b/Documentation/dev-tools/kselftest.rst @@ -250,6 +250,14 @@ assist writing kernel modules that are for use with kselftest: - ``tools/testing/selftests/kselftest_module.h`` - ``tools/testing/selftests/kselftest/module.sh`` +Note that test modules should taint the kernel with TAINT_TEST. This will +happen automatically for modules which are in the ``tools/testing/`` +directory, or for modules which use the ``kselftest_module.h`` header above. +Otherwise, you'll need to add ``MODULE_INFO(test, "Y")`` to your module +source. selftests which do not load modules typically should not taint the +kernel, but in cases where a non-test module is loaded, TEST_TAINT can be +applied from userspace by writing to ``/proc/sys/kernel/tainted``. 
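To make that last point concrete, here is a hedged userspace sketch (not part of the patch) that reads the same file and tests the TAINT_TEST bit (bit 18, value 262144, reported as 'N' in the taint table above):

    #include <stdio.h>

    #define TAINT_TEST_BIT 18   /* 'N': an in-kernel test has been run */

    int main(void)
    {
        unsigned long long mask;
        FILE *f = fopen("/proc/sys/kernel/tainted", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%llu", &mask) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);

        printf("TAINT_TEST is %s\n",
               (mask & (1ULL << TAINT_TEST_BIT)) ? "set" : "clear");
        return 0;
    }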
+ How to use ---------- @@ -308,6 +316,7 @@ A bare bones test module might look like this: KSTM_MODULE_LOADERS(test_foo); MODULE_AUTHOR("John Developer "); MODULE_LICENSE("GPL"); + MODULE_INFO(test, "Y"); Example test script ------------------- diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h index e2ea41de3f354..63cd7487373fd 100644 --- a/tools/testing/selftests/kselftest_module.h +++ b/tools/testing/selftests/kselftest_module.h @@ -3,6 +3,7 @@ #define __KSELFTEST_MODULE_H #include +#include /* * Test framework for writing test modules to be loaded by kselftest. @@ -41,6 +42,7 @@ static inline int kstm_report(unsigned int total_tests, unsigned int failed_test static int __init __module##_init(void) \ { \ pr_info("loaded.\n"); \ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); \ selftest(); \ return kstm_report(total_tests, failed_tests, skipped_tests); \ } \ @@ -51,4 +53,6 @@ static void __exit __module##_exit(void) \ module_init(__module##_init); \ module_exit(__module##_exit) +MODULE_INFO(test, "Y"); + #endif /* __KSELFTEST_MODULE_H */ From 02c9c493ead29d1f4b2bf05301a8730bfdef95d1 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 8 Jul 2022 09:43:01 +0800 Subject: [PATCH 317/321] autofs: use inode permission method for write access Patch series "autofs: misc patches". This series contains several patches that resulted mostly from comments made by Al Viro (quite a long time ago now). This patch (of 5): Eliminate some code duplication from mkdir/rmdir/symlink/unlink methods by using the inode operation .permission(). Link: https://lkml.kernel.org/r/165724445154.30914.10970894936827635879.stgit@donald.themaw.net Link: https://lkml.kernel.org/r/165724458096.30914.13499431569758625806.stgit@donald.themaw.net Signed-off-by: Ian Kent Cc: Al Viro Cc: David Howells Cc: Miklos Szeredi Signed-off-by: Andrew Morton --- fs/autofs/root.c | 63 +++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 41 deletions(-) diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 91fe4548c2565..fef6ed9910221 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -10,6 +10,7 @@ #include "autofs_i.h" +static int autofs_dir_permission(struct user_namespace *, struct inode *, int); static int autofs_dir_symlink(struct user_namespace *, struct inode *, struct dentry *, const char *); static int autofs_dir_unlink(struct inode *, struct dentry *); @@ -50,6 +51,7 @@ const struct file_operations autofs_dir_operations = { const struct inode_operations autofs_dir_inode_operations = { .lookup = autofs_lookup, + .permission = autofs_dir_permission, .unlink = autofs_dir_unlink, .symlink = autofs_dir_symlink, .mkdir = autofs_dir_mkdir, @@ -526,11 +528,30 @@ static struct dentry *autofs_lookup(struct inode *dir, return NULL; } +static int autofs_dir_permission(struct user_namespace *mnt_userns, + struct inode *inode, int mask) +{ + if (mask & MAY_WRITE) { + struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb); + + if (!autofs_oz_mode(sbi)) + return -EACCES; + + /* autofs_oz_mode() needs to allow path walks when the + * autofs mount is catatonic but the state of an autofs + * file system needs to be preserved over restarts. 
+ */ + if (sbi->flags & AUTOFS_SBI_CATATONIC) + return -EACCES; + } + + return generic_permission(mnt_userns, inode, mask); +} + static int autofs_dir_symlink(struct user_namespace *mnt_userns, struct inode *dir, struct dentry *dentry, const char *symname) { - struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; struct inode *inode; @@ -539,16 +560,6 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns, pr_debug("%s <- %pd\n", symname, dentry); - if (!autofs_oz_mode(sbi)) - return -EACCES; - - /* autofs_oz_mode() needs to allow path walks when the - * autofs mount is catatonic but the state of an autofs - * file system needs to be preserved over restarts. - */ - if (sbi->flags & AUTOFS_SBI_CATATONIC) - return -EACCES; - BUG_ON(!ino); autofs_clean_ino(ino); @@ -601,16 +612,6 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry) struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; - if (!autofs_oz_mode(sbi)) - return -EACCES; - - /* autofs_oz_mode() needs to allow path walks when the - * autofs mount is catatonic but the state of an autofs - * file system needs to be preserved over restarts. - */ - if (sbi->flags & AUTOFS_SBI_CATATONIC) - return -EACCES; - ino->count--; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; @@ -683,16 +684,6 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry) pr_debug("dentry %p, removing %pd\n", dentry, dentry); - if (!autofs_oz_mode(sbi)) - return -EACCES; - - /* autofs_oz_mode() needs to allow path walks when the - * autofs mount is catatonic but the state of an autofs - * file system needs to be preserved over restarts. - */ - if (sbi->flags & AUTOFS_SBI_CATATONIC) - return -EACCES; - if (ino->count != 1) return -ENOTEMPTY; @@ -726,16 +717,6 @@ static int autofs_dir_mkdir(struct user_namespace *mnt_userns, struct autofs_info *p_ino; struct inode *inode; - if (!autofs_oz_mode(sbi)) - return -EACCES; - - /* autofs_oz_mode() needs to allow path walks when the - * autofs mount is catatonic but the state of an autofs - * file system needs to be preserved over restarts. - */ - if (sbi->flags & AUTOFS_SBI_CATATONIC) - return -EACCES; - pr_debug("dentry %p, creating %pd\n", dentry, dentry); BUG_ON(!ino); From 50a995e32a31f9d4ca3c7ce3ec19567ba9c34487 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 8 Jul 2022 09:43:06 +0800 Subject: [PATCH 318/321] autofs: make dentry info count consistent If an autofs dentry is a mount root directory there's no ->mkdir() call to set its count to one. To make the dentry info count consistent for all autofs dentries set count to one when the dentry info struct is allocated. 
Link: https://lkml.kernel.org/r/165724458671.30914.2902424437132835325.stgit@donald.themaw.net Signed-off-by: Ian Kent Cc: Al Viro Cc: David Howells Cc: Miklos Szeredi Signed-off-by: Andrew Morton --- fs/autofs/inode.c | 1 + fs/autofs/root.c | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index 9edf243713eb6..affa70360b1f8 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -20,6 +20,7 @@ struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi) INIT_LIST_HEAD(&ino->expiring); ino->last_used = jiffies; ino->sbi = sbi; + ino->count = 1; } return ino; } diff --git a/fs/autofs/root.c b/fs/autofs/root.c index fef6ed9910221..442d27d9cb1b9 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -582,7 +582,6 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns, d_add(dentry, inode); dget(dentry); - ino->count++; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count++; @@ -612,7 +611,6 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry) struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; - ino->count--; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; dput(ino->dentry); @@ -695,7 +693,6 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry) if (sbi->version < 5) autofs_clear_leaf_automount_flags(dentry); - ino->count--; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; dput(ino->dentry); @@ -734,7 +731,6 @@ static int autofs_dir_mkdir(struct user_namespace *mnt_userns, autofs_set_leaf_automount_flags(dentry); dget(dentry); - ino->count++; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count++; inc_nlink(dir); From c3923c8ef4d17d17e1be47ff0730a9c4b0f2122e Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 8 Jul 2022 09:43:12 +0800 Subject: [PATCH 319/321] autofs: use dentry info count instead of simple_empty() The dentry info field count is used to check if a dentry is in use during expire. But, to be used for this, the count field must account for the presence of child dentries in a directory dentry. Therefore it can also be used to check for an empty directory dentry which can be done without having to take an additional lock or account for the presence of a readdir cursor dentry as is done by simple_empty(). 
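The benefit is that the emptiness test becomes a single lockless read. A simplified sketch of the idea (the real helper is added to autofs_i.h in the hunks below):

    /* A directory's autofs_info counts itself plus its children (kept
     * accurate by the previous patch), so emptiness is one comparison:
     */
    static inline bool autofs_empty(struct autofs_info *ino)
    {
        return ino->count < 2;
    }

    /* simple_empty(), by contrast, takes d_lock and walks d_subdirs,
     * skipping negative entries such as readdir cursors, which is why it
     * was awkward to use from the RCU-walk path of ->d_manage().
     */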
Link: https://lkml.kernel.org/r/165724459238.30914.1504611159945950108.stgit@donald.themaw.net Signed-off-by: Ian Kent Cc: Al Viro Cc: David Howells Cc: Miklos Szeredi Signed-off-by: Andrew Morton --- fs/autofs/autofs_i.h | 5 +++++ fs/autofs/expire.c | 2 +- fs/autofs/root.c | 18 ++++++++---------- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h index 918826eaceea3..0117d6e063008 100644 --- a/fs/autofs/autofs_i.h +++ b/fs/autofs/autofs_i.h @@ -148,6 +148,11 @@ static inline int autofs_oz_mode(struct autofs_sb_info *sbi) task_pgrp(current) == sbi->oz_pgrp); } +static inline bool autofs_empty(struct autofs_info *ino) +{ + return ino->count < 2; +} + struct inode *autofs_get_inode(struct super_block *, umode_t); void autofs_free_ino(struct autofs_info *); diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c index b3fefd6237c36..038b3d2d9f572 100644 --- a/fs/autofs/expire.c +++ b/fs/autofs/expire.c @@ -371,7 +371,7 @@ static struct dentry *should_expire(struct dentry *dentry, return NULL; } - if (simple_empty(dentry)) + if (autofs_empty(ino)) return NULL; /* Case 2: tree mount, expire iff entire tree is not busy */ diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 442d27d9cb1b9..e0fa71eb5c058 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -79,6 +79,7 @@ static int autofs_dir_open(struct inode *inode, struct file *file) { struct dentry *dentry = file->f_path.dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); + struct autofs_info *ino = autofs_dentry_ino(dentry); pr_debug("file=%p dentry=%p %pd\n", file, dentry, dentry); @@ -95,7 +96,7 @@ static int autofs_dir_open(struct inode *inode, struct file *file) * it. */ spin_lock(&sbi->lookup_lock); - if (!path_is_mountpoint(&file->f_path) && simple_empty(dentry)) { + if (!path_is_mountpoint(&file->f_path) && autofs_empty(ino)) { spin_unlock(&sbi->lookup_lock); return -ENOENT; } @@ -364,7 +365,7 @@ static struct vfsmount *autofs_d_automount(struct path *path) * the mount never trigger mounts themselves (they have an * autofs trigger mount mounted on them). But v4 pseudo direct * mounts do need the leaves to trigger mounts. In this case - * we have no choice but to use the list_empty() check and + * we have no choice but to use the autofs_empty() check and * require user space behave. */ if (sbi->version > 4) { @@ -373,7 +374,7 @@ static struct vfsmount *autofs_d_automount(struct path *path) goto done; } } else { - if (!simple_empty(dentry)) { + if (!autofs_empty(ino)) { spin_unlock(&sbi->fs_lock); goto done; } @@ -428,9 +429,8 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk) if (rcu_walk) { /* We don't need fs_lock in rcu_walk mode, - * just testing 'AUTOFS_INFO_NO_RCU' is enough. - * simple_empty() takes a spinlock, so leave it - * to last. + * just testing 'AUTOFS_INF_WANT_EXPIRE' is enough. + * * We only return -EISDIR when certain this isn't * a mount-trap. */ @@ -443,9 +443,7 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk) inode = d_inode_rcu(dentry); if (inode && S_ISLNK(inode->i_mode)) return -EISDIR; - if (list_empty(&dentry->d_subdirs)) - return 0; - if (!simple_empty(dentry)) + if (!autofs_empty(ino)) return -EISDIR; return 0; } @@ -465,7 +463,7 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk) * we can avoid needless calls ->d_automount() and avoid * an incorrect ELOOP error return. 
*/ - if ((!path_is_mountpoint(path) && !simple_empty(dentry)) || + if ((!path_is_mountpoint(path) && !autofs_empty(ino)) || (d_really_is_positive(dentry) && d_is_symlink(dentry))) status = -EISDIR; } From ec851f67a722cc1dc957533ca6a4bb2ab7122768 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 8 Jul 2022 09:43:18 +0800 Subject: [PATCH 320/321] autofs: add comment about autofs_mountpoint_changed() The function autofs_mountpoint_changed() is unusual, add a comment about two cases for which it is needed. Link: https://lkml.kernel.org/r/165724459804.30914.10974834416046555127.stgit@donald.themaw.net Signed-off-by: Ian Kent Cc: Al Viro Cc: David Howells Cc: Miklos Szeredi Signed-off-by: Andrew Morton --- fs/autofs/root.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/fs/autofs/root.c b/fs/autofs/root.c index e0fa71eb5c058..ca03c1cae2be1 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -291,9 +291,26 @@ static struct dentry *autofs_mountpoint_changed(struct path *path) struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); - /* - * If this is an indirect mount the dentry could have gone away - * as a result of an expire and a new one created. + /* If this is an indirect mount the dentry could have gone away + * and a new one created. + * + * This is unusual and I can't remember the case for which it + * was originally added now. But an example of how this can + * happen is an autofs indirect mount that has the "browse" + * option set and also has the "symlink" option in the autofs + * map entry. In this case the daemon will remove the browse + * directory and create a symlink as the mount leaving the + * struct path stale. + * + * Another not so obvious case is when a mount in an autofs + * indirect mount that uses the "nobrowse" option is being + * expired at the same time as a path walk. If the mount has + * been umounted but the mount point directory seen before + * becoming unhashed (during a lockless path walk) when a stat + * family system call is made the mount won't be re-mounted as + * it should. In this case the mount point that's been removed + * (by the daemon) will be stale and the a new mount point + * dentry created. */ if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) { struct dentry *parent = dentry->d_parent; From 9c3ad2c10753cfb0bda1ece67a173a0601f3ec5c Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 8 Jul 2022 09:43:23 +0800 Subject: [PATCH 321/321] autofs: remove unused ino field inode Remove the unused inode field of the autofs dentry info structure. Link: https://lkml.kernel.org/r/165724460393.30914.6511330213821246793.stgit@donald.themaw.net Signed-off-by: Ian Kent Cc: Al Viro Cc: David Howells Cc: Miklos Szeredi Signed-off-by: Andrew Morton --- fs/autofs/autofs_i.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h index 0117d6e063008..d5a44fa88acf9 100644 --- a/fs/autofs/autofs_i.h +++ b/fs/autofs/autofs_i.h @@ -51,8 +51,6 @@ extern struct file_system_type autofs_fs_type; */ struct autofs_info { struct dentry *dentry; - struct inode *inode; - int flags; struct completion expire_complete;