Revert "CHROMIUM: mm: multigenerational lru: scan kvm mmu pages"

This reverts commit 684283354c75eb072588c5d1bf84c0da1cb6aeda.
Authored by UtsavBalar1231 on 2022-05-25 09:52:14 +05:30; committed by spakkkk
parent d002c7ca3c
commit dbb6a16c99
6 changed files with 6 additions and 276 deletions
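
For context, the reverted patch hooked KVM's shadow MMU into the multigenerational LRU aging walk: vmscan handed a table of callbacks (struct mmu_notifier_walk) down through the mmu_notifier layer, and KVM iterated its shadow pages, clearing the accessed bit in each SPTE and reporting still-young pages back so their generation could be updated. The following is a minimal, standalone sketch of that callback flow, condensed from the hunks below; the kernel types are replaced with stand-ins and every demo_* name is illustrative only, not something defined in the kernel.

/*
 * Simplified model of the interface this revert removes.  The callback
 * table mirrors struct mmu_notifier_walk from the hunks below; everything
 * else (struct page, struct mm_struct, spte, demo_*) is a stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct page { unsigned long pfn; bool young; };
struct mm_struct { struct page *pages; int nr; };

struct mmu_notifier_walk {
        bool (*start_batch)(struct mm_struct *mm, void *priv);
        bool (*end_batch)(void *priv, bool last);
        struct page *(*get_page)(void *priv, unsigned long pfn, bool young);
        void (*update_page)(void *priv, struct page *page);
        void *private;
};

/* Secondary-MMU side: one "shadow" entry per guest page, with an accessed bit. */
struct spte { unsigned long pfn; bool accessed; };

/*
 * Roughly the shape of kvm_arch_mmu_clear_young_walk()/kvm_clear_young_walk():
 * visit every shadow entry, clear its accessed bit, and hand young pages back
 * to the primary MMU through the walk callbacks, batching as it goes.
 */
static void demo_clear_young_walk(struct mm_struct *mm, struct spte *sptes,
                                  int n, struct mmu_notifier_walk *walk)
{
        bool started = false;

        for (int i = 0; i < n; i++) {
                struct page *page;

                if (!started && !walk->start_batch(mm, walk->private))
                        break;
                started = true;

                page = walk->get_page(walk->private, sptes[i].pfn,
                                      sptes[i].accessed);
                if (page) {
                        sptes[i].accessed = false;      /* "clear young" */
                        walk->update_page(walk->private, page);
                }
                if (walk->end_batch(walk->private, false))
                        started = false;
        }
        if (started)
                walk->end_batch(walk->private, true);
}

/* Primary-MMU (vmscan) side: accept every batch, mark young pages. */
static bool demo_start_batch(struct mm_struct *mm, void *priv) { (void)mm; (void)priv; return true; }
static bool demo_end_batch(void *priv, bool last) { (void)priv; return last; }
static struct page *demo_get_page(void *priv, unsigned long pfn, bool young)
{
        struct mm_struct *mm = priv;
        return (young && pfn < (unsigned long)mm->nr) ? &mm->pages[pfn] : NULL;
}
static void demo_update_page(void *priv, struct page *page) { (void)priv; page->young = true; }

int main(void)
{
        struct page pages[3] = { {0, false}, {1, false}, {2, false} };
        struct mm_struct mm = { pages, 3 };
        struct spte sptes[3] = { {0, true}, {1, false}, {2, true} };
        struct mmu_notifier_walk walk = {
                .start_batch = demo_start_batch,
                .end_batch   = demo_end_batch,
                .get_page    = demo_get_page,
                .update_page = demo_update_page,
                .private     = &mm,
        };

        demo_clear_young_walk(&mm, sptes, 3, &walk);
        for (int i = 0; i < 3; i++)
                printf("page %d young=%d\n", i, pages[i].young);
        return 0;
}

The real callbacks removed below additionally filter pages by node, memcg and LRU state, count per-walk statistics, and batch generation updates; the control flow has the same shape as this sketch.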

Changed file 1 of 6

@@ -314,10 +314,6 @@ struct kvm_mmu_page {
         /* Number of writes since the last time traversal visited this page. */
         atomic_t write_flooding_count;
-
-        atomic_t ref_count;
-        struct rcu_head rcu_head;
-        struct list_head mmu_page_list;
 };
 
 struct kvm_pio_request {
@@ -886,9 +882,6 @@ struct kvm_arch {
         bool guest_can_read_msr_platform_info;
 
         struct task_struct *nx_lpage_recovery_thread;
-
-        spinlock_t mmu_page_list_lock;
-        struct list_head mmu_page_list;
 };
 
 struct kvm_vm_stat {

Changed file 2 of 6

@@ -2048,35 +2048,15 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_page_rcu(struct rcu_head *rcu_head)
-{
-        struct kvm_mmu_page *sp = container_of(rcu_head, struct kvm_mmu_page,
-                                               rcu_head);
-
-        free_page((unsigned long)sp->spt);
-        if (!sp->role.direct)
-                free_page((unsigned long)sp->gfns);
-        kmem_cache_free(mmu_page_header_cache, sp);
-}
-
-static void kvm_mmu_put_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-        if (!atomic_dec_and_test(&sp->ref_count))
-                return;
-
-        spin_lock(&kvm->arch.mmu_page_list_lock);
-        list_del_rcu(&sp->mmu_page_list);
-        spin_unlock(&kvm->arch.mmu_page_list_lock);
-
-        call_rcu(&sp->rcu_head, kvm_mmu_free_page_rcu);
-}
-
-static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
         MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
         hlist_del(&sp->hash_link);
         list_del(&sp->link);
-        kvm_mmu_put_page(kvm, sp);
+        free_page((unsigned long)sp->spt);
+        if (!sp->role.direct)
+                free_page((unsigned long)sp->gfns);
+        kmem_cache_free(mmu_page_header_cache, sp);
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -2550,10 +2530,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         }
         sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
         clear_page(sp->spt);
-        atomic_set(&sp->ref_count, 1);
-        spin_lock(&vcpu->kvm->arch.mmu_page_list_lock);
-        list_add_tail_rcu(&sp->mmu_page_list, &vcpu->kvm->arch.mmu_page_list);
-        spin_unlock(&vcpu->kvm->arch.mmu_page_list_lock);
         trace_kvm_mmu_get_page(sp, true);
 
         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
@@ -2798,7 +2774,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
         list_for_each_entry_safe(sp, nsp, invalid_list, link) {
                 WARN_ON(!sp->role.invalid || sp->root_count);
-                kvm_mmu_free_page(kvm, sp);
+                kvm_mmu_free_page(sp);
         }
 }
@@ -5623,8 +5599,6 @@ void kvm_mmu_init_vm(struct kvm *kvm)
         node->track_write = kvm_mmu_pte_write;
         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
         kvm_page_track_register_notifier(kvm, node);
-        INIT_LIST_HEAD(&kvm->arch.mmu_page_list);
-        spin_lock_init(&kvm->arch.mmu_page_list_lock);
 }
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
@@ -6316,78 +6290,3 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
         if (kvm->arch.nx_lpage_recovery_thread)
                 kthread_stop(kvm->arch.nx_lpage_recovery_thread);
 }
-
-static void kvm_clear_young_walk(struct kvm *kvm, struct mmu_notifier_walk *walk,
-                                 struct kvm_mmu_page *sp)
-{
-        int i;
-
-        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-                u64 old, new;
-                struct page *page;
-                kvm_pfn_t pfn = -1;
-
-                new = old = mmu_spte_get_lockless(sp->spt + i);
-
-                if (is_shadow_present_pte(old)) {
-                        if (!is_last_spte(old, sp->role.level))
-                                continue;
-
-                        pfn = spte_to_pfn(old);
-
-                        if (spte_ad_enabled(old))
-                                new = old & ~shadow_accessed_mask;
-                        else if (!is_access_track_spte(old))
-                                new = mark_spte_for_access_track(old);
-                }
-
-                page = walk->get_page(walk->private, pfn, new != old);
-                if (!page)
-                        continue;
-
-                if (new != old && cmpxchg64(sp->spt + i, old, new) == old)
-                        walk->update_page(walk->private, page);
-
-                put_page(page);
-        }
-}
-
-void kvm_arch_mmu_clear_young_walk(struct kvm *kvm, struct mmu_notifier_walk *walk)
-{
-        struct kvm_mmu_page *sp;
-        bool started = false;
-
-        rcu_read_lock();
-
-        list_for_each_entry_rcu(sp, &kvm->arch.mmu_page_list, mmu_page_list) {
-                if (is_obsolete_sp(kvm, sp) || sp->role.invalid ||
-                    sp->role.level > PT_DIRECTORY_LEVEL)
-                        continue;
-
-                if (!started && !walk->start_batch(kvm->mm, walk->private))
-                        break;
-                started = true;
-
-                kvm_clear_young_walk(kvm, walk, sp);
-
-                if (!walk->end_batch(walk->private, false))
-                        continue;
-                started = false;
-
-                if (!atomic_inc_not_zero(&sp->ref_count))
-                        continue;
-
-                rcu_read_unlock();
-                cond_resched();
-                rcu_read_lock();
-
-                kvm_mmu_put_page(kvm, sp);
-        }
-
-        if (started && !walk->end_batch(walk->private, true))
-                VM_BUG_ON(true);
-
-        rcu_read_unlock();
-}

Changed file 3 of 6

@@ -15,14 +15,6 @@ struct mmu_notifier_ops;
 /* mmu_notifier_ops flags */
 #define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
 
-struct mmu_notifier_walk {
-        bool (*start_batch)(struct mm_struct *mm, void *priv);
-        bool (*end_batch)(void *priv, bool last);
-        struct page *(*get_page)(void *priv, unsigned long pfn, bool young);
-        void (*update_page)(void *priv, struct page *page);
-        void *private;
-};
-
 #ifdef CONFIG_MMU_NOTIFIER
 
 /*
@@ -98,9 +90,6 @@ struct mmu_notifier_ops {
                            unsigned long start,
                            unsigned long end);
-
-        void (*clear_young_walk)(struct mmu_notifier *mn,
-                                 struct mmu_notifier_walk *walk);
 
         /*
          * test_young is called to check the young/accessed bitflag in
          * the secondary pte. This is used to know if the page is
@@ -247,8 +236,6 @@ extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 extern int __mmu_notifier_clear_young(struct mm_struct *mm,
                                       unsigned long start,
                                       unsigned long end);
-extern void __mmu_notifier_clear_young_walk(struct mm_struct *mm,
-                                            struct mmu_notifier_walk *walk);
 extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                      unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -287,13 +274,6 @@ static inline int mmu_notifier_clear_young(struct mm_struct *mm,
         return 0;
 }
 
-static inline void mmu_notifier_clear_young_walk(struct mm_struct *mm,
-                                                 struct mmu_notifier_walk *walk)
-{
-        if (mm_has_notifiers(mm))
-                __mmu_notifier_clear_young_walk(mm, walk);
-}
-
 static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                           unsigned long address)
 {
@@ -485,11 +465,6 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
         return 0;
 }
 
-static inline void mmu_notifier_clear_young_walk(struct mm_struct *mm,
-                                                 struct mmu_notifier_walk *walk)
-{
-}
-
 static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                           unsigned long address)
 {

Changed file 4 of 6

@@ -141,20 +141,6 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
         return young;
 }
-
-void __mmu_notifier_clear_young_walk(struct mm_struct *mm,
-                                     struct mmu_notifier_walk *walk)
-{
-        int id;
-        struct mmu_notifier *mn;
-
-        id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-                if (mn->ops->clear_young_walk)
-                        mn->ops->clear_young_walk(mn, walk);
-        }
-        srcu_read_unlock(&srcu, id);
-}
 
 int __mmu_notifier_test_young(struct mm_struct *mm,
                               unsigned long address)
 {

Changed file 5 of 6

@@ -55,7 +55,6 @@
 #include <linux/shmem_fs.h>
 #include <linux/ctype.h>
 #include <linux/debugfs.h>
-#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -3499,113 +3498,6 @@ contended:
                 cond_resched();
         } while (err == -EAGAIN && args->next_addr &&
                  !mm_is_oom_victim(mm) && !mm_has_migrated(mm, memcg));
 }
 
-static bool mmu_notifier_start_batch(struct mm_struct *mm, void *priv)
-{
-        struct mm_walk_args *args = priv;
-        struct mem_cgroup *memcg = args->memcg;
-
-        VM_BUG_ON(!rcu_read_lock_held());
-
-#ifdef CONFIG_MEMCG
-        if (memcg && atomic_read(&memcg->moving_account)) {
-                args->mm_stats[MM_LOCK_CONTENTION]++;
-                return false;
-        }
-#endif
-        return !mm_is_oom_victim(mm) && !mm_has_migrated(mm, memcg);
-}
-
-static bool mmu_notifier_end_batch(void *priv, bool last)
-{
-        struct lruvec *lruvec;
-        struct mm_walk_args *args = priv;
-
-        VM_BUG_ON(!rcu_read_lock_held());
-
-        if (!last && args->batch_size < MAX_BATCH_SIZE)
-                return false;
-
-        lruvec = mem_cgroup_lruvec(NODE_DATA(args->node_id), args->memcg);
-        reset_batch_size(lruvec, args);
-
-        return true;
-}
-
-static struct page *mmu_notifier_get_page(void *priv, unsigned long pfn, bool young)
-{
-        struct page *page;
-        struct mm_walk_args *args = priv;
-
-        if (pfn == -1 || is_zero_pfn(pfn)) {
-                args->mm_stats[MM_LEAF_HOLE]++;
-                return NULL;
-        }
-
-        if (!young) {
-                args->mm_stats[MM_LEAF_OLD]++;
-                return NULL;
-        }
-
-        VM_BUG_ON(!pfn_valid(pfn));
-        if (pfn < args->start_pfn || pfn >= args->end_pfn) {
-                args->mm_stats[MM_LEAF_OTHER_NODE]++;
-                return NULL;
-        }
-
-        page = compound_head(pfn_to_page(pfn));
-        if (page_to_nid(page) != args->node_id) {
-                args->mm_stats[MM_LEAF_OTHER_NODE]++;
-                return NULL;
-        }
-
-        if (page_memcg_rcu(page) != args->memcg) {
-                args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
-                return NULL;
-        }
-
-        if (!PageLRU(page)) {
-                args->mm_stats[MM_LEAF_HOLE]++;
-                return NULL;
-        }
-
-        return get_page_unless_zero(page) ? page : NULL;
-}
-
-static void mmu_notifier_update_page(void *priv, struct page *page)
-{
-        struct mm_walk_args *args = priv;
-        int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
-
-        if (page_memcg_rcu(page) != args->memcg) {
-                args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
-                return;
-        }
-
-        if (!PageLRU(page)) {
-                args->mm_stats[MM_LEAF_HOLE]++;
-                return;
-        }
-
-        old_gen = page_update_gen(page, new_gen);
-        if (old_gen >= 0 && old_gen != new_gen)
-                update_batch_size(page, old_gen, new_gen, args);
-
-        args->mm_stats[MM_LEAF_YOUNG]++;
-}
-
-static void call_mmu_notifier(struct mm_walk_args *args, struct mm_struct *mm)
-{
-        struct mmu_notifier_walk walk = {
-                .start_batch = mmu_notifier_start_batch,
-                .end_batch = mmu_notifier_end_batch,
-                .get_page = mmu_notifier_get_page,
-                .update_page = mmu_notifier_update_page,
-                .private = args,
-        };
-
-        mmu_notifier_clear_young_walk(mm, &walk);
-}
-
 static void page_inc_gen(struct page *page, struct lruvec *lruvec, bool front)
@@ -3797,7 +3689,6 @@ static bool walk_mm_list(struct lruvec *lruvec, unsigned long max_seq,
                 last = get_next_mm(args, &mm);
                 if (mm) {
                         walk_mm(args, mm);
-                        call_mmu_notifier(args, mm);
                 }
 
                 cond_resched();

Changed file 6 of 6

@@ -501,19 +501,6 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
         return young;
 }
 
-__weak void kvm_arch_mmu_clear_young_walk(struct kvm *kvm,
-                                          struct mmu_notifier_walk *walk)
-{
-}
-
-static void kvm_mmu_notifier_clear_young_walk(struct mmu_notifier *mn,
-                                              struct mmu_notifier_walk *walk)
-{
-        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-
-        kvm_arch_mmu_clear_young_walk(kvm, walk);
-}
-
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address)
@@ -548,7 +535,6 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
         .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
         .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
         .clear_young = kvm_mmu_notifier_clear_young,
-        .clear_young_walk = kvm_mmu_notifier_clear_young_walk,
         .test_young = kvm_mmu_notifier_test_young,
         .change_pte = kvm_mmu_notifier_change_pte,
         .release = kvm_mmu_notifier_release,