On Saturday, 27 February 2021 2:59:09 AM AEDT Christoph Hellwig wrote:
-    struct page *page = migration_entry_to_page(entry);
+    struct page *page = pfn_to_page(swp_offset(entry));
I wonder if keeping a single special_entry_to_page() helper would still be useful. But I'm not entirely sure. There are also two more open-coded copies of this in the THP migration code.
I think it might be, if only to clearly document where these entries are used. Will add it for the next version to see what it looks like.
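Roughly something along these lines, although this is only a sketch and the name special_entry_to_page() (and where it would live) is still to be decided:

static inline struct page *special_entry_to_page(swp_entry_t entry)
{
    /* Special entries (migration and device private) encode a PFN. */
    return pfn_to_page(swp_offset(entry));
}

That would then replace the open-coded pfn_to_page(swp_offset(entry)) calls below, including the two copies in the THP migration code.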
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
+#define free_swap_and_cache(e) is_special_entry(e)
+#define swapcache_prepare(e) is_special_entry(e)
Staring at this I'm really, really confused about what this is doing.
Looking a little closer, these are the !CONFIG_SWAP stubs, but they could probably use a comment or two.
Will do, thanks.
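For the stubs, something along these lines perhaps (only a sketch of the sort of comment I have in mind):

/*
 * !CONFIG_SWAP stubs. With swap disabled the only non-present swap
 * entries that can be seen here are special entries (migration and
 * device private), so simply report whether the entry was one of
 * those. Callers such as zap_pte_range() use the return value to
 * decide whether the PTE was unexpected.
 */
#define free_swap_and_cache(e) is_special_entry(e)
#define swapcache_prepare(e) is_special_entry(e)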
- Alistair
    } else if (is_migration_entry(entry)) {
-        page = migration_entry_to_page(entry);
+        page = pfn_to_page(swp_offset(entry));

        rss[mm_counter(page)]++;
@@ -737,7 +737,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
            set_pte_at(src_mm, addr, src_pte, pte);
        }
    } else if (is_device_private_entry(entry)) {
-        page = device_private_entry_to_page(entry);
+        page = pfn_to_page(swp_offset(entry));

        /*
         * Update rss count even for unaddressable pages, as
@@ -1274,7 +1274,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        entry = pte_to_swp_entry(ptent);
        if (is_device_private_entry(entry)) {
-            struct page *page = device_private_entry_to_page(entry);
+            struct page *page = pfn_to_page(swp_offset(entry));

            if (unlikely(details && details->check_mapping)) {
                /*
@@ -1303,7 +1303,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        else if (is_migration_entry(entry)) {
            struct page *page;

-            page = migration_entry_to_page(entry);
+            page = pfn_to_page(swp_offset(entry));
            rss[mm_counter(page)]--;
        }
        if (unlikely(!free_swap_and_cache(entry)))
@@ -3271,7 +3271,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
            migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
        } else if (is_device_private_entry(entry)) {
-            vmf->page = device_private_entry_to_page(entry);
+            vmf->page = pfn_to_page(swp_offset(entry));
            ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
        } else if (is_hwpoison_entry(entry)) {
            ret = VM_FAULT_HWPOISON;
diff --git a/mm/migrate.c b/mm/migrate.c
index 20ca887ea769..72adcc3d8f5b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -321,7 +321,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
    if (!is_migration_entry(entry))
        goto out;

-    page = migration_entry_to_page(entry);
+    page = pfn_to_page(swp_offset(entry));

    /*
     * Once page cache replacement of page migration started, page_count
@@ -361,7 +361,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
    ptl = pmd_lock(mm, pmd);
    if (!is_pmd_migration_entry(*pmd))
        goto unlock;
-    page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
+    page = pfn_to_page(swp_offset(pmd_to_swp_entry(*pmd)));
    if (!get_page_unless_zero(page))
        goto unlock;
    spin_unlock(ptl);
@@ -2437,7 +2437,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
            if (!is_device_private_entry(entry))
                goto next;

-            page = device_private_entry_to_page(entry);
+            page = pfn_to_page(swp_offset(entry));
            if (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
                page->pgmap->owner != migrate->pgmap_owner)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 86e3a3688d59..34230d08556a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
        if (!is_migration_entry(entry))
            return false;

-        pfn = migration_entry_to_pfn(entry);
+        pfn = swp_offset(entry);
    } else if (is_swap_pte(*pvmw->pte)) {
        swp_entry_t entry;
@@ -105,7 +105,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
        if (!is_device_private_entry(entry))
            return false;

-        pfn = device_private_entry_to_pfn(entry);
+        pfn = swp_offset(entry);
    } else {
        if (!pte_present(*pvmw->pte))
            return false;
@@ -200,7 +200,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
            swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

-            if (migration_entry_to_page(entry) != page)
+            if (pfn_to_page(swp_offset(entry)) != page)
                return not_found(pvmw);
            return true;
        }
---end quoted text---
On Tuesday, 2 March 2021 7:52:53 PM AEDT Alistair Popple wrote:
On Saturday, 27 February 2021 2:59:09 AM AEDT Christoph Hellwig wrote:
-    struct page *page = migration_entry_to_page(entry);
+    struct page *page = pfn_to_page(swp_offset(entry));
I wonder if keeping a single special_entry_to_page() helper would still be useful. But I'm not entirely sure. There are also two more open-coded copies of this in the THP migration code.
I think it might be, if only to clearly document where these entries are used.
Will add it for the next version to see what it looks like.
Actually the main advantage ends up being that it becomes easy to retain the page locked check for migration entries.
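In other words the helper can keep an assertion along these lines (again only a sketch, reusing the special_entry_to_page() name from above), which the open-coded pfn_to_page(swp_offset(entry)) callers would otherwise lose:

static inline struct page *special_entry_to_page(swp_entry_t entry)
{
    struct page *p = pfn_to_page(swp_offset(entry));

    /*
     * Any use of migration entries may only occur while the
     * corresponding page is locked.
     */
    BUG_ON(is_migration_entry(entry) && !PageLocked(p));

    return p;
}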
- Alistair