On 05/25/2018 11:33 AM, Oleksandr Andrushchenko wrote:
+void xenmem_reservation_va_mapping_update(unsigned long count,
+					  struct page **pages,
+					  xen_pfn_t *frames)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct page *page;
+
+		page = pages[i];
+		BUG_ON(page == NULL);
+
+		/*
+		 * We don't support PV MMU when Linux and Xen are using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			unsigned long pfn = page_to_pfn(page);
+
+			set_phys_to_machine(pfn, frames[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+
+				ret = HYPERVISOR_update_va_mapping(
+						(unsigned long)__va(pfn << PAGE_SHIFT),
+						mfn_pte(frames[i], PAGE_KERNEL),
+						0);
+				BUG_ON(ret);
+			}
+		}
+	}
+#endif
+}
+EXPORT_SYMBOL(xenmem_reservation_va_mapping_update);
+void xenmem_reservation_va_mapping_reset(unsigned long count,
+					 struct page **pages)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+	int i;
+
+	for (i = 0; i < count; i++) {
+		/*
+		 * We don't support PV MMU when Linux and Xen are using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			struct page *page = pages[i];
+			unsigned long pfn = page_to_pfn(page);
+
+			if (!PageHighMem(page)) {
+				int ret;
+
+				ret = HYPERVISOR_update_va_mapping(
+						(unsigned long)__va(pfn << PAGE_SHIFT),
+						__pte_ma(0), 0);
+				BUG_ON(ret);
+			}
+
+			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+		}
+	}
+#endif
+}
+EXPORT_SYMBOL(xenmem_reservation_va_mapping_reset);
One other thing I noticed: both of these can be declared as no-ops in the header file if !CONFIG_XEN_HAVE_PVMMU, which would also move the #ifdef/#endif pairs out of the .c file.
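As a sketch of what I mean (the header path and layout are my assumption, not something taken from the quoted patch):

#ifdef CONFIG_XEN_HAVE_PVMMU
void xenmem_reservation_va_mapping_update(unsigned long count,
					  struct page **pages,
					  xen_pfn_t *frames);
void xenmem_reservation_va_mapping_reset(unsigned long count,
					 struct page **pages);
#else
/* Without PV MMU there are no PTEs or p2m entries to touch, so no-ops. */
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
							struct page **pages,
							xen_pfn_t *frames)
{
}

static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
						       struct page **pages)
{
}
#endif

With that, the out-of-line definitions (and their EXPORT_SYMBOLs) are only built when CONFIG_XEN_HAVE_PVMMU is set.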
-boris