xen/balloon: Share common memory reservation routines
Memory {increase|decrease}_reservation and VA mapping update/reset
code used in the balloon driver can be made common, so other drivers
can also reuse the same functionality without open-coding.
Create a dedicated file for the shared code and export the
corresponding symbols for other kernel modules.

Signed-off-by: Oleksandr Andrushchenko <[email protected]>
Reviewed-by: Boris Ostrovsky <[email protected]>
Signed-off-by: Boris Ostrovsky <[email protected]>
Oleksandr Andrushchenko authored and Boris Ostrovsky committed Jul 27, 2018
1 parent 8c3799e commit ae4c51a
Showing 4 changed files with 184 additions and 69 deletions.
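For context before the per-file diffs: a minimal sketch of how another kernel module might balloon out pages with the helpers this commit introduces. The function name example_release_pages and the 16-entry frame array are illustrative assumptions, not part of the commit.

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	#include <xen/mem-reservation.h>

	/* Illustrative only: hand nr already-allocated pages back to Xen. */
	static int example_release_pages(struct page **pages, int nr)
	{
		xen_pfn_t frames[16];	/* hypothetical per-call batch limit */
		int i, ret;

		if (nr > ARRAY_SIZE(frames))
			return -EINVAL;

		for (i = 0; i < nr; i++) {
			xenmem_reservation_scrub_page(pages[i]);
			/* XENMEM_decrease_reservation takes GFNs. */
			frames[i] = xen_page_to_gfn(pages[i]);
		}

		/*
		 * Clear p2m entries and kernel VA mappings on PV guests;
		 * a no-op on auto-translated guests.
		 */
		xenmem_reservation_va_mapping_reset(nr, pages);

		ret = xenmem_reservation_decrease(nr, frames);
		return ret == nr ? 0 : -EIO;
	}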
1 change: 1 addition & 0 deletions drivers/xen/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
obj-y += mem-reservation.o
obj-y += events/
obj-y += xenbus/

75 changes: 6 additions & 69 deletions drivers/xen/balloon.c
@@ -71,6 +71,7 @@
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

static int xen_hotplug_unpopulated;

@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
#define GFP_BALLOON \
(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
int rc;
unsigned long i;
struct page *page;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};

if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
@@ -479,46 +468,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
break;
}

/* XENMEM_populate_physmap requires a PFN based on Xen
* granularity.
*/
frame_list[i] = page_to_xen_pfn(page);
page = balloon_next_page(page);
}

set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
rc = xenmem_reservation_increase(nr_pages, frame_list);
if (rc <= 0)
return BP_EAGAIN;

for (i = 0; i < rc; i++) {
page = balloon_retrieve(false);
BUG_ON(page == NULL);

#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* We don't support PV MMU when Linux and Xen is using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long pfn = page_to_pfn(page);

set_phys_to_machine(pfn, frame_list[i]);

/* Link back into the page tables if not highmem. */
if (!PageHighMem(page)) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(frame_list[i], PAGE_KERNEL),
0);
BUG_ON(ret);
}
}
#endif
xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

/* Relinquish the page back to the allocator. */
free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
unsigned long i;
struct page *page, *tmp;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
LIST_HEAD(pages);

if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
break;
}
adjust_managed_page_count(page, -1);
scrub_page(page);
xenmem_reservation_scrub_page(page);
list_add(&page->lru, &pages);
}

@@ -572,38 +529,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
*/
i = 0;
list_for_each_entry_safe(page, tmp, &pages, lru) {
/* XENMEM_decrease_reservation requires a GFN */
frame_list[i++] = xen_page_to_gfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* We don't support PV MMU when Linux and Xen is using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long pfn = page_to_pfn(page);
xenmem_reservation_va_mapping_reset(1, &page);

if (!PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
}
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
#endif
list_del(&page->lru);

balloon_append(page);
}

flush_tlb_all();

set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
ret = xenmem_reservation_decrease(nr_pages, frame_list);
BUG_ON(ret != nr_pages);

balloon_stats.current_pages -= nr_pages;
118 changes: 118 additions & 0 deletions drivers/xen/mem-reservation.c
@@ -0,0 +1,118 @@
// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
* Xen memory reservation utilities.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>

/*
* Use one extent per PAGE_SIZE to avoid breaking down the page into
* multiple frames.
*/
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames)
{
int i;

for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn;

BUG_ON(!page);
pfn = page_to_pfn(page);

/*
* We don't support PV MMU when Linux and Xen are using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

set_phys_to_machine(pfn, frames[i]);

/* Link back into the page tables if not highmem. */
if (!PageHighMem(page)) {
int ret;

ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(frames[i], PAGE_KERNEL),
0);
BUG_ON(ret);
}
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
int i;

for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);

/*
* We don't support PV MMU when Linux and Xen are using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

if (!PageHighMem(page)) {
int ret;

ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
}
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */

/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};

/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
set_xen_guest_handle(reservation.extent_start, frames);
reservation.nr_extents = count;
return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);

/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};

/* XENMEM_decrease_reservation requires a GFN */
set_xen_guest_handle(reservation.extent_start, frames);
reservation.nr_extents = count;
return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
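As a sanity check on the EXTENT_ORDER definition above: XEN_PFN_PER_PAGE is PAGE_SIZE / XEN_PAGE_SIZE, so with matching 4 KiB pages (the x86 case) fls(1) - 1 = 0 and each extent is a single 4 KiB frame, while a guest running 64 KiB Linux pages over 4 KiB Xen frames gets fls(16) - 1 = 4, i.e. one order-4 extent per Linux page rather than sixteen separate frames.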
59 changes: 59 additions & 0 deletions include/xen/mem-reservation.h
@@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0 */

/*
* Xen memory reservation utilities.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/

#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H

#include <linux/highmem.h>

#include <xen/page.h>

static inline void xenmem_reservation_scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
clear_highpage(page);
#endif
}

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages);
#endif

static inline void xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
if (!xen_feature(XENFEAT_auto_translated_physmap))
__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}

static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
if (!xen_feature(XENFEAT_auto_translated_physmap))
__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}

int xenmem_reservation_increase(int count, xen_pfn_t *frames);

int xenmem_reservation_decrease(int count, xen_pfn_t *frames);

#endif
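And the mirror-image increase path, again only a sketch under the same assumptions and includes as the earlier example, to show that callers use the wrappers unconditionally: the XENFEAT_auto_translated_physmap check stays inside the header.

	/* Illustrative only: repopulate nr previously ballooned-out pages. */
	static int example_reclaim_pages(struct page **pages, int nr)
	{
		xen_pfn_t frames[16];	/* hypothetical per-call batch limit */
		int i, ret;

		if (nr > ARRAY_SIZE(frames))
			return -EINVAL;

		/* XENMEM_populate_physmap takes PFNs at Xen granularity. */
		for (i = 0; i < nr; i++)
			frames[i] = page_to_xen_pfn(pages[i]);

		ret = xenmem_reservation_increase(nr, frames);
		if (ret <= 0)
			return -ENOMEM;

		/*
		 * Restore p2m entries and kernel VA mappings for the extents
		 * actually granted; reduces to the feature check on
		 * auto-translated guests.
		 */
		xenmem_reservation_va_mapping_update(ret, pages, frames);

		return ret;
	}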
