Commit 546de70
drivers: xen: add more hypercall wrappers
Add more hypercall wrappers to support the virtio-mmio backend.

- dmop
  - dmop_create_ioreq_server: HYPERVISOR_dm_op(XEN_DMOP_create_ioreq_server)
  - dmop_map_io_range_to_ioreq_server: HYPERVISOR_dm_op(XEN_DMOP_map_io_range_to_ioreq_server)
  - dmop_set_ioreq_server_state: HYPERVISOR_dm_op(XEN_DMOP_set_ioreq_server_state)
  - dmop_nr_vcpus: HYPERVISOR_dm_op(XEN_DMOP_nr_vcpus)
  - dmop_set_irq_level: HYPERVISOR_dm_op(XEN_DMOP_set_irq_level)
- memory
  - xendom_acquire_resource: HYPERVISOR_memory_op(XENMEM_acquire_resource)
- sched
  - sched_poll: HYPERVISOR_sched_op(SCHEDOP_poll)

Signed-off-by: TOKITA Hiroshi <[email protected]>
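Taken together, the wrappers compose into the usual ioreq-server bring-up
sequence for a virtio-mmio backend. Below is a minimal sketch, not part of
this commit: the helper name, VIRTIO_MMIO_BASE/VIRTIO_MMIO_SIZE, and the
choice to leave buffered ioreqs disabled are illustrative assumptions.

#include <zephyr/xen/dmop.h>

/* Hypothetical register window served by the backend. */
#define VIRTIO_MMIO_BASE 0x02000000UL
#define VIRTIO_MMIO_SIZE 0x200UL

static int virtio_mmio_backend_init(domid_t guest)
{
        ioservid_t id;
        int err;

        /* 0 == HVM_IOREQSRV_BUFIOREQ_OFF: no buffered-ioreq page. */
        err = dmop_create_ioreq_server(guest, 0, &id);
        if (err < 0) {
                return err;
        }

        /* Claim the MMIO window; the end address is inclusive. */
        err = dmop_map_io_range_to_ioreq_server(guest, id, XEN_DMOP_IO_RANGE_MEMORY,
                                                VIRTIO_MMIO_BASE,
                                                VIRTIO_MMIO_BASE + VIRTIO_MMIO_SIZE - 1);
        if (err < 0) {
                return err;
        }

        /* Start dispatching I/O requests to this server. */
        return dmop_set_ioreq_server_state(guest, id, 1);
}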
1 parent 67f5002 · commit 546de70

File tree

7 files changed: 288 additions & 0 deletions

drivers/xen/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -5,5 +5,7 @@ zephyr_sources(hvm.c)
 zephyr_sources(events.c)
 zephyr_sources_ifdef(CONFIG_XEN_GRANT_TABLE gnttab.c)
 zephyr_sources(memory.c)
+zephyr_sources(sched.c)
+zephyr_sources(dmop.c)
 
 add_subdirectory_ifdef(CONFIG_XEN_DOM0 dom0)

drivers/xen/dmop.c

Lines changed: 113 additions & 0 deletions
@@ -0,0 +1,113 @@
/*
 * Copyright 2025 TOKITA Hiroshi
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/util.h>
#include <zephyr/xen/dmop.h>
#include <zephyr/arch/arm64/hypercall.h>

int dmop_create_ioreq_server(domid_t domid, uint8_t handle_bufioreq, ioservid_t *id)
{
        struct xen_dm_op_buf bufs[1] = {0};
        struct xen_dm_op dm_op = {0};
        int err;

        dm_op.op = XEN_DMOP_create_ioreq_server;
        dm_op.u.create_ioreq_server.handle_bufioreq = handle_bufioreq;

        set_xen_guest_handle(bufs[0].h, &dm_op);
        bufs[0].size = sizeof(struct xen_dm_op);

        err = HYPERVISOR_dm_op(domid, ARRAY_SIZE(bufs), bufs);
        if (err < 0) {
                return err;
        }

        *id = dm_op.u.create_ioreq_server.id;

        return 0;
}

int dmop_map_io_range_to_ioreq_server(domid_t domid, ioservid_t id, uint32_t type, uint64_t start,
                                      uint64_t end)
{
        struct xen_dm_op_buf bufs[1] = {0};
        struct xen_dm_op dm_op = {0};
        int err;

        dm_op.op = XEN_DMOP_map_io_range_to_ioreq_server;
        dm_op.u.map_io_range_to_ioreq_server.id = id;
        dm_op.u.map_io_range_to_ioreq_server.type = type;
        dm_op.u.map_io_range_to_ioreq_server.start = start;
        dm_op.u.map_io_range_to_ioreq_server.end = end;

        set_xen_guest_handle(bufs[0].h, &dm_op);
        bufs[0].size = sizeof(struct xen_dm_op);

        err = HYPERVISOR_dm_op(domid, ARRAY_SIZE(bufs), bufs);
        if (err < 0) {
                return err;
        }

        return 0;
}

int dmop_set_ioreq_server_state(domid_t domid, ioservid_t id, uint8_t enabled)
{
        struct xen_dm_op_buf bufs[1] = {0};
        struct xen_dm_op dm_op = {0};
        int err;

        dm_op.op = XEN_DMOP_set_ioreq_server_state;
        dm_op.u.set_ioreq_server_state.id = id;
        dm_op.u.set_ioreq_server_state.enabled = enabled;

        set_xen_guest_handle(bufs[0].h, &dm_op);
        bufs[0].size = sizeof(struct xen_dm_op);

        err = HYPERVISOR_dm_op(domid, ARRAY_SIZE(bufs), bufs);
        if (err < 0) {
                return err;
        }

        return 0;
}

int dmop_nr_vcpus(domid_t domid)
{
        struct xen_dm_op_buf bufs[1] = {0};
        struct xen_dm_op dm_op = {0};
        int err;

        dm_op.op = XEN_DMOP_nr_vcpus;

        set_xen_guest_handle(bufs[0].h, &dm_op);
        bufs[0].size = sizeof(struct xen_dm_op);

        err = HYPERVISOR_dm_op(domid, ARRAY_SIZE(bufs), bufs);
        if (err < 0) {
                return err;
        }

        return dm_op.u.nr_vcpus.vcpus;
}

int dmop_set_irq_level(domid_t domid, uint32_t irq, uint8_t level)
{
        struct xen_dm_op_buf bufs[1] = {0};
        struct xen_dm_op dm_op = {0};
        int err;

        dm_op.op = XEN_DMOP_set_irq_level;
        dm_op.u.set_irq_level.irq = irq;
        dm_op.u.set_irq_level.level = level;

        set_xen_guest_handle(bufs[0].h, &dm_op);
        bufs[0].size = sizeof(struct xen_dm_op);

        err = HYPERVISOR_dm_op(domid, ARRAY_SIZE(bufs), bufs);

        return err;
}

drivers/xen/memory.c

Lines changed: 22 additions & 0 deletions
@@ -66,3 +66,25 @@ int xendom_populate_physmap(int domid, unsigned int extent_order,
 
         return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
 }
+
+int xendom_acquire_resource(domid_t domid, uint16_t type, uint32_t id, uint64_t frame,
+                            uint32_t *nr_frames, xen_pfn_t *frame_list)
+{
+        struct xen_mem_acquire_resource acquire_res = {
+                .domid = domid,
+                .type = type,
+                .id = id,
+                .pad = 0,
+                .frame = frame,
+                .nr_frames = *nr_frames,
+        };
+        int ret;
+
+        set_xen_guest_handle(acquire_res.frame_list, frame_list);
+
+        ret = HYPERVISOR_memory_op(XENMEM_acquire_resource, &acquire_res);
+
+        *nr_frames = acquire_res.nr_frames;
+
+        return ret;
+}

drivers/xen/sched.c

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
/*
 * Copyright (c) 2025 TOKITA Hiroshi
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/arch/arm64/hypercall.h>
#include <zephyr/xen/sched.h>

int sched_poll(evtchn_port_t *ports, unsigned int nr_ports, uint64_t timeout)
{
        struct sched_poll poll = {
                .ports.p = ports,
                .nr_ports = nr_ports,
                .timeout = timeout,
        };

        return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}

include/zephyr/xen/dmop.h

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2025 TOKITA Hiroshi
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_XEN_DMOP_H_
#define ZEPHYR_XEN_DMOP_H_

#include <xen/public/hvm/dm_op.h>

/**
 * @brief Create an I/O request server in the given Xen domain.
 *
 * This function issues the XEN_DMOP_create_ioreq_server hypercall to create
 * a server that handles I/O requests on behalf of the guest domain.
 *
 * @param domid Xen domain identifier where the server is created.
 * @param handle_bufioreq Flag indicating whether buffered I/O requests should be handled.
 *                        Set to non-zero to enable buffered handling.
 * @param id Output pointer to receive the newly created server ID.
 *
 * @return 0 on success, or a negative errno code on failure.
 */
int dmop_create_ioreq_server(domid_t domid, uint8_t handle_bufioreq, ioservid_t *id);

/**
 * @brief Map a specified I/O address range to an existing I/O request server.
 *
 * This function issues the XEN_DMOP_map_io_range_to_ioreq_server hypercall to grant
 * access to the given I/O address range for the specified server.
 *
 * @param domid Xen domain identifier where the mapping is applied.
 * @param id I/O request server ID returned by dmop_create_ioreq_server().
 * @param type Type identifier for the I/O range (e.g., MMIO, port I/O).
 * @param start Start physical address of the I/O range.
 * @param end End physical address (inclusive) of the I/O range.
 *
 * @return 0 on success, or a negative errno code on failure.
 */
int dmop_map_io_range_to_ioreq_server(domid_t domid, ioservid_t id, uint32_t type, uint64_t start,
                                      uint64_t end);

/**
 * @brief Enable or disable an existing I/O request server.
 *
 * This function issues the XEN_DMOP_set_ioreq_server_state hypercall to change
 * the operational state of the specified I/O request server.
 *
 * @param domid Xen domain identifier.
 * @param id I/O request server ID to modify.
 * @param enabled Non-zero to enable the server, zero to disable it.
 *
 * @return 0 on success, or a negative errno code on failure.
 */
int dmop_set_ioreq_server_state(domid_t domid, ioservid_t id, uint8_t enabled);

/**
 * @brief Query the number of virtual CPUs in a Xen domain.
 *
 * This function issues the XEN_DMOP_nr_vcpus hypercall to retrieve
 * the current vCPU count for the specified domain.
 *
 * @param domid Xen domain identifier to query.
 *
 * @return The number of vCPUs on success, or a negative errno code on failure.
 */
int dmop_nr_vcpus(domid_t domid);

/**
 * @brief Set the interrupt level for a specific IRQ in a Xen domain.
 *
 * This function issues the XEN_DMOP_set_irq_level hypercall to adjust
 * the signal level (assert or deassert) for the given IRQ line.
 *
 * @param domid Xen domain identifier.
 * @param irq IRQ number whose level is to be set.
 * @param level Non-zero to assert (raise) the IRQ, zero to deassert (lower) it.
 *
 * @return 0 on success, or a negative errno code on failure.
 */
int dmop_set_irq_level(domid_t domid, uint32_t irq, uint8_t level);

#endif /* ZEPHYR_XEN_DMOP_H_ */
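A hedged sketch of the interrupt-side wrapper in use (not from this commit;
the helper name and the pulse-style signalling are illustrative, and a real
backend would deassert only once the guest has acknowledged the interrupt):

#include <zephyr/xen/dmop.h>

/* Raise, then lower, a level-triggered SPI toward the guest. */
static int pulse_guest_irq(domid_t guest, uint32_t spi)
{
        int err = dmop_set_irq_level(guest, spi, 1);

        if (err < 0) {
                return err;
        }

        return dmop_set_irq_level(guest, spi, 0);
}

dmop_nr_vcpus() is typically called once during backend setup, since the
synchronous ioreq page holds one request slot per guest vCPU, so per-vCPU
state must be sized from its return value.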

include/zephyr/xen/memory.h

Lines changed: 24 additions & 0 deletions
@@ -3,6 +3,9 @@
  * Copyright (c) 2023 EPAM Systems
  */
 
+#ifndef ZEPHYR_XEN_MEMORY_H_
+#define ZEPHYR_XEN_MEMORY_H_
+
 #include <zephyr/kernel.h>
 #include <xen/public/memory.h>
 #include <xen/public/xen.h>
@@ -64,3 +67,24 @@ int xendom_remove_from_physmap(int domid, xen_pfn_t gpfn);
 int xendom_populate_physmap(int domid, unsigned int extent_order,
                             unsigned int nr_extents, unsigned int mem_flags,
                             xen_pfn_t *extent_start);
+/**
+ * @brief Acquire a resource mapping for the Xen domain.
+ *
+ * Issues the XENMEM_acquire_resource hypercall to map a resource buffer
+ * (e.g., I/O request server, grant table, VM trace buffer) into the
+ * specified domain's physmap, or to query its total size.
+ *
+ * @param domid Target domain identifier. Use DOMID_SELF for the calling domain.
+ * @param type Resource type identifier (e.g., XENMEM_resource_ioreq_server).
+ * @param id Resource-specific identifier (e.g., server ID or table ID).
+ * @param frame Starting frame number for the mapping; ignored if *nr_frames == 0.
+ * @param nr_frames [in,out] On input, the number of frames to map; on return,
+ *                  the number of frames actually mapped (or the total frame
+ *                  count when querying).
+ * @param frame_list Guest frame list buffer: input GFNs for HVM guests,
+ *                   output MFNs for PV guests.
+ * @return Zero on success, or a negative errno code on failure.
+ */
+int xendom_acquire_resource(domid_t domid, uint16_t type, uint32_t id, uint64_t frame,
+                            uint32_t *nr_frames, xen_pfn_t *frame_list);
+
+#endif /* ZEPHYR_XEN_MEMORY_H_ */
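As a usage sketch, a backend maps the guest's synchronous ioreq page through
this call after creating the server. XENMEM_resource_ioreq_server and the
XENMEM_resource_ioreq_server_frame_ioreq() index macro come from Xen's public
memory.h; the helper below and its single-frame assumption are illustrative,
not part of the commit.

#include <zephyr/xen/dmop.h>
#include <zephyr/xen/memory.h>

static int map_ioreq_page(domid_t guest, ioservid_t id, xen_pfn_t *gfn)
{
        uint32_t nr_frames = 1;

        /* Frame index 1 + n selects the nth synchronous ioreq page;
         * index 0 is the buffered-ioreq page.
         */
        return xendom_acquire_resource(guest, XENMEM_resource_ioreq_server, id,
                                       XENMEM_resource_ioreq_server_frame_ioreq(0),
                                       &nr_frames, gfn);
}

Per the parameter documentation above, calling first with *nr_frames == 0
queries the total number of frames the resource spans.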

include/zephyr/xen/sched.h

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
/*
 * Copyright (c) 2025 TOKITA Hiroshi
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_XEN_SCHED_H_
#define ZEPHYR_XEN_SCHED_H_

#include <xen/public/sched.h>

/**
 * @brief Poll one or more Xen event channels for activity.
 *
 * Issues the SCHEDOP_poll hypercall to wait for events on the specified ports.
 *
 * @param ports Array of event channel ports to poll.
 * @param nr_ports Number of ports in the array.
 * @param timeout Timeout passed through to SCHEDOP_poll: an absolute system
 *                time in nanoseconds at which to stop waiting, or 0 to wait
 *                indefinitely.
 * @return 0 on success (an event is pending or the timeout expired), or a
 *         negative errno code on error.
 */
int sched_poll(evtchn_port_t *ports, unsigned int nr_ports, uint64_t timeout);

#endif /* ZEPHYR_XEN_SCHED_H_ */
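A minimal sketch of blocking on the backend's event channel with sched_poll()
(not from this commit; the helper name is illustrative, and a timeout of 0
asks the hypervisor to wait indefinitely):

#include <zephyr/xen/sched.h>

static int wait_for_ioreq(evtchn_port_t port)
{
        evtchn_port_t ports[] = { port };

        /* Returns once the port has a pending event. */
        return sched_poll(ports, 1, 0);
}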
