Skip to content

Commit f4e35a0

Browse files
committed
drivers: xen: add basic XenStore access features
Add functions for accessing XenStore. Write support is not yet implemented. Signed-off-by: TOKITA Hiroshi <[email protected]>
1 parent 92f0083 commit f4e35a0

File tree

3 files changed

+481
-0
lines changed

3 files changed

+481
-0
lines changed

drivers/xen/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,5 +7,6 @@ zephyr_sources_ifdef(CONFIG_XEN_GRANT_TABLE gnttab.c)
77
zephyr_sources(memory.c)
88
zephyr_sources(sched.c)
99
zephyr_sources(dmop.c)
10+
zephyr_sources(xenstore.c)
1011

1112
add_subdirectory_ifdef(CONFIG_XEN_DOM0 dom0)

drivers/xen/xenstore.c

Lines changed: 376 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,376 @@
1+
/*
2+
* Copyright (c) 2023 EPAM Systems
3+
* Copyright (c) 2025 TOKITA Hiroshi
4+
*
5+
* SPDX-License-Identifier: Apache-2.0
6+
*/
7+
8+
#include <stdint.h>
9+
#include <string.h>
10+
#include <sys/types.h>
11+
12+
#include <xen/public/io/xs_wire.h>
13+
14+
#include <zephyr/kernel.h>
15+
#include <zephyr/spinlock.h>
16+
#include <zephyr/xen/generic.h>
17+
#include <zephyr/xen/hvm.h>
18+
#include <zephyr/xen/sched.h>
19+
#include <zephyr/xen/events.h>
20+
#include <zephyr/xen/xenstore.h>
21+
#include <zephyr/sys/device_mmio.h>
22+
23+
#include <zephyr/logging/log.h>
24+
25+
/*
26+
* Importing and modifying code from the following source.
27+
* https://github.com/xen-troops/zephyr-xenlib/blob/v3.1.0/xenstore-srv/src/xenstore_srv.c
28+
*
29+
* The queues used are swapped because the read and write directions for xenstore are different.
30+
*/
31+
32+
/*
 * Sanity-check a producer/consumer index pair. Returns true when the
 * distance between producer and consumer exceeds the ring capacity,
 * i.e. the shared indexes have been corrupted by the peer.
 */
static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
{
	XENSTORE_RING_IDX used = prod - cons;

	return used > XENSTORE_RING_SIZE;
}
36+
37+
/*
 * Compute the offset into the response ring at which the next chunk of
 * incoming data starts. *len receives the number of bytes that can be
 * consumed contiguously: bounded both by the amount of data available
 * and by the distance to the physical end of the ring buffer.
 */
static size_t get_input_offset(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, size_t *len)
{
	const size_t avail = prod - cons;
	const size_t to_end = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);

	*len = (avail < to_end) ? avail : to_end;

	return MASK_XENSTORE_IDX(cons);
}
48+
49+
/*
 * Compute the offset into the request ring at which the next chunk of
 * outgoing data should be written. *len receives the number of bytes
 * that can be written contiguously: bounded both by the free space in
 * the ring and by the distance to the physical end of the buffer.
 */
static size_t get_output_offset(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, size_t *len)
{
	const size_t free_space = XENSTORE_RING_SIZE - (prod - cons);
	const size_t to_end = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);

	*len = (free_space < to_end) ? free_space : to_end;

	return MASK_XENSTORE_IDX(prod);
}
60+
61+
/*
 * Copy up to @len bytes from @data into the request ring and notify the
 * peer via the event channel.
 *
 * Performs at most one contiguous copy: if the free space wraps around
 * the end of the ring, only the first contiguous chunk is written and
 * the (possibly shorter) number of bytes actually written is returned.
 * Callers must check for short writes.
 *
 * Returns the number of bytes written (may be 0 if the ring is full),
 * or -EINVAL if the shared indexes are corrupted.
 */
static int ring_write(struct xenstore *xs, const void *data, size_t len)
{
	size_t avail;
	void *dest;
	struct xenstore_domain_interface *intf = xs->domint;
	XENSTORE_RING_IDX cons, prod;

	/* Snapshot both indexes before touching the data area; the barrier
	 * orders the index reads against the subsequent buffer write.
	 */
	cons = intf->req_cons;
	prod = intf->req_prod;
	z_barrier_dmem_fence_full();

	/* Reject corrupted indexes (distance larger than the ring itself). */
	if (check_indexes(cons, prod)) {
		return -EINVAL;
	}

	/* Clamp the write to the contiguous free space available. */
	dest = intf->req + get_output_offset(cons, prod, &avail);
	if (avail < len) {
		len = avail;
	}

	memcpy(dest, data, len);
	/* Ensure the payload is globally visible before publishing the new
	 * producer index to the peer.
	 */
	z_barrier_dmem_fence_full();
	intf->req_prod += len;

	notify_evtchn(xs->local_evtchn);

	return len;
}
89+
90+
/*
 * Copy up to @len bytes from the response ring into @data and advance
 * the consumer index.
 *
 * Performs at most one contiguous copy: if the pending data wraps
 * around the end of the ring, only the first contiguous chunk is
 * consumed and its length is returned. Passing data == NULL discards
 * the bytes instead of copying them (used to drain stale data at init);
 * in that case the peer is deliberately not notified.
 *
 * Returns the number of bytes consumed (may be 0 if the ring is empty),
 * or -EIO if the shared indexes are corrupted.
 */
static int ring_read(struct xenstore *xs, void *data, size_t len)
{
	size_t avail;
	const void *src;
	struct xenstore_domain_interface *intf = xs->domint;
	XENSTORE_RING_IDX cons, prod;

	/* Snapshot both indexes; the barrier orders the index reads against
	 * the subsequent read of the ring contents.
	 */
	cons = intf->rsp_cons;
	prod = intf->rsp_prod;
	z_barrier_dmem_fence_full();

	/* Reject corrupted indexes (distance larger than the ring itself). */
	if (check_indexes(cons, prod)) {
		return -EIO;
	}

	/* Clamp the read to the contiguous data available. */
	src = intf->rsp + get_input_offset(cons, prod, &avail);
	if (avail < len) {
		len = avail;
	}

	if (data) {
		memcpy(data, src, len);
	}

	/* Finish reading the payload before releasing the space back to the
	 * producer by advancing the consumer index.
	 */
	z_barrier_dmem_fence_full();
	intf->rsp_cons += len;

	if (data) {
		notify_evtchn(xs->local_evtchn);
	}

	return len;
}
123+
124+
/*
125+
* End importing.
126+
*/
127+
128+
LOG_MODULE_REGISTER(xen_xenstore);
129+
130+
static atomic_t next_req_id;
131+
static struct k_sem xs_lock;
132+
133+
static uint8_t work_buf[XENSTORE_RING_SIZE * 2];
134+
static uint8_t read_buf[XENSTORE_RING_SIZE];
135+
static size_t work_pos;
136+
static size_t read_pos;
137+
static int expected_req_id;
138+
static int processed_req_id;
139+
140+
static xs_notify_cb notify_cb;
141+
static void *notify_param;
142+
143+
struct k_spinlock lock;
144+
145+
static inline size_t ring_avail_for_read(struct xenstore *xs)
146+
{
147+
struct xenstore_domain_interface *intf = xs->domint;
148+
XENSTORE_RING_IDX cons, prod;
149+
150+
cons = intf->rsp_cons;
151+
prod = intf->rsp_prod;
152+
153+
return prod - cons;
154+
}
155+
156+
static void store_cb(void *ptr)
157+
{
158+
struct xenstore *xs = ptr;
159+
160+
while (ring_avail_for_read(xs)) {
161+
int ret = ring_read(xs, work_buf + work_pos, ring_avail_for_read(xs));
162+
if (ret < 0) {
163+
break;
164+
}
165+
if (work_pos + ret > sizeof(work_buf)) {
166+
LOG_ERR("work_buf overflowed: %ld", work_pos + ret);
167+
work_pos = 0;
168+
break;
169+
}
170+
171+
work_pos += ret;
172+
}
173+
174+
size_t offset = 0;
175+
while (work_pos - offset >= sizeof(struct xsd_sockmsg)) {
176+
struct xsd_sockmsg *hdr = (void *)(work_buf + offset);
177+
size_t msglen = hdr->len + sizeof(*hdr);
178+
if (work_pos - offset < msglen) {
179+
break;
180+
}
181+
182+
if ((hdr->req_id == expected_req_id) && (hdr->req_id > processed_req_id)) {
183+
k_spinlock_key_t key = k_spin_lock(&lock);
184+
memcpy(read_buf, hdr, msglen);
185+
read_pos = msglen;
186+
k_spin_unlock(&lock, key);
187+
188+
processed_req_id = expected_req_id;
189+
k_sem_give(&xs_lock);
190+
} else {
191+
k_spinlock_key_t key = k_spin_lock(&lock);
192+
193+
if (notify_cb) {
194+
notify_cb((char *)hdr, msglen, notify_param);
195+
}
196+
197+
k_spin_unlock(&lock, key);
198+
}
199+
offset += msglen;
200+
}
201+
202+
if (offset) {
203+
memmove(work_buf, work_buf + offset, work_pos - offset);
204+
work_pos -= offset;
205+
}
206+
}
207+
208+
static int xs_cmd_req(struct xenstore *xs, int type, const char **params, size_t param_num,
209+
char *buf, size_t len, uint32_t *preq_id)
210+
{
211+
size_t plen = 0;
212+
int err;
213+
214+
for (int i = 0; i < param_num; i++) {
215+
plen += strlen(params[i]) + 1;
216+
}
217+
218+
if (plen > XENSTORE_PAYLOAD_MAX) {
219+
LOG_ERR("strlen(path) + 1: %zu > XENSTORE_PAYLOAD_MAX", plen);
220+
return -ENAMETOOLONG;
221+
}
222+
223+
*preq_id = atomic_inc(&next_req_id);
224+
if (*preq_id == 0) {
225+
*preq_id = atomic_inc(&next_req_id);
226+
}
227+
228+
struct xsd_sockmsg hdr = {
229+
.type = type,
230+
.req_id = *preq_id,
231+
.tx_id = 0,
232+
.len = plen,
233+
};
234+
235+
err = ring_write(xs, &hdr, sizeof(struct xsd_sockmsg));
236+
if (err < 0) {
237+
LOG_ERR("ring_write(hdr) failed: %d", err);
238+
return -EAGAIN;
239+
} else if (err < sizeof(struct xsd_sockmsg)) {
240+
LOG_ERR("ring_write(hdr) shorter response: %d", err);
241+
return -EIO;
242+
}
243+
244+
for (int i = 0; i < param_num; i++) {
245+
err = ring_write(xs, params[i], strlen(params[i]) + 1);
246+
if (err < 0) {
247+
LOG_ERR("ring_write(path) failed: %d", err);
248+
return -EAGAIN;
249+
} else if (err < strlen(params[i]) + 1) {
250+
LOG_ERR("ring_write(path) shorter response: %d", err);
251+
return -EIO;
252+
}
253+
}
254+
255+
return 0;
256+
}
257+
258+
/*
 * Issue an XS_* command and block (up to @timeout) for its response,
 * which store_cb() deposits in read_buf and signals via xs_lock.
 * The response payload (without the xsd_sockmsg header) is copied into
 * @buf and NUL-terminated when it fits.
 *
 * Returns the number of payload bytes copied, -EIO if the request could
 * not be sent, -EINVAL on semaphore failure/timeout, -ENOBUFS if the
 * response does not fit in @buf.
 *
 * NOTE(review): serialized use is assumed — expected_req_id is a single
 * global, so concurrent callers would clobber each other; confirm the
 * intended locking model. On timeout, a late-arriving response will
 * still be stored and k_sem_give()'d, leaving a stale count for the
 * next call — confirm this is acceptable.
 */
static ssize_t xs_cmd(struct xenstore *xs, int type, const char **params, size_t params_num,
		      char *buf, size_t len, k_timeout_t timeout)
{
	int err;
	struct xsd_sockmsg *hdr;

	err = xs_cmd_req(xs, type, params, params_num, buf, len, &expected_req_id);
	if (err < 0) {
		/* "xs_rw_common" is a leftover name from the imported code. */
		LOG_ERR("xs_rw_common error: %d", err);
		return -EIO;
	}

	/* Wait for store_cb() to signal that the response is in read_buf. */
	err = k_sem_take(&xs_lock, timeout);
	if (err < 0) {
		LOG_ERR("k_sem_take error: %d", err);
		return -EINVAL;
	}

	hdr = (void *)read_buf;

	/* NOTE(review): hdr->len is read before taking `lock`; safe only if
	 * no other writer can touch read_buf after the sem_give — confirm.
	 */
	if (hdr->len > len) {
		LOG_ERR("no buffer hdr.len=%u > len=%zu)", hdr->len, len);
		err = -ENOBUFS;
		goto end;
	}

	/* Copy at most len - 1 bytes so a terminator always fits. */
	ssize_t copy_len = (len - 1 < hdr->len) ? len - 1 : hdr->len;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Skip the wire header; only the payload goes to the caller. */
	memcpy(buf, read_buf + sizeof(struct xsd_sockmsg), copy_len);
	read_pos = 0;

	k_spin_unlock(&lock, key);

	if (copy_len < len) {
		buf[copy_len] = '\0';
	}

end:
	if (err) {
		return err;
	}

	return copy_len;
}
303+
304+
int xs_init_xenstore(struct xenstore *xs)
305+
{
306+
uint64_t paddr = 0;
307+
uint64_t value = 0;
308+
mm_reg_t vaddr = 0;
309+
int err;
310+
311+
k_sem_init(&xs_lock, 0, 1);
312+
313+
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, DOMID_SELF, &value);
314+
if (err) {
315+
LOG_ERR("hvm_get_parameter(STORE_EVTCHN) failed: %d", err);
316+
return -ENODEV;
317+
}
318+
xs->local_evtchn = value;
319+
320+
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, DOMID_SELF, &paddr);
321+
if (err) {
322+
LOG_ERR("hvm_get_param(STORE_PFN) failed: err=%d", err);
323+
return -EIO;
324+
}
325+
device_map(&vaddr, XEN_PFN_PHYS(paddr), XEN_PAGE_SIZE, K_MEM_CACHE_WB | K_MEM_PERM_RW);
326+
327+
xs->domint = (struct xenstore_domain_interface *)vaddr;
328+
329+
while (ring_avail_for_read(xs)) {
330+
ring_read(xs, NULL, ring_avail_for_read(xs));
331+
}
332+
333+
bind_event_channel(xs->local_evtchn, store_cb, xs);
334+
unmask_event_channel(xs->local_evtchn);
335+
336+
return 0;
337+
}
338+
339+
/*
 * Register (or clear, with cb == NULL) the callback invoked from
 * store_cb() for unsolicited messages such as watch events.
 *
 * The pair is updated under the same spinlock store_cb() holds while
 * invoking the callback, so cb and param are always observed together;
 * the previous code wrote them unsynchronized, allowing store_cb() to
 * see a new cb with a stale param.
 */
void xs_set_notify_callback(xs_notify_cb cb, void *param)
{
	k_spinlock_key_t key = k_spin_lock(&lock);

	notify_cb = cb;
	notify_param = param;
	k_spin_unlock(&lock, key);
}
344+
345+
ssize_t xs_read(struct xenstore *xs, const char *path, char *buf, size_t len)
346+
{
347+
const char *params[] = {path};
348+
349+
if (!xs || !path || !buf || len == 0) {
350+
return -EINVAL;
351+
}
352+
353+
return xs_cmd(xs, XS_READ, params, ARRAY_SIZE(params), buf, len, K_FOREVER);
354+
}
355+
356+
ssize_t xs_directory(struct xenstore *xs, const char *path, char *buf, size_t len)
357+
{
358+
const char *params[] = {path};
359+
360+
if (!xs || !path || !buf || len == 0) {
361+
return -EINVAL;
362+
}
363+
364+
return xs_cmd(xs, XS_DIRECTORY, params, ARRAY_SIZE(params), buf, len, K_FOREVER);
365+
}
366+
367+
ssize_t xs_watch(struct xenstore *xs, const char *path, const char *token, char *buf, size_t len)
368+
{
369+
const char *params[] = {path, token};
370+
371+
if (!xs || !path || !token || !buf || len == 0) {
372+
return -EINVAL;
373+
}
374+
375+
return xs_cmd(xs, XS_WATCH, params, ARRAY_SIZE(params), buf, len, K_FOREVER);
376+
}

0 commit comments

Comments
 (0)