This repository has been archived by the owner on Jan 20, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 260
/
shim_thread.h
309 lines (238 loc) · 8.54 KB
/
shim_thread.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
/* SPDX-License-Identifier: LGPL-3.0-or-later */
/* Copyright (C) 2020 Intel Corporation
* Borys Popławski <[email protected]>
*/
#ifndef _SHIM_THREAD_H_
#define _SHIM_THREAD_H_
#include <linux/futex.h>
#include <linux/signal.h>
#include <stdbool.h>
#include <stdint.h>
#include "api.h"
#include "pal.h"
#include "list.h"
#include "shim_handle.h"
#include "shim_internal.h"
#include "shim_signal.h"
#include "shim_tcb.h"
#include "shim_types.h"
/* Sentinel marking the end of a wake queue; distinct from NULL, which means
 * "not queued at all". */
#define WAKE_QUEUE_TAIL ((void*)1)

/* If next is NULL, then this node is not on any queue.
 * Otherwise it is a valid pointer to the next node or WAKE_QUEUE_TAIL. */
struct wake_queue_node {
    struct wake_queue_node* next;
};

/* Head of a singly-linked wake queue; `first` points at the first node or at
 * WAKE_QUEUE_TAIL when the queue is empty (see add_thread_to_queue/wake_queue). */
struct wake_queue_head {
    struct wake_queue_node* first;
};
/* Per-process table of signal dispositions; reference-counted so it can be
 * shared (presumably between threads via CLONE_SIGHAND — confirm at call sites). */
struct shim_signal_handles {
    /* One disposition per signal number. */
    struct __kernel_sigaction actions[NUM_SIGS];
    /* Protects `actions`. */
    struct shim_lock lock;
    /* Manipulated via get_signal_handles()/put_signal_handles(). */
    REFTYPE ref_count;
};
/* For more info see: man signal(7) */
#define MAX_SIGNAL_LOG 32

/* Queue of pending real-time signals. `put_idx`/`get_idx` are producer/consumer
 * indices into `queue` (presumably taken modulo MAX_SIGNAL_LOG — confirm in
 * shim_signal.c); real-time signals must queue rather than coalesce, see signal(7). */
struct shim_rt_signal_queue {
    uint64_t put_idx;
    uint64_t get_idx;
    struct shim_signal* queue[MAX_SIGNAL_LOG];
};

/* Pending signals of one thread (or process — confirm at usage sites):
 * standard signals coalesce into a single slot each, real-time signals get a
 * queue each, matching POSIX semantics per signal(7). */
struct shim_signal_queue {
    /* One pending-signal slot per standard signal (numbers 1..SIGRTMIN-1). */
    struct shim_signal* standard_signals[SIGRTMIN - 1];
    /* One queue per real-time signal (numbers SIGRTMIN..NUM_SIGS). */
    struct shim_rt_signal_queue rt_signal_queues[NUM_SIGS - SIGRTMIN + 1];
};
DEFINE_LIST(shim_thread);
DEFINE_LISTP(shim_thread);

/* LibOS-level representation of one thread. Reference-counted: acquire with
 * get_thread(), release with put_thread(). */
struct shim_thread {
    /* Field for inserting threads on global `g_thread_list`. */
    LIST_TYPE(shim_thread) list;

    /* thread identifiers */
    IDTYPE tid;

    /* credentials */
    IDTYPE uid, gid, euid, egid;

    /* thread pal handle */
    PAL_HANDLE pal_handle;

    /* File-descriptor table; accessed via get_thread_handle_map()/set_handle_map(). */
    struct shim_handle_map* handle_map;

    /* child tid */
    int* set_child_tid;
    int* clear_child_tid;    /* LibOS zeroes it to notify parent that thread exited */
    int clear_child_tid_pal; /* PAL zeroes it to notify LibOS that thread exited */

    /* signal handling */
    __sigset_t signal_mask;
    struct shim_signal_handles* signal_handles;
    struct shim_signal_queue signal_queue;
    /* For the field below, see the explanation in "LibOS/shim/src/bookkeep/shim_signal.c" near
     * `process_pending_signals_cnt`. */
    uint64_t pending_signals;
    /*
     * This field is used for checking whether we handled a signal (e.g. if we want to sleep and
     * make some decision after wakeup based on whether we handled a signal, see `sigsuspend`)
     * and can have following values:
     * - `SIGNAL_NOT_HANDLED` - usually initialized to this - no signals were handled,
     * - `SIGNAL_HANDLED` - at least one signal was handled,
     * - `SIGNAL_HANDLED_RESTART` - same as above, but the signal had `SA_RESTART` flag.
     * `SIGNAL_HANDLED` has priority over `SIGNAL_HANDLED_RESTART`, i.e. if we handle multiple
     * signals, some with `SA_RESTART`, some without it, this field will be set to `SIGNAL_HANDLED`.
     */
    unsigned char signal_handled;
    /* Alternate signal stack (sigaltstack). */
    stack_t signal_altstack;

    /* futex robust list */
    struct robust_list_head* robust_list;

    /* Event used by thread_setwait()/thread_sleep()/thread_wakeup() below to
     * park and wake this thread. */
    PAL_HANDLE scheduler_event;

    /* Node for batched wake-ups; see add_thread_to_queue()/wake_queue(). */
    struct wake_queue_node wake_queue;

    bool time_to_die;

    /* Thread stack bounds — NOTE(review): exact meaning of `stack_red`
     * (presumably a red/guard zone) is not visible in this header; confirm. */
    void* stack;
    void* stack_top;
    void* stack_red;
    shim_tcb_t* shim_tcb;
    void* frameptr;

    /* Manipulated via get_thread()/put_thread(). */
    REFTYPE ref_count;
    /* Protects mutable fields of this structure — NOTE(review): exact coverage
     * not visible in this header; confirm against users. */
    struct shim_lock lock;
};
/* Node of an intrusive singly-linked list of waiting threads. */
struct shim_thread_queue {
    struct shim_thread_queue* next;
    struct shim_thread* thread;
    /* We use this field to mark that this object is still in use (is on some queue). This is needed
     * to distinguish spurious wake-ups from real ones. */
    bool in_use;
};

/* Values for `shim_thread::signal_handled`; see the comment on that field. */
#define SIGNAL_NOT_HANDLED      0
#define SIGNAL_HANDLED          1
#define SIGNAL_HANDLED_RESTART  2
/* One-time initialization of the threading subsystem. Returns 0 on success,
 * negative error code on failure. */
int init_thread(void);

/* Returns true for LibOS-internal (helper) threads, identified by tids at or
 * above INTERNAL_TID_BASE. */
static inline bool is_internal(struct shim_thread* thread) {
    return thread->tid >= INTERNAL_TID_BASE;
}

/* Frees all pending signals held in `queue`. */
void clear_signal_queue(struct shim_signal_queue* queue);

/* Increase/decrease refcount of a signal-handles table. */
void get_signal_handles(struct shim_signal_handles* handles);
void put_signal_handles(struct shim_signal_handles* handles);

/* Increase/decrease refcount of a thread; put_thread() presumably frees the
 * thread when the count drops to zero — confirm in the implementation. */
void get_thread(struct shim_thread* thread);
void put_thread(struct shim_thread* thread);

/* Recomputes the debug-log prefix stored in `tcb->debug_buf`. */
void debug_setprefix(shim_tcb_t* tcb);
/* Set `debug_buf` for `tcb`. If `debug_buf` is NULL, a new buffer is allocated;
 * if it is not NULL, this function cannot fail.
 * Returns 0 on success (or when debug logging is disabled), -ENOMEM if the
 * allocation failed. */
static inline int debug_setbuf(shim_tcb_t* tcb, struct debug_buf* debug_buf) {
    if (!debug_handle)
        return 0;

    struct debug_buf* buf = debug_buf;
    if (!buf)
        buf = malloc(sizeof(*buf));

    /* Note: on allocation failure `tcb->debug_buf` is still (re)set, to NULL. */
    tcb->debug_buf = buf;
    if (!buf)
        return -ENOMEM;

    debug_setprefix(tcb);
    return 0;
}
/* Returns the current thread (from the TCB), without taking a reference.
 * May be NULL early during thread setup — callers check. */
static inline struct shim_thread* get_cur_thread(void) {
    return SHIM_TCB_GET(tp);
}
/* Returns the tid of the current thread, or 0 if no current thread is set. */
static inline unsigned int get_cur_tid(void) {
    struct shim_thread* self = get_cur_thread();
    return self ? self->tid : 0;
}
/* Installs `thread` as the current thread in this TCB, adjusting refcounts:
 * takes a reference on the new thread and drops the old one's. No-op if
 * `thread` is already current. */
static inline void set_cur_thread(struct shim_thread* thread) {
    assert(thread);
    shim_tcb_t* tcb = shim_get_tcb();

    if (thread == tcb->tp) {
        return;
    }

    /* Take the new reference before dropping the old one. */
    get_thread(thread);
    if (tcb->tp) {
        put_thread(tcb->tp);
    }

    tcb->tp = thread;
    thread->shim_tcb = tcb;

    /* Refresh the debug-log prefix, which includes per-thread info. */
    if (tcb->debug_buf)
        debug_setprefix(tcb);
}
/* Prepares `thread` (or the current thread, if `thread` is NULL) for a wait:
 * clears its scheduler event so a subsequent thread_sleep() blocks. If `queue`
 * is not NULL, takes a reference on the thread and publishes it there. */
static inline void thread_setwait(struct shim_thread** queue, struct shim_thread* thread) {
    struct shim_thread* target = thread ? thread : get_cur_thread();

    DkEventClear(target->scheduler_event);

    if (!queue)
        return;

    get_thread(target);
    *queue = target;
}
/* Blocks the current thread on its scheduler event for at most `timeout_us`
 * microseconds (woken by thread_wakeup()).
 * Returns 0 on wakeup, -EINVAL if there is no current thread or event, or a
 * negative PAL errno if the wait failed (e.g. timed out). */
static inline int thread_sleep(uint64_t timeout_us) {
    struct shim_thread* self = get_cur_thread();

    if (!self || !self->scheduler_event)
        return -EINVAL;

    if (!DkSynchronizationObjectWait(self->scheduler_event, timeout_us))
        return -PAL_ERRNO();

    return 0;
}
/* Wakes `thread` from thread_sleep() by signalling its scheduler event. */
static inline void thread_wakeup(struct shim_thread* thread) {
    DkEventSet(thread->scheduler_event);
}
/* Adds the thread to the wake-up queue.
 * If this thread is already on some queue, then it *will* be woken up soon and there is no need
 * to add it to another queue.
 * queue->first should be a valid pointer or WAKE_QUEUE_TAIL (i.e. cannot be NULL).
 *
 * Returns 0 if the thread was added to the queue, 1 otherwise. */
static inline int add_thread_to_queue(struct wake_queue_head* queue, struct shim_thread* thread) {
    struct wake_queue_node* nptr = NULL;
    struct wake_queue_node* qnode = &thread->wake_queue;

    /* Atomic cmpxchg is enough, no need to take thread->lock.
     * NULL -> queue->first transition succeeds only if the node was not queued;
     * a failed cmpxchg means some queue already owns this node. */
    if (!__atomic_compare_exchange_n(&qnode->next, &nptr, queue->first,
                                     /*weak=*/false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
        return 1;
    }

    /* Reference is held while the thread sits on the queue; dropped by wake_queue(). */
    get_thread(thread);

    /* Push the node at the front of the (singly-linked) queue. */
    queue->first = qnode;

    return 0;
}
/* Wakes up all threads on the queue.
 * This is a destructive operation - queue cannot be used after calling this function. */
static inline void wake_queue(struct wake_queue_head* queue) {
    struct wake_queue_node* qnode = queue->first;

    while (qnode != WAKE_QUEUE_TAIL) {
        struct shim_thread* thread = container_of(qnode, struct shim_thread, wake_queue);

        /* Read the successor *before* clearing `next`: the store releases the
         * node for re-queueing (see add_thread_to_queue). */
        qnode = qnode->next;

        __atomic_store_n(&thread->wake_queue.next, NULL, __ATOMIC_RELAXED);

        thread_wakeup(thread);
        /* Drop the reference taken in add_thread_to_queue(). */
        put_thread(thread);
    }
}
/*!
* \brief Look up the thread for a given id.
*
* \param tid Thread id to look for.
*
* Searches global threads list for a thread with id equal to \p tid.
* If no thread was found returns NULL.
* Increases refcount of the returned thread.
*/
struct shim_thread* lookup_thread(IDTYPE tid);

/* Allocates a new thread object (with a fresh/internal tid respectively) —
 * presumably returned with refcount 1; confirm in the implementation. */
struct shim_thread* get_new_thread(void);
struct shim_thread* get_new_internal_thread(void);

/* Adds `thread` to global thread list. */
void add_thread(struct shim_thread* thread);

void cleanup_thread(IDTYPE caller, void* thread);
bool check_last_thread(bool mark_self_dead);

/* Invokes `callback(thread, arg)` for threads on the global list; `one_shot`
 * presumably stops at the first matching thread — confirm in the implementation. */
int walk_thread_list(int (*callback)(struct shim_thread*, void*), void* arg, bool one_shot);

/* Increase/decrease refcount of a handle map. */
void get_handle_map(struct shim_handle_map* map);
void put_handle_map(struct shim_handle_map* map);

/* Returns the handle map of `thread` (or of the current thread if `thread` is
 * NULL); NULL if there is no thread. Does not take a reference. */
static inline struct shim_handle_map* get_thread_handle_map(struct shim_thread* thread) {
    if (!thread)
        thread = get_cur_thread();

    return thread ? thread->handle_map : NULL;
}

/* Installs `map` as `thread`'s handle map, adjusting refcounts (takes a
 * reference on `map` before dropping the old map's). */
static inline void set_handle_map(struct shim_thread* thread, struct shim_handle_map* map) {
    get_handle_map(map);

    assert(thread);
    if (thread->handle_map)
        put_handle_map(thread->handle_map);

    thread->handle_map = map;
}

bool kill_other_threads(void);
noreturn void thread_exit(int error_code, int term_signal);
noreturn void process_exit(int error_code, int term_signal);

/* Futex-robust-list and clear_child_tid cleanup run on thread exit; see
 * robust_list(2) and set_tid_address(2). */
void release_robust_list(struct robust_list_head* head);
void release_clear_child_tid(int* clear_child_tid);
#endif /* _SHIM_THREAD_H_ */