-
-
Notifications
You must be signed in to change notification settings - Fork 5.5k
/
staticdata.c
3782 lines (3530 loc) · 163 KB
/
staticdata.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// This file is a part of Julia. License is MIT: https://julialang.org/license
/*
saving and restoring system images
This performs serialization and deserialization of system and package images. It creates and saves a compact binary
blob, making deserialization "simple" and fast: we "only" need to deal with uniquing, pointer relocation,
method root insertion, registering with the garbage collector, making note of special internal types, and
backedges/invalidation. Special objects include things like builtin functions, C-implemented types (those in jltypes.c),
the metadata for documentation, optimal layouts, integration with native system image generation, and preparing other
preprocessing directives.
During serialization, the flow has several steps:
- step 1 inserts relevant items into `serialization_order`, an `obj` => `id::Int` mapping. `id` is assigned by
order of insertion. This stage is implemented by `jl_queue_for_serialization` and its callees;
while it would be simplest to use recursion, this risks stack overflow, so recursion is mimicked
using a work-queue managed by `jl_serialize_reachable`.
It's worth emphasizing that the only goal of this stage is to insert objects into `serialization_order`.
In later stages, such objects get written in order of `id`.
- step 2 (the biggest of four steps) takes all items in `serialization_order` and actually serializes them ordered
by `id`. The system is serialized into several distinct streams (see `jl_serializer_state`), a "main stream"
(the `s` field) as well as parallel streams for writing specific categories of additional internal data (e.g.,
global data invisible to codegen, as well as deserialization "touch-up" tables, see below). These different streams
will be concatenated in later steps. Certain key items (e.g., builtin types & functions associated with `INSERT_TAG`
below, integers smaller than 512) get serialized via a hard-coded tag table.
Serialization builds "touch up" tables used during deserialization. Pointers and items requiring gc
registration get encoded as `(location, target)` pairs in `relocs_list` and `gctags_list`, respectively.
`location` is the site that needs updating (e.g., the address of a pointer referencing an object), and is
set to `position(s)`, the offset of the object from the beginning of the deserialized blob.
`target` is a bitfield-encoded index into lists of different categories of data (e.g., mutable data, constant data,
symbols, functions, etc.) to which the pointer at `location` refers. The different lists and their bitfield flags
are given by the `RefTags` enum: if `t` is the category tag (one of the `RefTags` enums) and `i` is the index into
one of the corresponding categorical list, then `index = t << RELOC_TAG_OFFSET + i`. The simplest source for the
details of this encoding can be found in the pair of functions `get_reloc_for_item` and `get_item_for_reloc`.
`uniquing` also holds the serialized location of external DataTypes, MethodInstances, and singletons
in the serialized blob (i.e., new-at-the-time-of-serialization specializations).
Most of step 2 is handled by `jl_write_values`, followed by special handling of the dedicated parallel streams.
- step 3 combines the different sections (fields of `jl_serializer_state`) into one
- step 4 writes the values of the hard-coded tagged items and `ccallable_list`
Much of the "real work" during deserialization is done by `get_item_for_reloc`. But a few items require specific
attention:
- uniquing: during deserialization, the target item (an "external" type or MethodInstance) must be checked against
the running system to see whether such an object already exists (i.e., whether some other previously-loaded package
or workload has created such types/MethodInstances previously) or whether it needs to be created de-novo.
In either case, all references at `location` must be updated to the one in the running system.
`new_dt_objs` is a hash set of newly allocated datatype-reachable objects
- method root insertion: when new specializations generate new roots, these roots must be inserted into
method root tables
- backedges & invalidation: external edges have to be checked against the running system and any invalidations executed.
Encoding of a pointer:
- in the location of the pointer, we initially write zero padding
- for both relocs_list and gctags_list, we write loc/backrefid (for gctags_list this is handled by the caller of write_gctaggedfield,
for relocs_list it's handled by write_pointerfield)
- when writing to disk, both call get_reloc_for_item, and its return value (subject to modification by gc bits)
ends up being written into the data stream (s->s), and the data stream's position written to s->relocs
External links:
- location holds the offset
- loc/0 in relocs_list
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h> // printf
#include <inttypes.h> // PRIxPTR
#include "julia.h"
#include "julia_internal.h"
#include "julia_gcext.h"
#include "builtin_proto.h"
#include "processor.h"
#include "serialize.h"
#ifndef _OS_WINDOWS_
#include <dlfcn.h>
#endif
#include "valgrind.h"
#include "julia_assert.h"
static const size_t WORLD_AGE_REVALIDATION_SENTINEL = 0x1;
#include "staticdata_utils.c"
#include "precompile_utils.c"
#ifdef __cplusplus
extern "C" {
#endif
// TODO: put WeakRefs on the weak_refs list during deserialization
// TODO: handle finalizers
#define NUM_TAGS 188
// An array of references that need to be restored from the sysimg
// This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C.
// Return the lazily-initialized table of addresses of all specially-tagged
// global objects (builtin types, typenames, exceptions, special values, and
// Core.Builtin functions). The table is the manual counterpart of the
// codegen-produced gvars array; entries are serialized/deserialized by tag
// index rather than by value. NUM_TAGS must be kept in sync with the list.
jl_value_t **const*const get_tags(void) {
    // Make sure to keep an extra slot at the end to sentinel length
    static void * _tags[NUM_TAGS] = {NULL};
    // Lazily-initialize this list
    if (_tags[0] == NULL) {
        unsigned int i = 0;
#define INSERT_TAG(sym) _tags[i++] = &(sym)
        // builtin types
        INSERT_TAG(jl_any_type);
        INSERT_TAG(jl_symbol_type);
        INSERT_TAG(jl_ssavalue_type);
        INSERT_TAG(jl_datatype_type);
        INSERT_TAG(jl_slotnumber_type);
        INSERT_TAG(jl_simplevector_type);
        INSERT_TAG(jl_array_type);
        INSERT_TAG(jl_expr_type);
        INSERT_TAG(jl_binding_type);
        INSERT_TAG(jl_globalref_type);
        INSERT_TAG(jl_string_type);
        INSERT_TAG(jl_module_type);
        INSERT_TAG(jl_tvar_type);
        INSERT_TAG(jl_method_instance_type);
        INSERT_TAG(jl_method_type);
        INSERT_TAG(jl_code_instance_type);
        INSERT_TAG(jl_linenumbernode_type);
        INSERT_TAG(jl_lineinfonode_type);
        INSERT_TAG(jl_gotonode_type);
        INSERT_TAG(jl_quotenode_type);
        INSERT_TAG(jl_gotoifnot_type);
        INSERT_TAG(jl_enternode_type);
        INSERT_TAG(jl_argument_type);
        INSERT_TAG(jl_returnnode_type);
        INSERT_TAG(jl_const_type);
        INSERT_TAG(jl_partial_struct_type);
        INSERT_TAG(jl_partial_opaque_type);
        INSERT_TAG(jl_interconditional_type);
        INSERT_TAG(jl_method_match_type);
        INSERT_TAG(jl_pinode_type);
        INSERT_TAG(jl_phinode_type);
        INSERT_TAG(jl_phicnode_type);
        INSERT_TAG(jl_upsilonnode_type);
        INSERT_TAG(jl_type_type);
        INSERT_TAG(jl_bottom_type);
        INSERT_TAG(jl_ref_type);
        INSERT_TAG(jl_pointer_type);
        INSERT_TAG(jl_llvmpointer_type);
        INSERT_TAG(jl_vararg_type);
        INSERT_TAG(jl_abstractarray_type);
        INSERT_TAG(jl_densearray_type);
        INSERT_TAG(jl_nothing_type);
        INSERT_TAG(jl_function_type);
        INSERT_TAG(jl_typeofbottom_type);
        INSERT_TAG(jl_unionall_type);
        INSERT_TAG(jl_typename_type);
        INSERT_TAG(jl_builtin_type);
        INSERT_TAG(jl_code_info_type);
        INSERT_TAG(jl_opaque_closure_type);
        INSERT_TAG(jl_task_type);
        INSERT_TAG(jl_uniontype_type);
        INSERT_TAG(jl_abstractstring_type);
        INSERT_TAG(jl_array_any_type);
        INSERT_TAG(jl_intrinsic_type);
        INSERT_TAG(jl_methtable_type);
        INSERT_TAG(jl_typemap_level_type);
        INSERT_TAG(jl_typemap_entry_type);
        INSERT_TAG(jl_voidpointer_type);
        INSERT_TAG(jl_uint8pointer_type);
        INSERT_TAG(jl_newvarnode_type);
        INSERT_TAG(jl_anytuple_type_type);
        INSERT_TAG(jl_anytuple_type);
        INSERT_TAG(jl_namedtuple_type);
        INSERT_TAG(jl_emptytuple_type);
        INSERT_TAG(jl_array_symbol_type);
        INSERT_TAG(jl_array_uint8_type);
        INSERT_TAG(jl_array_uint32_type);
        INSERT_TAG(jl_array_int32_type);
        INSERT_TAG(jl_array_uint64_type);
        INSERT_TAG(jl_int32_type);
        INSERT_TAG(jl_int64_type);
        INSERT_TAG(jl_bool_type);
        INSERT_TAG(jl_uint8_type);
        INSERT_TAG(jl_uint16_type);
        INSERT_TAG(jl_uint32_type);
        INSERT_TAG(jl_uint64_type);
        INSERT_TAG(jl_char_type);
        INSERT_TAG(jl_weakref_type);
        INSERT_TAG(jl_int8_type);
        INSERT_TAG(jl_int16_type);
        INSERT_TAG(jl_float16_type);
        INSERT_TAG(jl_float32_type);
        INSERT_TAG(jl_float64_type);
        INSERT_TAG(jl_bfloat16_type);
        INSERT_TAG(jl_floatingpoint_type);
        INSERT_TAG(jl_number_type);
        INSERT_TAG(jl_signed_type);
        INSERT_TAG(jl_pair_type);
        INSERT_TAG(jl_genericmemory_type);
        INSERT_TAG(jl_memory_any_type);
        INSERT_TAG(jl_memory_uint8_type);
        INSERT_TAG(jl_memory_uint16_type);
        INSERT_TAG(jl_memory_uint32_type);
        INSERT_TAG(jl_memory_uint64_type);
        INSERT_TAG(jl_genericmemoryref_type);
        INSERT_TAG(jl_memoryref_any_type);
        INSERT_TAG(jl_memoryref_uint8_type);
        INSERT_TAG(jl_addrspace_type);
        INSERT_TAG(jl_addrspace_typename);
        INSERT_TAG(jl_addrspacecore_type);
        // special typenames
        INSERT_TAG(jl_tuple_typename);
        INSERT_TAG(jl_pointer_typename);
        INSERT_TAG(jl_llvmpointer_typename);
        INSERT_TAG(jl_array_typename);
        INSERT_TAG(jl_type_typename);
        INSERT_TAG(jl_namedtuple_typename);
        INSERT_TAG(jl_vecelement_typename);
        INSERT_TAG(jl_opaque_closure_typename);
        INSERT_TAG(jl_genericmemory_typename);
        INSERT_TAG(jl_genericmemoryref_typename);
        // special exceptions
        INSERT_TAG(jl_errorexception_type);
        INSERT_TAG(jl_argumenterror_type);
        INSERT_TAG(jl_typeerror_type);
        INSERT_TAG(jl_methoderror_type);
        INSERT_TAG(jl_loaderror_type);
        INSERT_TAG(jl_initerror_type);
        INSERT_TAG(jl_undefvarerror_type);
        INSERT_TAG(jl_stackovf_exception);
        INSERT_TAG(jl_diverror_exception);
        INSERT_TAG(jl_interrupt_exception);
        INSERT_TAG(jl_boundserror_type);
        INSERT_TAG(jl_memory_exception);
        INSERT_TAG(jl_undefref_exception);
        INSERT_TAG(jl_readonlymemory_exception);
        INSERT_TAG(jl_atomicerror_type);
        INSERT_TAG(jl_missingcodeerror_type);
        INSERT_TAG(jl_precompilable_error);
        // other special values
        INSERT_TAG(jl_emptysvec);
        INSERT_TAG(jl_emptytuple);
        INSERT_TAG(jl_false);
        INSERT_TAG(jl_true);
        INSERT_TAG(jl_an_empty_string);
        INSERT_TAG(jl_an_empty_vec_any);
        INSERT_TAG(jl_an_empty_memory_any);
        INSERT_TAG(jl_module_init_order);
        INSERT_TAG(jl_core_module);
        INSERT_TAG(jl_base_module);
        INSERT_TAG(jl_main_module);
        INSERT_TAG(jl_top_module);
        INSERT_TAG(jl_typeinf_func);
        INSERT_TAG(jl_type_type_mt);
        INSERT_TAG(jl_nonfunction_mt);
        INSERT_TAG(jl_kwcall_mt);
        INSERT_TAG(jl_kwcall_func);
        INSERT_TAG(jl_opaque_closure_method);
        // some Core.Builtin Functions that we want to be able to reference:
        INSERT_TAG(jl_builtin_throw);
        INSERT_TAG(jl_builtin_is);
        INSERT_TAG(jl_builtin_typeof);
        INSERT_TAG(jl_builtin_sizeof);
        INSERT_TAG(jl_builtin_issubtype);
        INSERT_TAG(jl_builtin_isa);
        INSERT_TAG(jl_builtin_typeassert);
        INSERT_TAG(jl_builtin__apply_iterate);
        INSERT_TAG(jl_builtin_isdefined);
        INSERT_TAG(jl_builtin_nfields);
        INSERT_TAG(jl_builtin_tuple);
        INSERT_TAG(jl_builtin_svec);
        INSERT_TAG(jl_builtin_getfield);
        INSERT_TAG(jl_builtin_setfield);
        INSERT_TAG(jl_builtin_swapfield);
        INSERT_TAG(jl_builtin_modifyfield);
        INSERT_TAG(jl_builtin_replacefield);
        INSERT_TAG(jl_builtin_setfieldonce);
        INSERT_TAG(jl_builtin_fieldtype);
        INSERT_TAG(jl_builtin_memoryref);
        INSERT_TAG(jl_builtin_memoryrefoffset);
        INSERT_TAG(jl_builtin_memoryrefget);
        INSERT_TAG(jl_builtin_memoryrefset);
        INSERT_TAG(jl_builtin_memoryref_isassigned);
        INSERT_TAG(jl_builtin_memoryrefswap);
        INSERT_TAG(jl_builtin_memoryrefmodify);
        INSERT_TAG(jl_builtin_memoryrefreplace);
        INSERT_TAG(jl_builtin_memoryrefsetonce);
        INSERT_TAG(jl_builtin_apply_type);
        INSERT_TAG(jl_builtin_applicable);
        INSERT_TAG(jl_builtin_invoke);
        INSERT_TAG(jl_builtin__expr);
        INSERT_TAG(jl_builtin_ifelse);
        INSERT_TAG(jl_builtin__typebody);
        INSERT_TAG(jl_builtin_donotdelete);
        INSERT_TAG(jl_builtin_compilerbarrier);
        INSERT_TAG(jl_builtin_getglobal);
        INSERT_TAG(jl_builtin_setglobal);
        INSERT_TAG(jl_builtin_swapglobal);
        INSERT_TAG(jl_builtin_modifyglobal);
        INSERT_TAG(jl_builtin_replaceglobal);
        INSERT_TAG(jl_builtin_setglobalonce);
        // n.b. must update NUM_TAGS when you add something here
#undef INSERT_TAG
        // the final slot stays NULL as the length sentinel promised above
        assert(i == NUM_TAGS - 1);
    }
    return (jl_value_t**const*const) _tags;
}
// hash of definitions for predefined tagged object
static htable_t symbol_table;
static uintptr_t nsym_tag; // running count of symbols interned in symbol_table
// array of definitions for the predefined tagged object types
// (reverse of symbol_table)
static arraylist_t deser_sym;
static htable_t serialization_order; // to break cycles, mark all objects that are serialized
static htable_t nullptrs; // per-type cache of zero-valued Ptr boxes used to scrub pointer fields
// FIFO queue for objects to be serialized. Anything requiring fixup upon deserialization
// must be "toplevel" in this queue. For types, parameters and field types must appear
// before the "wrapper" type so they can be properly recached against the running system.
static arraylist_t serialization_queue;
static arraylist_t layout_table; // cache of `position(s)` for each `id` in `serialization_order`
static arraylist_t object_worklist; // used to mimic recursion by jl_serialize_reachable
// Permanent list of void* (begin, end+1) pairs of system/package images we've loaded previously
// together with their module build_ids (used for external linkage)
// jl_linkage_blobs.items[2i:2i+1] correspond to build_ids[i] (0-offset indexing)
arraylist_t jl_linkage_blobs;
arraylist_t jl_image_relocs;
// Eytzinger tree of images. Used for very fast jl_object_in_image queries
// See https://algorithmica.org/en/eytzinger
arraylist_t eytzinger_image_tree;
arraylist_t eytzinger_idxs;
// cached address range covered by any image blob (set by rebuild_image_blob_tree)
static uintptr_t img_min;
static uintptr_t img_max;
// qsort comparator for uintptr_t values stored behind void* slots.
// Yields -1/0/1 for left less-than/equal/greater-than right.
static int ptr_cmp(const void *l, const void *r)
{
    const uintptr_t a = *(const uintptr_t*)l;
    const uintptr_t b = *(const uintptr_t*)r;
    if (a < b)
        return -1;
    return a > b ? 1 : 0;
}
// Build an eytzinger tree from a sorted array
// Lay out the sorted array `src` into `dest` in Eytzinger (BFS) order via an
// in-order traversal of the implicit tree rooted at 1-indexed node `k`.
// `i` is the next source index to consume; the updated index is returned.
static int eytzinger(uintptr_t *src, uintptr_t *dest, size_t i, size_t k, size_t n)
{
    if (k > n)
        return i;
    // left subtree first (in-order), then this node, then the right subtree
    i = eytzinger(src, dest, i, 2 * k, n);
    dest[k - 1] = src[i++];
    return eytzinger(src, dest, i, 2 * k + 1, n);
}
// Branchless-ish Eytzinger search: find the index (into eytzinger_image_tree)
// of the largest blob boundary strictly less than `obj`. Returns `len - 1`
// (the sentinel slot) when `obj` is outside [img_min, img_max] and therefore
// cannot belong to any loaded image, or when the tree is empty.
static size_t eyt_obj_idx(jl_value_t *obj) JL_NOTSAFEPOINT
{
    size_t n = eytzinger_image_tree.len - 1;
    if (n == 0)
        return n;
    assert(n % 2 == 0 && "Eytzinger tree not even length!");
    uintptr_t cmp = (uintptr_t) obj;
    if (cmp <= img_min || cmp > img_max)
        return n;
    uintptr_t *tree = (uintptr_t*)eytzinger_image_tree.items;
    size_t k = 1;
    // note that k preserves the history of how we got to the current node
    // (each level appends one bit: 1 if we went right, 0 if left)
    while (k <= n) {
        int greater = (cmp > tree[k - 1]);
        k <<= 1;
        k |= greater;
    }
    // Free to assume k is nonzero, since we start with k = 1
    // and cmp > gc_img_min
    // This shift does a fast revert of the path until we get
    // to a node that evaluated less than cmp.
    k >>= (__builtin_ctzll(k) + 1);
    assert(k != 0);
    assert(k <= n && "Eytzinger tree index out of bounds!");
    assert(tree[k - 1] < cmp && "Failed to find lower bound for object!");
    return k - 1;
}
//used in staticdata.c after we add an image
//used in staticdata.c after we add an image
// Rebuild the Eytzinger search structures from jl_linkage_blobs so that
// eyt_obj_idx/eyt_obj_in_img/external_blob_index reflect all loaded images.
// eytzinger_idxs is reused as scratch: first it holds the sorted (tagged)
// blob boundaries, then after building the tree it is overwritten with the
// blob index each tree slot maps to.
void rebuild_image_blob_tree(void)
{
    size_t inc = 1 + jl_linkage_blobs.len - eytzinger_image_tree.len;
    assert(eytzinger_idxs.len == eytzinger_image_tree.len);
    assert(eytzinger_idxs.max == eytzinger_image_tree.max);
    arraylist_grow(&eytzinger_idxs, inc);
    arraylist_grow(&eytzinger_image_tree, inc);
    eytzinger_idxs.items[eytzinger_idxs.len - 1] = (void*)jl_linkage_blobs.len;
    eytzinger_image_tree.items[eytzinger_image_tree.len - 1] = (void*)1; // outside image
    for (size_t i = 0; i < jl_linkage_blobs.len; i++) {
        assert((uintptr_t) jl_linkage_blobs.items[i] % 4 == 0 && "Linkage blob not 4-byte aligned!");
        // We abuse the pointer here a little so that a couple of properties are true:
        // 1. a start and an end are never the same value. This simplifies the binary search.
        // 2. ends are always after starts. This also simplifies the binary search.
        // We assume that there exist no 0-size blobs, but that's a safe assumption
        // since it means nothing could be there anyways
        uintptr_t val = (uintptr_t) jl_linkage_blobs.items[i];
        // even i = blob start (low bit 0), odd i = blob end (low bit 1)
        eytzinger_idxs.items[i] = (void*)(val + (i & 1));
    }
    qsort(eytzinger_idxs.items, eytzinger_idxs.len - 1, sizeof(void*), ptr_cmp);
    img_min = (uintptr_t) eytzinger_idxs.items[0];
    img_max = (uintptr_t) eytzinger_idxs.items[eytzinger_idxs.len - 2] + 1;
    eytzinger((uintptr_t*)eytzinger_idxs.items, (uintptr_t*)eytzinger_image_tree.items, 0, 1, eytzinger_idxs.len - 1);
    // Reuse the scratch memory to store the indices
    // Still O(nlogn) because binary search
    for (size_t i = 0; i < jl_linkage_blobs.len; i ++) {
        uintptr_t val = (uintptr_t) jl_linkage_blobs.items[i];
        // This is the same computation as in the prior for loop
        uintptr_t eyt_val = val + (i & 1);
        // +1 so the lower-bound search lands exactly on eyt_val's own slot
        size_t eyt_idx = eyt_obj_idx((jl_value_t*)(eyt_val + 1)); assert(eyt_idx < eytzinger_idxs.len - 1);
        assert(eytzinger_image_tree.items[eyt_idx] == (void*)eyt_val && "Eytzinger tree failed to find object!");
        if (i & 1)
            eytzinger_idxs.items[eyt_idx] = (void*)n_linkage_blobs(); // end marker: invalid blob index
        else
            eytzinger_idxs.items[eyt_idx] = (void*)(i / 2); // start marker: blob number
    }
}
// Return 1 when `obj` lies inside one of the loaded image blobs, else 0.
static int eyt_obj_in_img(jl_value_t *obj) JL_NOTSAFEPOINT
{
    assert((uintptr_t) obj % 4 == 0 && "Object not 4-byte aligned!");
    int idx = eyt_obj_idx(obj);
    // The low bit of each tree entry tags it as a blob start (0) or end (1).
    // Landing on a start boundary means obj falls within [begin, end) of that
    // blob; landing on an end (or the sentinel) means it does not.
    uintptr_t entry = (uintptr_t)eytzinger_image_tree.items[idx];
    return (entry & 1) == 0;
}
// Return the index of the image blob containing `v`, or the number of blobs
// (an out-of-range value) when `v` is not inside any loaded image.
size_t external_blob_index(jl_value_t *v) JL_NOTSAFEPOINT
{
    assert((uintptr_t) v % 4 == 0 && "Object not 4-byte aligned!");
    int eyt_idx = eyt_obj_idx(v);
    // Slots that do not correspond to a blob start were pre-filled with the
    // total blob count, so a miss naturally reports an invalid index.
    return (size_t) eytzinger_idxs.items[eyt_idx];
}
// Public wrapper: nonzero iff `obj` resides in a previously-loaded
// system or package image.
uint8_t jl_object_in_image(jl_value_t *obj) JL_NOTSAFEPOINT
{
    return eyt_obj_in_img(obj) ? 1 : 0;
}
// hash of definitions for predefined function pointers
static htable_t fptr_to_id;
void *native_functions; // opaque jl_native_code_desc_t blob used for fetching data from LLVM
// table of struct field addresses to rewrite during saving
static htable_t field_replace;
// array of definitions for the predefined function pointers
// (reverse of fptr_to_id)
// This is a manually constructed dual of the fvars array, which would be produced by codegen for Julia code, for C.
// NULL-terminated; id 0 is reserved to mean "no/unknown fptr" (see jl_fptr_id).
static const jl_fptr_args_t id_to_fptrs[] = {
    &jl_f_throw, &jl_f_is, &jl_f_typeof, &jl_f_issubtype, &jl_f_isa,
    &jl_f_typeassert, &jl_f__apply_iterate, &jl_f__apply_pure,
    &jl_f__call_latest, &jl_f__call_in_world, &jl_f__call_in_world_total, &jl_f_isdefined,
    &jl_f_tuple, &jl_f_svec, &jl_f_intrinsic_call,
    &jl_f_getfield, &jl_f_setfield, &jl_f_swapfield, &jl_f_modifyfield, &jl_f_setfieldonce,
    &jl_f_replacefield, &jl_f_fieldtype, &jl_f_nfields, &jl_f_apply_type,
    &jl_f_memoryref, &jl_f_memoryrefoffset, &jl_f_memoryrefget, &jl_f_memoryref_isassigned,
    &jl_f_memoryrefset, &jl_f_memoryrefswap, &jl_f_memoryrefmodify, &jl_f_memoryrefreplace, &jl_f_memoryrefsetonce,
    &jl_f_applicable, &jl_f_invoke, &jl_f_sizeof, &jl_f__expr, &jl_f__typevar,
    &jl_f_ifelse, &jl_f__structtype, &jl_f__abstracttype, &jl_f__primitivetype,
    &jl_f__typebody, &jl_f__setsuper, &jl_f__equiv_typedef, &jl_f_get_binding_type,
    &jl_f_set_binding_type, &jl_f_opaque_closure_call, &jl_f_donotdelete, &jl_f_compilerbarrier,
    &jl_f_getglobal, &jl_f_setglobal, &jl_f_swapglobal, &jl_f_modifyglobal, &jl_f_replaceglobal, &jl_f_setglobalonce,
    &jl_f_finalizer, &jl_f__compute_sparams, &jl_f__svec_ref,
    &jl_f_current_scope,
    NULL };
// All mutable state threaded through one save or load of an image; see the
// file-header comment for how the streams and touch-up lists fit together.
typedef struct {
    ios_t *s; // the main stream
    ios_t *const_data; // GC-invisible internal data (e.g., datatype layouts, list-like typename fields, foreign types, internal arrays)
    ios_t *symbols; // names (char*) of symbols (some may be referenced by pointer in generated code)
    ios_t *relocs; // for (de)serializing relocs_list and gctags_list
    ios_t *gvar_record; // serialized array mapping gvid => spos
    ios_t *fptr_record; // serialized array mapping fptrid => spos
    arraylist_t memowner_list; // a list of memory locations that have shared owners
    arraylist_t memref_list; // a list of memoryref locations
    arraylist_t relocs_list; // a list of (location, target) pairs, see description at top
    arraylist_t gctags_list; // same (location, target) format, for pointers needing gc-tag fixup
    arraylist_t uniquing_types; // a list of locations that reference types that must be de-duplicated
    arraylist_t uniquing_super; // a list of datatypes, used in super fields, that need to be marked in uniquing_types once they are reached, for handling unique-ing of them on deserialization
    arraylist_t uniquing_objs; // a list of locations that reference non-types that must be de-duplicated
    arraylist_t fixup_types; // a list of locations of types requiring (re)caching
    arraylist_t fixup_objs; // a list of locations of objects requiring (re)caching
    arraylist_t ccallable_list; // @ccallable entry points to install
    // mapping from a buildid_idx to a depmods_idx
    jl_array_t *buildid_depmods_idxs;
    // record of build_ids for all external linkages, in order of serialization for the current sysimg/pkgimg
    // conceptually, the base pointer for the jth externally-linked item is determined from
    // i = findfirst(==(link_ids[j]), build_ids)
    // blob_base = jl_linkage_blobs.items[2i] # 0-offset indexing
    // We need separate lists since they are intermingled at creation but split when written.
    jl_array_t *link_ids_relocs;
    jl_array_t *link_ids_gctags;
    jl_array_t *link_ids_gvars;
    jl_array_t *link_ids_external_fnvars;
    jl_ptls_t ptls;
    // Set (implemented as a hashmap of MethodInstances to themselves) of which MethodInstances have (forward) edges
    // to other MethodInstances.
    htable_t callers_with_edges;
    jl_image_t *image;
    int8_t incremental; // nonzero when saving/loading a package image against a running system
} jl_serializer_state;
// Lazily-resolved handles into Base for BigInt and doc-metadata support.
static jl_value_t *jl_bigint_type = NULL;
static int gmp_limb_size = 0;
static jl_sym_t *jl_docmeta_sym = NULL;
#ifdef _P64
#define RELOC_TAG_OFFSET 61
#define DEPS_IDX_OFFSET 40 // only on 64-bit can we encode the dependency-index as part of the tagged reloc
#else
// this supports up to 8 RefTags, 512MB of pointer data, and 4/2 (64/32-bit) GB of constant data.
#define RELOC_TAG_OFFSET 29
#define DEPS_IDX_OFFSET RELOC_TAG_OFFSET
#endif
// Tags of category `t` are located at offsets `t << RELOC_TAG_OFFSET`
// Consequently there is room for 2^RELOC_TAG_OFFSET pointers, etc
enum RefTags {
    DataRef, // mutable data
    ConstDataRef, // constant data (e.g., layouts)
    TagRef, // items serialized via their tags
    SymbolRef, // symbols
    FunctionRef, // functions
    SysimageLinkage, // reference to the sysimage (from pkgimage)
    ExternalLinkage // reference to some other pkgimage
};
// calling conventions for internal entry points.
// this is used to set the method-instance->invoke field
typedef enum {
    JL_API_NULL,
    JL_API_BOXED,
    JL_API_CONST,
    JL_API_WITH_PARAMETERS,
    JL_API_INTERPRETED,
    JL_API_BUILTIN,
    JL_API_MAX
} jl_callingconv_t;
// Sub-divisions of some RefTags
const uintptr_t BuiltinFunctionTag = ((uintptr_t)1 << (RELOC_TAG_OFFSET - 1));
// reloc_t must be wide enough to hold any offset the tag encoding can produce
#if RELOC_TAG_OFFSET <= 32
typedef uint32_t reloc_t;
#else
typedef uint64_t reloc_t;
#endif
// Emit `reloc_id` to stream `s` in the platform's reloc_t width:
// uint64 on 64-bit targets, uint32 (with a range check) on 32-bit.
static void write_reloc_t(ios_t *s, uintptr_t reloc_id) JL_NOTSAFEPOINT
{
    if (sizeof(reloc_t) > sizeof(uint32_t)) {
        write_uint64(s, reloc_id);
    }
    else {
        assert(reloc_id < UINT32_MAX);
        write_uint32(s, reloc_id);
    }
}
// Reporting to PkgCacheInspector
// Byte sizes of each serialized section of a package image.
// NOTE(review): fields presumably mirror the streams in jl_serializer_state
// (s, const_data, symbols, tags, relocs, gvar_record, fptr_record) -- confirm
// at the site where this struct is populated.
typedef struct {
    size_t sysdata;
    size_t isbitsdata;
    size_t symboldata;
    size_t tagslist;
    size_t reloclist;
    size_t gvarlist;
    size_t fptrlist;
} pkgcachesizes;
// --- Static Compile ---
static void *jl_sysimg_handle = NULL; // library handle for the sysimage, used with jl_dlsym
static jl_image_t sysimage;
// Address of the idx'th global-variable slot in a system image, computed from
// the image base pointer and its table of byte offsets.
static inline uintptr_t *sysimg_gvars(const char *base, const int32_t *offsets, size_t idx)
{
    const char *slot = base + offsets[idx];
    return (uintptr_t*)slot;
}
// Expose Valgrind's client-request check (from valgrind.h) so callers can
// detect whether the process is being instrumented.
JL_DLLEXPORT int jl_running_on_valgrind(void)
{
    return RUNNING_ON_VALGRIND;
}
// Weak symbols: when Julia is linked with an embedded system image these
// resolve to the real data/size; otherwise they alias
// `system_image_data_unavailable` and the image must be found via jl_dlsym.
void *system_image_data_unavailable;
extern void * JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(system_image_data_unavailable) jl_system_image_data;
extern void * JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(system_image_data_unavailable) jl_system_image_size;
// Locate the system image blob (embedded in the executable, or exported by
// the sysimage shared library) and hand it to jl_restore_system_image_data.
static void jl_load_sysimg_so(void)
{
    const char *sysimg_data;
    assert(sysimage.fptrs.ptrs); // jl_init_processor_sysimg should already be run
    if (jl_sysimg_handle == jl_exe_handle &&
        &jl_system_image_data != JL_WEAK_SYMBOL_DEFAULT(system_image_data_unavailable))
        sysimg_data = (const char*)&jl_system_image_data;
    else
        jl_dlsym(jl_sysimg_handle, "jl_system_image_data", (void **)&sysimg_data, 1);
    size_t *plen;
    if (jl_sysimg_handle == jl_exe_handle &&
        &jl_system_image_size != JL_WEAK_SYMBOL_DEFAULT(system_image_data_unavailable))
        plen = (size_t *)&jl_system_image_size;
    else
        jl_dlsym(jl_sysimg_handle, "jl_system_image_size", (void **)&plen, 1);
    jl_restore_system_image_data(sysimg_data, *plen);
}
// --- serializer ---
// Small boxed Int64/Int32 values within NBOX_C/2 of zero are encoded through
// the tag table instead of being written out.
#define NBOX_C 1024
// Return 0 when `v` has a special relocation representation and therefore
// should NOT be written into the blob: objects already in a loaded image
// (incremental mode), NULL/symbols/nothing, small boxed ints, any boxed
// UInt8, or the root task. Everything else returns 1.
static int jl_needs_serialization(jl_serializer_state *s, jl_value_t *v) JL_NOTSAFEPOINT
{
    // ignore items that are given a special relocation representation
    if (s->incremental && jl_object_in_image(v))
        return 0;
    if (v == NULL || jl_is_symbol(v) || v == jl_nothing) {
        return 0;
    }
    else if (jl_typetagis(v, jl_int64_tag << 4)) {
        // shift into [0, NBOX_C) to test membership in the cached-box range
        int64_t i64 = *(int64_t*)v + NBOX_C / 2;
        if ((uint64_t)i64 < NBOX_C)
            return 0;
    }
    else if (jl_typetagis(v, jl_int32_tag << 4)) {
        int32_t i32 = *(int32_t*)v + NBOX_C / 2;
        if ((uint32_t)i32 < NBOX_C)
            return 0;
    }
    else if (jl_typetagis(v, jl_uint8_tag << 4)) {
        // all 256 UInt8 boxes are interned, never serialized
        return 0;
    }
    else if (v == (jl_value_t*)s->ptls->root_task) {
        return 0;
    }
    return 1;
}
// Classify `v` for cache handling on deserialization:
//   0 - no special cache handling
//   1 - must be uniqued against the running system (external definition
//       whose dependencies are not in the current worklist)
//   2 - must be recached (depends on types from the current worklist)
// See needs_recaching/needs_uniquing below for the consumers.
static int caching_tag(jl_value_t *v) JL_NOTSAFEPOINT
{
    if (jl_is_method_instance(v)) {
        jl_method_instance_t *mi = (jl_method_instance_t*)v;
        jl_value_t *m = mi->def.value;
        // external method: 1, plus 1 more if specTypes involve worklist types
        if (jl_is_method(m) && jl_object_in_image(m))
            return 1 + type_in_worklist(mi->specTypes);
    }
    if (jl_is_datatype(v)) {
        jl_datatype_t *dt = (jl_datatype_t*)v;
        if (jl_is_tuple_type(dt) ? !dt->isconcretetype : dt->hasfreetypevars)
            return 0; // aka !is_cacheable from jltypes.c
        if (jl_object_in_image((jl_value_t*)dt->name))
            return 1 + type_in_worklist(v);
    }
    jl_value_t *dtv = jl_typeof(v);
    if (jl_is_datatype_singleton((jl_datatype_t*)dtv)) {
        return 1 - type_in_worklist(dtv); // these are already recached in the datatype in the image
    }
    return 0;
}
// True when `v` depends on worklist types and must be recached on load.
static int needs_recaching(jl_value_t *v) JL_NOTSAFEPOINT
{
    int tag = caching_tag(v);
    return tag == 2;
}
// True when `v` must be de-duplicated against the running system on load.
// Only meaningful for objects not already present in a loaded image.
static int needs_uniquing(jl_value_t *v) JL_NOTSAFEPOINT
{
    assert(!jl_object_in_image(v));
    int tag = caching_tag(v);
    return tag == 1;
}
// Arrange for the field at `addr` to be serialized as `newval` instead of
// its current contents (consulted later by get_replaceable_field).
static void record_field_change(jl_value_t **addr, jl_value_t *newval) JL_NOTSAFEPOINT
{
    void *key = (void*)addr;
    ptrhash_put(&field_replace, key, newval);
}
// Fetch the value to serialize for the field at `addr`: a recorded
// replacement if one exists, otherwise the live value -- with non-trivial
// mutable Ptr fields scrubbed to a zero-valued pointer of the same type so
// stale addresses never reach the image.
static jl_value_t *get_replaceable_field(jl_value_t **addr, int mutabl) JL_GC_DISABLED
{
    jl_value_t *repl = (jl_value_t*)ptrhash_get(&field_replace, addr);
    if (repl != HT_NOTFOUND)
        return repl; // an explicit replacement was recorded
    jl_value_t *fld = *addr;
    if (mutabl && fld && jl_is_cpointer_type(jl_typeof(fld)) && jl_unbox_voidpointer(fld) != NULL && jl_unbox_voidpointer(fld) != (void*)(uintptr_t)-1) {
        // lazily build (and cache) a NULL-valued box per pointer type
        void **nullval = ptrhash_bp(&nullptrs, (void*)jl_typeof(fld));
        if (*nullval == HT_NOTFOUND) {
            void *C_NULL = NULL;
            *nullval = (void*)jl_new_bits(jl_typeof(fld), &C_NULL);
        }
        fld = (jl_value_t*)*nullval;
    }
    return fld;
}
// Map a C function pointer to its id in `fptr_to_id`; returns 0 for NULL or
// unregistered pointers. (ptrhash_bp is called unconditionally, matching the
// original's side effect of reserving a bucket for the key.)
static uintptr_t jl_fptr_id(void *fptr)
{
    void **pbp = ptrhash_bp(&fptr_to_id, fptr);
    if (fptr == NULL || *pbp == HT_NOTFOUND)
        return 0;
    return *(uintptr_t*)pbp;
}
// `jl_queue_for_serialization` adds items to `serialization_order`
#define jl_queue_for_serialization(s, v) jl_queue_for_serialization_((s), (jl_value_t*)(v), 1, 0)
static void jl_queue_for_serialization_(jl_serializer_state *s, jl_value_t *v, int recursive, int immediate) JL_GC_DISABLED;
// Queue the GC-visible pieces of module `m`: its name, parent, binding table,
// binding keyset, and every module in `usings`. When stripping metadata, any
// binding for the docmeta symbol has its value replaced with `nothing` so the
// documentation dictionary is omitted from the image.
static void jl_queue_module_for_serialization(jl_serializer_state *s, jl_module_t *m) JL_GC_DISABLED
{
    jl_queue_for_serialization(s, m->name);
    jl_queue_for_serialization(s, m->parent);
    jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindings));
    jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindingkeyset));
    if (jl_options.strip_metadata) {
        jl_svec_t *table = jl_atomic_load_relaxed(&m->bindings);
        for (size_t i = 0; i < jl_svec_len(table); i++) {
            jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
            if ((void*)b == jl_nothing)
                break; // `nothing` marks the end of the occupied slots
            jl_sym_t *name = b->globalref->name;
            if (name == jl_docmeta_sym && jl_atomic_load_relaxed(&b->value))
                record_field_change((jl_value_t**)&b->value, jl_nothing);
        }
    }
    for (size_t i = 0; i < m->usings.len; i++) {
        jl_queue_for_serialization(s, (jl_value_t*)m->usings.items[i]);
    }
}
// Anything that requires uniquing or fixing during deserialization needs to be "toplevel"
// in serialization (i.e., have its own entry in `serialization_order`). Consequently,
// objects that act as containers for other potentially-"problematic" objects must add such "children"
// to the queue.
// Most objects use preorder traversal. But things that need uniquing require postorder:
// you want to handle uniquing of `Dict{String,Float64}` before you tackle `Vector{Dict{String,Float64}}`.
// Uniquing is done in `serialization_order`, so the very first mention of such an object must
// be the "source" rather than merely a cross-reference.
// Visit `v`'s children (per the traversal rules described above), then assign
// `v` its slot in `serialization_queue` and record that index in
// `serialization_order`. Sentinels in `serialization_order`: -1 = enqueued
// (lazy/pre-order), -2 = currently being handled immediately (post-order);
// any other value encodes the final queue index.
static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_t *v, int recursive, int immediate) JL_GC_DISABLED
{
    jl_datatype_t *t = (jl_datatype_t*)jl_typeof(v);
    // the type must always be serialized before (or with) the value
    jl_queue_for_serialization_(s, (jl_value_t*)t, 1, immediate);
    const jl_datatype_layout_t *layout = t->layout;
    if (!recursive)
        goto done_fields;
    if (s->incremental && jl_is_datatype(v) && immediate) {
        jl_datatype_t *dt = (jl_datatype_t*)v;
        // ensure all type parameters are recached
        jl_queue_for_serialization_(s, (jl_value_t*)dt->parameters, 1, 1);
        if (jl_is_datatype_singleton(dt) && needs_uniquing(dt->instance)) {
            assert(jl_needs_serialization(s, dt->instance)); // should be true, since we visited dt
            // do not visit dt->instance for our template object as it leads to unwanted cycles here
            // (it may get serialized from elsewhere though)
            record_field_change(&dt->instance, jl_nothing);
        }
        goto done_fields; // for now
    }
    if (s->incremental && jl_is_method_instance(v)) {
        jl_method_instance_t *mi = (jl_method_instance_t*)v;
        jl_value_t *def = mi->def.value;
        if (needs_uniquing(v)) {
            // we only need 3 specific fields of this (the rest are not used)
            jl_queue_for_serialization(s, mi->def.value);
            jl_queue_for_serialization(s, mi->specTypes);
            jl_queue_for_serialization(s, (jl_value_t*)mi->sparam_vals);
            goto done_fields;
        }
        else if (jl_is_method(def) && jl_object_in_image(def)) {
            // we only need 3 specific fields of this (the rest are restored afterward, if valid)
            // in particular, cache is repopulated by jl_mi_cache_insert for all foreign function,
            // so must not be present here
            record_field_change((jl_value_t**)&mi->uninferred, NULL);
            record_field_change((jl_value_t**)&mi->backedges, NULL);
            record_field_change((jl_value_t**)&mi->cache, NULL);
        }
        else {
            assert(!needs_recaching(v));
        }
        // n.b. opaque closures cannot be inspected and relied upon like a
        // normal method since they can get improperly introduced by generated
        // functions, so if they appeared at all, we will probably serialize
        // them wrong and segfault. The jl_code_for_staged function should
        // prevent this from happening, so we do not need to detect that user
        // error now.
    }
    if (s->incremental && jl_is_globalref(v)) {
        jl_globalref_t *gr = (jl_globalref_t*)v;
        // do not carry a binding pointer into a foreign (already-imaged) module
        if (jl_object_in_image((jl_value_t*)gr->mod)) {
            record_field_change((jl_value_t**)&gr->binding, NULL);
        }
    }
    if (jl_is_typename(v)) {
        jl_typename_t *tn = (jl_typename_t*)v;
        // don't recurse into several fields (yet)
        jl_queue_for_serialization_(s, (jl_value_t*)jl_atomic_load_relaxed(&tn->cache), 0, 1);
        jl_queue_for_serialization_(s, (jl_value_t*)jl_atomic_load_relaxed(&tn->linearcache), 0, 1);
        if (s->incremental) {
            assert(!jl_object_in_image((jl_value_t*)tn->module));
            assert(!jl_object_in_image((jl_value_t*)tn->wrapper));
        }
    }
    if (s->incremental && jl_is_code_instance(v)) {
        jl_code_instance_t *ci = (jl_code_instance_t*)v;
        // make sure we don't serialize other reachable cache entries of foreign methods
        // Should this now be:
        // if (ci !in ci->defs->cache)
        //     record_field_change((jl_value_t**)&ci->next, NULL);
        // Why are we checking that the method/module this originates from is in_image?
        // and then disconnect this CI?
        if (jl_object_in_image((jl_value_t*)ci->def->def.value)) {
            // TODO: if (ci in ci->defs->cache)
            record_field_change((jl_value_t**)&ci->next, NULL);
        }
    }
    if (immediate) // must be things that can be recursively handled, and valid as type parameters
        assert(jl_is_immutable(t) || jl_is_typevar(v) || jl_is_symbol(v) || jl_is_svec(v));
    // Now visit the pointer fields, dispatching on the container kind.
    if (layout->npointers == 0) {
        // bitstypes do not require recursion
    }
    else if (jl_is_svec(v)) {
        size_t i, l = jl_svec_len(v);
        jl_value_t **data = jl_svec_data(v);
        for (i = 0; i < l; i++) {
            jl_queue_for_serialization_(s, data[i], 1, immediate);
        }
    }
    else if (jl_is_array(v)) {
        jl_array_t *ar = (jl_array_t*)v;
        jl_value_t *mem = get_replaceable_field((jl_value_t**)&ar->ref.mem, 1);
        jl_queue_for_serialization_(s, mem, 1, immediate);
    }
    else if (jl_is_genericmemory(v)) {
        jl_genericmemory_t *m = (jl_genericmemory_t*)v;
        const char *data = (const char*)m->ptr;
        if (jl_genericmemory_how(m) == 3) {
            // memory whose data is owned by another object: queue the owner instead
            jl_queue_for_serialization_(s, jl_genericmemory_data_owner_field(v), 1, immediate);
        }
        else if (layout->flags.arrayelem_isboxed) {
            size_t i, l = m->length;
            for (i = 0; i < l; i++) {
                jl_value_t *fld = get_replaceable_field(&((jl_value_t**)data)[i], 1);
                jl_queue_for_serialization_(s, fld, 1, immediate);
            }
        }
        else if (layout->first_ptr >= 0) {
            // inline (unboxed) elements that contain pointer fields
            uint16_t elsz = layout->size;
            size_t i, l = m->length;
            size_t j, np = layout->npointers;
            for (i = 0; i < l; i++) {
                for (j = 0; j < np; j++) {
                    uint32_t ptr = jl_ptr_offset(t, j);
                    jl_value_t *fld = get_replaceable_field(&((jl_value_t**)data)[ptr], 1);
                    jl_queue_for_serialization_(s, fld, 1, immediate);
                }
                data += elsz;
            }
        }
    }
    else if (jl_typetagis(v, jl_module_tag << 4)) {
        jl_queue_module_for_serialization(s, (jl_module_t*)v);
    }
    else if (layout->nfields > 0) {
        // generic struct: walk each pointer field of the layout
        char *data = (char*)jl_data_ptr(v);
        size_t i, np = layout->npointers;
        for (i = 0; i < np; i++) {
            uint32_t ptr = jl_ptr_offset(t, i);
            int mutabl = t->name->mutabl;
            if (jl_is_binding(v) && ((jl_binding_t*)v)->constp && i == 0) // value field depends on constp field
                mutabl = 0;
            jl_value_t *fld = get_replaceable_field(&((jl_value_t**)data)[ptr], mutabl);
            jl_queue_for_serialization_(s, fld, 1, immediate);
        }
    }
done_fields: ;
    // We've encountered an item we need to cache
    void **bp = ptrhash_bp(&serialization_order, v);
    assert(*bp == (void*)(uintptr_t)-2);
    arraylist_push(&serialization_queue, (void*) v);
    size_t idx = serialization_queue.len - 1;
    assert(serialization_queue.len < ((uintptr_t)1 << RELOC_TAG_OFFSET) && "too many items to serialize");
    // replace the -2 sentinel with the encoded queue index
    *bp = (void*)((char*)HT_NOTFOUND + 1 + idx);
    // DataType is very unusual, in that some of the fields need to be pre-order, and some
    // (notably super) must not be (even if `jl_queue_for_serialization_` would otherwise
    // try to promote itself to be immediate)
    if (s->incremental && jl_is_datatype(v) && immediate && recursive) {
        jl_datatype_t *dt = (jl_datatype_t*)v;
        void **bp = ptrhash_bp(&serialization_order, (void*)dt->super);
        if (*bp != (void*)-2) {
            // if super is already on the stack of things to handle when this returns, do
            // not try to handle it now
            jl_queue_for_serialization_(s, (jl_value_t*)dt->super, 1, immediate);
        }
        immediate = 0;
        char *data = (char*)jl_data_ptr(v);
        size_t i, np = layout->npointers;
        for (i = 0; i < np; i++) {
            uint32_t ptr = jl_ptr_offset(t, i);
            if (ptr * sizeof(jl_value_t*) == offsetof(jl_datatype_t, super))
                continue; // skip the super field, since it might not be quite validly ordered
            int mutabl = 1;
            jl_value_t *fld = get_replaceable_field(&((jl_value_t**)data)[ptr], mutabl);
            jl_queue_for_serialization_(s, fld, 1, immediate);
        }
    }
}
// Entry point for queuing `v`. Decides between lazy pre-order handling (push
// onto `object_worklist`, sentinel -1) and immediate post-order handling
// (sentinel -2, recurse now) — the latter is required for objects that need
// uniquing during incremental deserialization.
static void jl_queue_for_serialization_(jl_serializer_state *s, jl_value_t *v, int recursive, int immediate) JL_GC_DISABLED
{
    if (!jl_needs_serialization(s, v))
        return;
    jl_value_t *t = jl_typeof(v);
    // Items that require postorder traversal must visit their children prior to insertion into
    // the worklist/serialization_order (and also before their first use)
    if (s->incremental && !immediate) {
        if (jl_is_datatype(t) && needs_uniquing(v))
            immediate = 1;
        if (jl_is_datatype_singleton((jl_datatype_t*)t) && needs_uniquing(v))
            immediate = 1;
    }
    void **bp = ptrhash_bp(&serialization_order, v);
    assert(!immediate || *bp != (void*)(uintptr_t)-2);
    if (*bp == HT_NOTFOUND)
        *bp = (void*)(uintptr_t)-1; // now enqueued
    // already present: only proceed if we must promote a lazily-queued (-1)
    // entry to immediate handling in incremental mode
    else if (!s->incremental || !immediate || !recursive || *bp != (void*)(uintptr_t)-1)
        return;
    if (immediate) {
        *bp = (void*)(uintptr_t)-2; // now immediate
        jl_insert_into_serialization_queue(s, v, recursive, immediate);
    }
    else {
        arraylist_push(&object_worklist, (void*)v);
    }
}
// Do a pre-order traversal of the to-serialize worklist, in the identical order
// to the calls to jl_queue_for_serialization would occur in a purely recursive
// implementation, but without potentially running out of stack.
static void jl_serialize_reachable(jl_serializer_state *s) JL_GC_DISABLED
{
    size_t i, prevlen = 0;
    while (object_worklist.len) {
        // reverse!(object_worklist.items, prevlen:end);
        // prevlen is the index of the first new object
        // (reversing the newly pushed tail makes popping from the end yield
        // the objects in their original push order, i.e. pre-order)
        for (i = prevlen; i < object_worklist.len; i++) {
            size_t j = object_worklist.len - i + prevlen - 1;
            void *tmp = object_worklist.items[i];
            object_worklist.items[i] = object_worklist.items[j];
            object_worklist.items[j] = tmp;
        }
        prevlen = --object_worklist.len;
        jl_value_t *v = (jl_value_t*)object_worklist.items[prevlen];
        void **bp = ptrhash_bp(&serialization_order, (void*)v);
        assert(*bp != HT_NOTFOUND && *bp != (void*)(uintptr_t)-2);
        if (*bp == (void*)(uintptr_t)-1) { // might have been eagerly handled for post-order while in the lazy pre-order queue
            *bp = (void*)(uintptr_t)-2;
            jl_insert_into_serialization_queue(s, v, 1, 0);
        }
        else {
            assert(s->incremental);
        }
    }
}
static void ios_ensureroom(ios_t *s, size_t newsize) JL_NOTSAFEPOINT
{
size_t prevsize = s->size;
if (prevsize < newsize) {