mbitmap.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are described
// by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
// to be visited during GC. The bits in each byte are consumed starting with
// the low bit: 1<<0, 1<<1, and so on.
//
// Heap bitmap
//
// The allocated heap comes from a subset of the memory in the range [start, used),
// where start == mheap_.arena_start and used == mheap_.arena_used.
// The heap bitmap comprises 2 bits for each pointer-sized word in that range,
// stored in bytes indexed backward in memory from start.
// That is, the byte at address start-1 holds the 2-bit entries for the four words
// start through start+3*ptrSize, the byte at start-2 holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
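// For example, the word at address start+5*ptrSize has word offset off = 5,
// so its bits are in the byte at start - off/4 - 1 = start-2, at entry
// index off&3 = 1 within that byte.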
//
// In each 2-bit entry, the lower bit holds the same information as in the 1-bit
// bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
// The meaning of the high bit depends on the position of the word being described
// in its allocated object. In the first word, the high bit is the GC ``marked'' bit.
// In the second word, the high bit is the GC ``checkmarked'' bit (see below).
// In the third and later words, the high bit indicates that the object is still
// being described. In these words, if a bit pair with a high bit 0 is encountered,
// the low bit can also be assumed to be 0, and the object description is over.
// This 00 is called the ``dead'' encoding: it signals that the rest of the words
// in the object are uninteresting to the garbage collector.
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 mark bits and the bottom half contains 4 pointer bits.
// This form allows a copy from the 1-bit to the 4-bit form to keep the
// pointer bits contiguous, instead of having to space them out.
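// For example, a bitmap byte laid out as mmmmpppp holds the pointer bits for
// four consecutive words in its low nibble (bitPointer<<0 through bitPointer<<3)
// and their mark bits in its high nibble (bitMarked<<0 through bitMarked<<3).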
//
// The code makes use of the fact that the zero value for a heap bitmap
// has no live pointer bit set and is (depending on position) not marked,
// not checkmarked, and is the dead encoding.
// These properties must be preserved when modifying the encoding.
//
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
// a live object due to mutations without write barriers or bugs in the
// collector implementation. As a sanity check, the GC has a 'checkmark'
// mode that retraverses the object graph with the world stopped, to make
// sure that everything that should be marked is marked.
// In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
// for the second word of the object holds the checkmark bit.
// When not in checkmark mode, this bit is set to 1.
//
// The smallest possible allocation is 8 bytes. On a 32-bit machine, that
// means every allocated object has two words, so there is room for the
// checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
// just one word, so the second bit pair is not available for encoding the
// checkmark. However, because non-pointer allocations are combined
// into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
// must be a pointer, so the type bit in the first word is not actually needed.
// It is still used in general, except in checkmark the type bit is repurposed
// as the checkmark bit and then reinitialized (to 1) as the type bit when
// finished.

package runtime

import "unsafe"

const (
bitPointer = 1 << 0
bitMarked = 1 << 4
heapBitsShift = 1 // shift offset between successive bitPointer or bitMarked entries
heapBitmapScale = ptrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
// all mark/pointer bits in a byte
bitMarkedAll = bitMarked | bitMarked<<heapBitsShift | bitMarked<<(2*heapBitsShift) | bitMarked<<(3*heapBitsShift)
bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)
// addb returns the byte pointer p+n.
//go:nowritebarrier
func addb(p *byte, n uintptr) *byte {
// Note: wrote out full expression instead of calling add(p, n)
// to reduce the number of temporaries generated by the
// compiler for this trivial expression during inlining.
return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}
// subtractb returns the byte pointer p-n.
//go:nowritebarrier
func subtractb(p *byte, n uintptr) *byte {
// Note: wrote out full expression instead of calling add(p, -n)
// to reduce the number of temporaries generated by the
// compiler for this trivial expression during inlining.
return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}
// add1 returns the byte pointer p+1.
//go:nowritebarrier
func add1(p *byte) *byte {
// Note: wrote out full expression instead of calling addb(p, 1)
// to reduce the number of temporaries generated by the
// compiler for this trivial expression during inlining.
return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}
// subtract1 returns the byte pointer p-1.
//go:nowritebarrier
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func subtract1(p *byte) *byte {
// Note: wrote out full expression instead of calling subtractb(p, 1)
// to reduce the number of temporaries generated by the
// compiler for this trivial expression during inlining.
return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
// mHeap_MapBits is called each time arena_used is extended.
// It maps any additional bitmap memory needed for the new arena memory.
// It must be called with the expected new value of arena_used,
// *before* h.arena_used has been updated.
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the bitmap immediately
// after observing the change to arena_used.
//
//go:nowritebarrier
func mHeap_MapBits(h *mheap, arena_used uintptr) {
// Caller has added extra mappings to the arena.
// Add extra mappings of bitmap words as needed.
// We allocate extra bitmap pieces in chunks of bitmapChunk.
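// For example, on a 64-bit system heapBitmapScale is 32, so a 1 MB arena
// needs 32 KB of bitmap, which is then rounded up to bitmapChunk and to
// the physical page size.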
const bitmapChunk = 8192
n := (arena_used - mheap_.arena_start) / heapBitmapScale
n = round(n, bitmapChunk)
n = round(n, _PhysPageSize)
if h.bitmap_mapped >= n {
return
}
sysMap(unsafe.Pointer(h.arena_start-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
h.bitmap_mapped = n
}
// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
bitp *uint8
shift uint32
}
// heapBitsForAddr returns the heapBits for the address addr.
// The caller must have already checked that addr is in the range [mheap_.arena_start, mheap_.arena_used).
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) heapBits {
// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
off := (addr - mheap_.arena_start) / ptrSize
return heapBits{(*uint8)(unsafe.Pointer(mheap_.arena_start - off/4 - 1)), uint32(off & 3)}
}
// heapBitsForSpan returns the heapBits for the span base address base.
func heapBitsForSpan(base uintptr) (hbits heapBits) {
if base < mheap_.arena_start || base >= mheap_.arena_used {
throw("heapBitsForSpan: base out of range")
}
hbits = heapBitsForAddr(base)
if hbits.shift != 0 {
throw("heapBitsForSpan: unaligned start")
}
return hbits
}
// heapBitsForObject returns the base address for the heap object
// containing the address p, along with the heapBits for base.
// If p does not point into a heap object,
// it returns base == 0;
// otherwise it returns the base of the object.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits, s *mspan) {
arenaStart := mheap_.arena_start
if p < arenaStart || p >= mheap_.arena_used {
return
}
off := p - arenaStart
idx := off >> _PageShift
// p points into the heap, but possibly to the middle of an object.
// Consult the span table to find the block beginning.
k := p >> _PageShift
s = h_spans[idx]
if s == nil || pageID(k) < s.start || p >= s.limit || s.state != mSpanInUse {
if s == nil || s.state == _MSpanStack {
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
// Pointers into stacks are also ok, the runtime manages these explicitly.
return
}
// The following ensures that we are rigorous about what data
// structures hold valid pointers.
if debug.invalidptr != 0 {
// Typically this indicates an incorrect use
// of unsafe or cgo to store a bad pointer in
// the Go heap. It may also indicate a runtime
// bug.
//
// TODO(austin): We could be more aggressive
// and detect pointers to unallocated objects
// in allocated spans.
printlock()
print("runtime: pointer ", hex(p))
if s.state != mSpanInUse {
print(" to unallocated span")
} else {
print(" to unused region of span")
}
print("idx=", hex(idx), " span.start=", hex(s.start<<_PageShift), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
if refBase != 0 {
print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
gcDumpObject("object", refBase, refOff)
}
throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
return
}
// If this span holds objects of a power-of-2 size, just mask the offset
// down to the start of the object. Otherwise use the size to compute the base.
if s.baseMask != 0 {
// optimize for power of 2 sized objects.
base = s.base()
base = base + (p-base)&s.baseMask
// base = p & s.baseMask is faster for small spans,
// but doesn't work for large spans.
// Overall, it's faster to use the more general computation above.
} else {
base = s.base()
if p-base >= s.elemsize {
// n := (p - base) / s.elemsize, using division by multiplication
n := uintptr(uint64(p-base) >> s.divShift * uint64(s.divMul) >> s.divShift2)
base += n * s.elemsize
}
}
// Now that we know the actual base, compute heapBits to return to caller.
hbits = heapBitsForAddr(base)
return
}
// prefetch the bits.
func (h heapBits) prefetch() {
prefetchnta(uintptr(unsafe.Pointer((h.bitp))))
}
// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
if h.shift < 3*heapBitsShift {
return heapBits{h.bitp, h.shift + heapBitsShift}
}
return heapBits{subtract1(h.bitp), 0}
}
// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
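// For example, if h.shift is 3 (the last entry in its byte), h.forward(2)
// refers to the byte at h.bitp-1 with shift 1: n becomes 2+3 = 5, the byte
// pointer steps down by 5/4 = 1 byte (the bitmap grows toward lower
// addresses), and the new shift is 5%4 = 1.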
func (h heapBits) forward(n uintptr) heapBits {
n += uintptr(h.shift) / heapBitsShift
return heapBits{subtractb(h.bitp, n/4), uint32(n%4) * heapBitsShift}
}
// bits returns the heap bits for the current word.
// The caller can test isMarked and isPointer by &-ing with bitMarked and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
func (h heapBits) bits() uint32 {
return uint32(*h.bitp) >> h.shift
}
// isMarked reports whether the heap bits have the marked bit set.
// h must describe the initial word of the object.
func (h heapBits) isMarked() bool {
return *h.bitp&(bitMarked<<h.shift) != 0
}
// setMarked sets the marked bit in the heap bits, atomically.
// h must describe the initial word of the object.
func (h heapBits) setMarked() {
// Each byte of GC bitmap holds info for four words.
// Might be racing with other updates, so use atomic update always.
// We used to be clever here and use a non-atomic update in certain
// cases, but it's not worth the risk.
atomicor8(h.bitp, bitMarked<<h.shift)
}
// setMarkedNonAtomic sets the marked bit in the heap bits, non-atomically.
// h must describe the initial word of the object.
func (h heapBits) setMarkedNonAtomic() {
*h.bitp |= bitMarked << h.shift
}
// isPointer reports whether the heap bits describe a pointer word.
// h must describe the initial word of the object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
return (*h.bitp>>h.shift)&bitPointer != 0
}
// hasPointers reports whether the given object has any pointers.
// It must be told how large the object at h is, so that it does not read too
// far into the bitmap.
// h must describe the initial word of the object.
func (h heapBits) hasPointers(size uintptr) bool {
if size == ptrSize { // 1-word objects are always pointers
return true
}
// Otherwise, at least a 2-word object, and at least 2-word aligned,
// so h.shift is either 0 or 2, so we know we can get the bits for the
// first two words out of *h.bitp.
// If either of the first two words is a pointer, not pointer free.
b := uint32(*h.bitp >> h.shift)
if b&(bitPointer|bitPointer<<heapBitsShift) != 0 {
return true
}
if size == 2*ptrSize {
return false
}
// At least a 4-word object. Check scan bit (aka marked bit) in third word.
if h.shift == 0 {
return b&(bitMarked<<(2*heapBitsShift)) != 0
}
return uint32(*subtract1(h.bitp))&bitMarked != 0
}
// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) isCheckmarked(size uintptr) bool {
if size == ptrSize {
return (*h.bitp>>h.shift)&bitPointer != 0
}
// All multiword objects are 2-word aligned,
// so we know that the initial word's 2-bit pair
// and the second word's 2-bit pair are in the
// same heap bitmap byte, *h.bitp.
return (*h.bitp>>(heapBitsShift+h.shift))&bitMarked != 0
}
// setCheckmarked sets the checkmarked bit.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
if size == ptrSize {
atomicor8(h.bitp, bitPointer<<h.shift)
return
}
atomicor8(h.bitp, bitMarked<<(heapBitsShift+h.shift))
}
// heapBitsBulkBarrier executes writebarrierptr_nostore
// for every pointer slot in the memory range [p, p+size),
// using the heap bitmap to locate those pointer slots.
// This executes the write barriers necessary after a memmove.
// Both p and size must be pointer-aligned.
// The range [p, p+size) must lie within a single allocation.
//
// Callers should call heapBitsBulkBarrier immediately after
// calling memmove(p, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
//
// The heap bitmap is not maintained for allocations containing
// no pointers at all; any caller of heapBitsBulkBarrier must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.kind&kindNoPointers.
//
//go:nosplit
func heapBitsBulkBarrier(p, size uintptr) {
if (p|size)&(ptrSize-1) != 0 {
throw("heapBitsBulkBarrier: unaligned arguments")
}
if !writeBarrierEnabled {
return
}
if !inheap(p) {
// If p is on the stack and in a higher frame than the
// caller, we either need to execute write barriers on
// it (which is what happens for normal stack writes
// through pointers to higher frames), or we need to
// force the mark termination stack scan to scan the
// frame containing p.
//
// Executing write barriers on p is complicated in the
// general case because we either need to unwind the
// stack to get the stack map, or we need the type's
// bitmap, which may be a GC program.
//
// Hence, we opt for forcing the re-scan to scan the
// frame containing p, which we can do by simply
// unwinding the stack barriers between the current SP
// and p's frame.
gp := getg().m.curg
if gp != nil && gp.stack.lo <= p && p < gp.stack.hi {
// Run on the system stack to give it more
// stack space.
systemstack(func() {
gcUnwindBarriers(gp, p)
})
}
return
}
h := heapBitsForAddr(p)
for i := uintptr(0); i < size; i += ptrSize {
if h.isPointer() {
x := (*uintptr)(unsafe.Pointer(p + i))
writebarrierptr_nostore(x, *x)
}
h = h.next()
}
}
// typeBitsBulkBarrier executes writebarrierptr_nostore
// for every pointer slot in the memory range [p, p+size),
// using the type bitmap to locate those pointer slots.
// The type typ must correspond exactly to [p, p+size).
// This executes the write barriers necessary after a copy.
// Both p and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right after memmove,
// and the GC must not complete between those two.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, p, size uintptr) {
if typ == nil {
throw("runtime: typeBitsBulkBarrier without type")
}
if typ.size != size {
println("runtime: typeBitsBulkBarrier with type ", *typ._string, " of size ", typ.size, " but memory size", size)
throw("runtime: invalid typeBitsBulkBarrier")
}
if typ.kind&kindGCProg != 0 {
println("runtime: typeBitsBulkBarrier with type ", *typ._string, " with GC prog")
throw("runtime: invalid typeBitsBulkBarrier")
}
if !writeBarrierEnabled {
return
}
ptrmask := typ.gcdata
var bits uint32
for i := uintptr(0); i < typ.ptrdata; i += ptrSize {
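// Load a fresh ptrmask byte every 8 pointer-sized words;
// each mask byte holds one bit per word for 8 words.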
if i&(ptrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
bits = bits >> 1
}
if bits&1 != 0 {
x := (*uintptr)(unsafe.Pointer(p + i))
writebarrierptr_nostore(x, *x)
}
}
}
// The methods operating on spans all require that h has been returned
// by heapBitsForSpan and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.
// initSpan initializes the heap bitmap for a span.
func (h heapBits) initSpan(size, n, total uintptr) {
if total%heapBitmapScale != 0 {
throw("initSpan: unaligned length")
}
nbyte := total / heapBitmapScale
if ptrSize == 8 && size == ptrSize {
end := h.bitp
bitp := subtractb(end, nbyte-1)
for {
*bitp = bitPointerAll
if bitp == end {
break
}
bitp = add1(bitp)
}
return
}
memclr(unsafe.Pointer(subtractb(h.bitp, nbyte-1)), nbyte)
}
// initCheckmarkSpan initializes a span for being checkmarked.
// It clears the checkmark bits, which are set to 1 in normal operation.
func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
// On 32-bit systems, ptrSize == 8 is a compile-time constant false, so the compiler eliminates this code entirely.
if ptrSize == 8 && size == ptrSize {
// Checkmark bit is type bit, bottom bit of every 2-bit entry.
// Only possible on 64-bit system, since minimum size is 8.
// Must clear type bit (checkmark bit) of every word.
// The type bit is the lower of every two-bit pair.
bitp := h.bitp
for i := uintptr(0); i < n; i += 4 {
*bitp &^= bitPointerAll
bitp = subtract1(bitp)
}
return
}
for i := uintptr(0); i < n; i++ {
*h.bitp &^= bitMarked << (heapBitsShift + h.shift)
h = h.forward(size / ptrSize)
}
}
// clearCheckmarkSpan undoes all the checkmarking in a span.
// The actual checkmark bits are ignored, so the only work to do
// is to fix the pointer bits. (Pointer bits are ignored by scanobject
// but consulted by typedmemmove.)
func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
// On 32-bit systems, ptrSize == 8 is a compile-time constant false, so the compiler eliminates this code entirely.
if ptrSize == 8 && size == ptrSize {
// Checkmark bit is type bit, bottom bit of every 2-bit entry.
// Only possible on 64-bit system, since minimum size is 8.
// Must clear type bit (checkmark bit) of every word.
// The type bit is the lower of every two-bit pair.
bitp := h.bitp
for i := uintptr(0); i < n; i += 4 {
*bitp |= bitPointerAll
bitp = subtract1(bitp)
}
}
}
// heapBitsSweepSpan coordinates the sweeping of a span by reading
// and updating the corresponding heap bitmap entries.
// For each free object in the span, heapBitsSweepSpan sets the type
// bits for the first two words (or one for single-word objects) to typeDead
// and then calls f(p), where p is the object's base address.
// f is expected to add the object to a free list.
// For non-free objects, heapBitsSweepSpan turns off the marked bit.
func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
h := heapBitsForSpan(base)
switch {
default:
throw("heapBitsSweepSpan")
case ptrSize == 8 && size == ptrSize:
// Consider mark bits in all four 2-bit entries of each bitmap byte.
bitp := h.bitp
for i := uintptr(0); i < n; i += 4 {
x := uint32(*bitp)
// Note that unlike the other size cases, we leave the pointer bits set here.
// These are initialized during initSpan when the span is created and left
// in place the whole time the span is used for pointer-sized objects.
// That lets heapBitsSetType avoid an atomic update to set the pointer bit
// during allocation.
if x&bitMarked != 0 {
x &^= bitMarked
} else {
f(base + i*ptrSize)
}
if x&(bitMarked<<heapBitsShift) != 0 {
x &^= bitMarked << heapBitsShift
} else {
f(base + (i+1)*ptrSize)
}
if x&(bitMarked<<(2*heapBitsShift)) != 0 {
x &^= bitMarked << (2 * heapBitsShift)
} else {
f(base + (i+2)*ptrSize)
}
if x&(bitMarked<<(3*heapBitsShift)) != 0 {
x &^= bitMarked << (3 * heapBitsShift)
} else {
f(base + (i+3)*ptrSize)
}
*bitp = uint8(x)
bitp = subtract1(bitp)
}
case size%(4*ptrSize) == 0:
// Mark bit is in first word of each object.
// Each object starts at bit 0 of a heap bitmap byte.
bitp := h.bitp
step := size / heapBitmapScale
for i := uintptr(0); i < n; i++ {
x := uint32(*bitp)
if x&bitMarked != 0 {
x &^= bitMarked
} else {
x = 0
f(base + i*size)
}
*bitp = uint8(x)
bitp = subtractb(bitp, step)
}
case size%(4*ptrSize) == 2*ptrSize:
// Mark bit is in first word of each object,
// but every other object starts halfway through a heap bitmap byte.
// Unroll loop 2x to handle alternating shift count and step size.
bitp := h.bitp
step := size / heapBitmapScale
var i uintptr
for i = uintptr(0); i < n; i += 2 {
x := uint32(*bitp)
if x&bitMarked != 0 {
x &^= bitMarked
} else {
x &^= bitMarked | bitPointer | (bitMarked|bitPointer)<<heapBitsShift
f(base + i*size)
if size > 2*ptrSize {
x = 0
}
}
*bitp = uint8(x)
if i+1 >= n {
break
}
bitp = subtractb(bitp, step)
x = uint32(*bitp)
if x&(bitMarked<<(2*heapBitsShift)) != 0 {
x &^= bitMarked << (2 * heapBitsShift)
} else {
x &^= (bitMarked|bitPointer)<<(2*heapBitsShift) | (bitMarked|bitPointer)<<(3*heapBitsShift)
f(base + (i+1)*size)
if size > 2*ptrSize {
*subtract1(bitp) = 0
}
}
*bitp = uint8(x)
bitp = subtractb(bitp, step+1)
}
}
}
// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
// There can only be one allocation from a given span active at a time,
// so this code is not racing with other instances of itself,
// and we don't allocate from a span until it has been swept,
// so this code is not racing with heapBitsSweepSpan.
// It is, however, racing with the concurrent GC mark phase,
// which can be setting the mark bit in the leading 2-bit entry
// of an allocated block. The block we are modifying is not quite
// allocated yet, so the GC marker is not racing with updates to x's bits,
// but if the start or end of x shares a bitmap byte with an adjacent
// object, the GC marker is racing with updates to those object's mark bits.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
const doubleCheck = false // slow but helpful; enable to test modifications to this code
// dataSize is always size rounded up to the next malloc size class,
// except in the case of allocating a defer block, in which case
// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
// arbitrarily larger.
//
// The checks for size == ptrSize and size == 2*ptrSize can therefore
// assume that dataSize == size without checking it explicitly.
if ptrSize == 8 && size == ptrSize {
// It's one word and it has pointers, so it must be a pointer.
// In general we'd need an atomic update here if the
// concurrent GC were marking objects in this span,
// because each bitmap byte describes 3 other objects
// in addition to the one being allocated.
// However, since all allocated one-word objects are pointers
// (non-pointers are aggregated into tinySize allocations),
// initSpan sets the pointer bits for us. Nothing to do here.
if doubleCheck {
h := heapBitsForAddr(x)
if !h.isPointer() {
throw("heapBitsSetType: pointer bit missing")
}
}
return
}
h := heapBitsForAddr(x)
ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
// Heap bitmap bits for 2-word object are only 4 bits,
// so also shared with objects next to it; use atomic updates.
// This is called out as a special case primarily for 32-bit systems,
// so that on 32-bit systems the code below can assume all objects
// are 4-word aligned (because they're all 16-byte aligned).
if size == 2*ptrSize {
if typ.size == ptrSize {
// We're allocating a block big enough to hold two pointers.
// On 64-bit, that means the actual object must be two pointers,
// or else we'd have used the one-pointer-sized block.
// On 32-bit, however, this is the 8-byte block, the smallest one.
// So it could be that we're allocating one pointer and this was
// just the smallest block available. Distinguish by checking dataSize.
// (In general the number of instances of typ being allocated is
// dataSize/typ.size.)
if ptrSize == 4 && dataSize == ptrSize {
// 1 pointer.
if gcphase == _GCoff {
*h.bitp |= bitPointer << h.shift
} else {
atomicor8(h.bitp, bitPointer<<h.shift)
}
} else {
// 2-element slice of pointer.
if gcphase == _GCoff {
*h.bitp |= (bitPointer | bitPointer<<heapBitsShift) << h.shift
} else {
atomicor8(h.bitp, (bitPointer|bitPointer<<heapBitsShift)<<h.shift)
}
}
return
}
// Otherwise typ.size must be 2*ptrSize, and typ.kind&kindGCProg == 0.
if doubleCheck {
if typ.size != 2*ptrSize || typ.kind&kindGCProg != 0 {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
throw("heapBitsSetType")
}
}
b := uint32(*ptrmask)
hb := b & 3
if gcphase == _GCoff {
*h.bitp |= uint8(hb << h.shift)
} else {
atomicor8(h.bitp, uint8(hb<<h.shift))
}
return
}
// Copy from 1-bit ptrmask into 2-bit bitmap.
// The basic approach is to use a single uintptr as a bit buffer,
// alternating between reloading the buffer and writing bitmap bytes.
// In general, one load can supply two bitmap byte writes.
// This is a lot of lines of code, but it compiles into relatively few
// machine instructions.
var (
// Ptrmask input.
p *byte // last ptrmask byte read
b uintptr // ptrmask bits already loaded
nb uintptr // number of bits in b at next read
endp *byte // final ptrmask byte to read (then repeat)
endnb uintptr // number of valid bits in *endp
pbits uintptr // alternate source of bits
// Heap bitmap output.
w uintptr // words processed
nw uintptr // number of words to process
hbitp *byte // next heap bitmap byte to write
hb uintptr // bits being prepared for *hbitp
)
hbitp = h.bitp
// Handle GC program. Delayed until this part of the code
// so that we can use the same double-checking mechanism
// as the 1-bit case. Nothing above could have encountered
// GC programs: the cases were all too small.
if typ.kind&kindGCProg != 0 {
heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
if doubleCheck {
// Double-check the heap bits written by GC program
// by running the GC program to create a 1-bit pointer mask
// and then jumping to the double-check code below.
// This doesn't catch bugs shared between the 1-bit and 4-bit
// GC program execution, but it does catch mistakes specific
// to just one of those and bugs in heapBitsSetTypeGCProg's
// implementation of arrays.
lock(&debugPtrmask.lock)
if debugPtrmask.data == nil {
debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
}
ptrmask = debugPtrmask.data
runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
goto Phase4
}
return
}
// Note about sizes:
//
// typ.size is the number of bytes in the object,
// and typ.ptrdata is the number of bytes in the prefix
// of the object that contains pointers. That is, the final
// typ.size - typ.ptrdata bytes contain no pointers.
// This allows optimization of a common pattern where
// an object has a small header followed by a large scalar
// buffer. If we know the pointers are over, we don't have
// to scan the buffer's heap bitmap at all.
// The 1-bit ptrmasks are sized to contain only bits for
// the typ.ptrdata prefix, zero padded out to a full byte
// of bitmap. This code sets nw (below) so that heap bitmap
// bits are only written for the typ.ptrdata prefix; if there is
// more room in the allocated object, the next heap bitmap
// entry is a 00, indicating that there are no more pointers
// to scan. So only the ptrmask for the ptrdata bytes is needed.
//
// Replicated copies are not as nice: if there is an array of
// objects with scalar tails, all but the last tail does have to
// be initialized, because there is no way to say "skip forward".
// However, because of the possibility of a repeated type with
// size not a multiple of 4 pointers (one heap bitmap byte),
// the code already must handle the last ptrmask byte specially
// by treating it as containing only the bits for endnb pointers,
// where endnb <= 4. We represent large scalar tails that must
// be expanded in the replication by setting endnb larger than 4.
// This will have the effect of reading many bits out of b,
// but once the real bits are shifted out, b will supply as many
// zero bits as we try to read, which is exactly what we need.
p = ptrmask
if typ.size < dataSize {
// Filling in bits for an array of typ.
// Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only a prefix of the type:
// it covers just the typ.ptrdata portion, since the tail of
// the type contains no pointers.
const maxBits = ptrSize*8 - 7
if typ.ptrdata/ptrSize <= maxBits {
// Entire ptrmask fits in uintptr with room for a byte fragment.
// Load into pbits and never read from ptrmask again.
// This is especially important when the ptrmask has
// fewer than 8 bits in it; otherwise the reload in the middle
// of the Phase 2 loop would itself need to loop to gather
// at least 8 bits.
// Accumulate ptrmask into b.
// ptrmask is sized to describe only typ.ptrdata, but we record
// it as describing typ.size bytes, since all the high bits are zero.
nb = typ.ptrdata / ptrSize
for i := uintptr(0); i < nb; i += 8 {
b |= uintptr(*p) << i
p = add1(p)
}
nb = typ.size / ptrSize
// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
// iterating by nb each time. (nb could be 1.)
// Since we loaded typ.ptrdata/ptrSize bits
// but are pretending to have typ.size/ptrSize,
// there might be no replication necessary/possible.
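// For example, a 3-word type on a 64-bit system (maxBits = 57) starts
// with nb = 3; doubling gives 6, 12, 24, 48, 96 bits, and the result is
// then truncated to maxBits/3*3 = 57 bits, a whole number of repetitions.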
pbits = b
endnb = nb
if nb+nb <= maxBits {
for endnb <= ptrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
// Truncate to a multiple of original ptrmask.
endnb = maxBits / nb * nb
pbits &= 1<<endnb - 1
b = pbits
nb = endnb
}
// Clear p and endp as sentinel for using pbits.
// Checked during Phase 2 loop.
p = nil
endp = nil
} else {
// Ptrmask is larger. Read it multiple times.
n := (typ.ptrdata/ptrSize+7)/8 - 1
endp = addb(ptrmask, n)
endnb = typ.size/ptrSize - n*8
}
}
if p != nil {
b = uintptr(*p)
p = add1(p)
nb = 8
}
if typ.size == dataSize {
// Single entry: can stop once we reach the non-pointer data.
nw = typ.ptrdata / ptrSize
} else {
// Repeated instances of typ in an array.
// Have to process first N-1 entries in full, but can stop
// once we reach the non-pointer data in the final entry.
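// For example (hypothetical sizes, ptrSize == 8): typ.size = 32,
// typ.ptrdata = 16, and dataSize = 96 (three elements) give
// nw = ((3-1)*32 + 16)/8 = 10 words; only the scalar tail of the
// final element is skipped.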
nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / ptrSize
}
if nw == 0 {
// No pointers! Caller was supposed to check.
println("runtime: invalid type ", *typ._string)
throw("heapBitsSetType: called with non-pointer type")
return
}
if nw < 2 {
// Must write at least 2 words, because the "no scan"
// encoding doesn't take effect until the third word.
nw = 2
}
// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==4).
// The leading byte is special because it contains the bits for words 0 and 1,
// which do not have the marked bits set.
// The leading half-byte is special because it's only half a byte and must be
// manipulated atomically.
switch {
default:
throw("heapBitsSetType: unexpected shift")
case h.shift == 0:
// Ptrmask and heap bitmap are aligned.
// Handle first byte of bitmap specially.
// The first byte we write out contains the first two words of the object.
// In those words, the mark bits are mark and checkmark, respectively,
// and must not be set. In all following words, we want to set the mark bit
// as a signal that the object continues to the next 2-bit entry in the bitmap.
hb = b & bitPointerAll
hb |= bitMarked<<(2*heapBitsShift) | bitMarked<<(3*heapBitsShift)
if w += 4; w >= nw {
goto Phase3
}
*hbitp = uint8(hb)
hbitp = subtract1(hbitp)
b >>= 4
nb -= 4
case ptrSize == 8 && h.shift == 2:
// Ptrmask and heap bitmap are misaligned.
// The bits for the first two words are in a byte shared with another object
// and must be updated atomically.
// NOTE(rsc): The atomic here may not be necessary.
// We took care of 1-word and 2-word objects above,
// so this is at least a 6-word object, so our start bits
// are shared only with the type bits of another object,
// not with its mark bit. Since there is only one allocation
// from a given span at a time, we should be able to set
// these bits non-atomically. Not worth the risk right now.
hb = (b & 3) << (2 * heapBitsShift)
b >>= 2
nb -= 2
// Note: no bitMarked in hb because the first two words don't get markers from us.
if gcphase == _GCoff {
*hbitp |= uint8(hb)
} else {
atomicor8(hbitp, uint8(hb))
}
hbitp = subtract1(hbitp)
if w += 2; w >= nw {
// We know that there is more data, because we handled 2-word objects above.
// This must be at least a 6-word object. If we're out of pointer words,
// mark no scan in next bitmap byte and finish.
hb = 0
w += 4
goto Phase3
}
}
// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
// The loop computes the bits for that last write but does not execute the write;
// it leaves the bits in hb for processing by phase 3.
// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
// use in the first half of the loop right now, and then we only adjust nb explicitly
// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
nb -= 4
for {
// Emit bitmap byte.
// b has at least nb+4 bits, with one exception:
// if w+4 >= nw, then b has only nw-w bits,
// but we'll stop at the break and then truncate
// appropriately in Phase 3.
hb = b & bitPointerAll
hb |= bitMarkedAll
if w += 4; w >= nw {
break
}
*hbitp = uint8(hb)
hbitp = subtract1(hbitp)
b >>= 4
// Load more bits. b has nb right now.
if p != endp {
// Fast path: keep reading from ptrmask.