# This file is a part of Julia. License is MIT: https://julialang.org/license
struct Signature
f::Any
ft::Any
argtypes::Vector{Any}
Signature(@nospecialize(f), @nospecialize(ft), argtypes::Vector{Any}) = new(f, ft, argtypes)
end
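# An illustrative sketch (not part of the implementation): for a call site
# `sin(1.0)` the signature would look roughly like
#     Signature(sin, typeof(sin), Any[typeof(sin), Float64])
# note that `argtypes` carries the type of the callee itself in its first slot.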
struct InliningTodo
# The MethodInstance to be inlined
mi::MethodInstance
# The IR of the inlinee
ir::IRCode
# The SpecInfo for the inlinee
spec_info::SpecInfo
# The DebugInfo table for the inlinee
di::DebugInfo
# If the function being inlined is a single basic block we can use a
# simpler inlining algorithm. This flag determines whether that's allowed
linear_inline_eligible::Bool
# Effects of the call statement
effects::Effects
end
function InliningTodo(mi::MethodInstance, ir::IRCode, spec_info::SpecInfo, di::DebugInfo, effects::Effects)
return InliningTodo(mi, ir, spec_info, di, linear_inline_eligible(ir), effects)
end
struct ConstantCase
val::Any
ConstantCase(@nospecialize val) = new(val)
end
struct SomeCase
val::Any
SomeCase(@nospecialize val) = new(val)
end
struct InvokeCase
invoke::MethodInstance
effects::Effects
info::CallInfo
end
struct InliningCase
sig # Type
item # Union{InliningTodo, InvokeCase, ConstantCase}
function InliningCase(@nospecialize(sig), @nospecialize(item))
@assert isa(item, Union{InliningTodo, InvokeCase, ConstantCase}) "invalid inlining item"
return new(sig, item)
end
end
struct UnionSplit
handled_all_cases::Bool # All possible dispatches are included in the cases
fully_covered::Bool # All handled cases are fully covering
atype::DataType
cases::Vector{InliningCase}
bbs::Vector{Int}
UnionSplit(handled_all_cases::Bool, fully_covered::Bool, atype::DataType, cases::Vector{InliningCase}) =
new(handled_all_cases, fully_covered, atype, cases, Int[])
end
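# A hedged reading of the two flags above: for a call `f(x::Union{Int,String})`
# where matches for both `f(::Int)` and `f(::String)` were found, every possible
# dispatch is among the cases (`handled_all_cases`) and the cases jointly cover
# the whole signature (`fully_covered`), so no fallback is needed. Otherwise a
# fallback block performs dynamic dispatch (when `!handled_all_cases`) or calls
# `Core.throw_methoderror` (when `!fully_covered`); see `ir_inline_unionsplit!`.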
struct InliningEdgeTracker
edges::Vector{Any}
invokesig::Union{Nothing,Vector{Any}}
InliningEdgeTracker(state::InliningState, invokesig::Union{Nothing,Vector{Any}}=nothing) =
new(state.edges, invokesig)
end
function add_inlining_backedge!((; edges, invokesig)::InliningEdgeTracker, mi::MethodInstance)
if invokesig === nothing
push!(edges, mi)
else # invoke backedge
push!(edges, invoke_signature(invokesig), mi)
end
return nothing
end
function ssa_inlining_pass!(ir::IRCode, state::InliningState, propagate_inbounds::Bool)
# Go through the function, performing simple inlining (e.g. replacing calls with constants
# and analyzing legality of inlining).
@timeit "analysis" todo = assemble_inline_todo!(ir, state)
isempty(todo) && return ir
# Do the actual inlining for every call we identified
@timeit "execution" ir = batch_inline!(ir, todo, propagate_inbounds, state.interp)
return ir
end
mutable struct CFGInliningState
new_cfg_blocks::Vector{BasicBlock}
todo_bbs::Vector{Tuple{Int, Int}}
first_bb::Int
bb_rename::Vector{Int}
dead_blocks::Vector{Int}
split_targets::BitSet
merged_orig_blocks::BitSet
cfg::CFG
end
function CFGInliningState(ir::IRCode)
CFGInliningState(
BasicBlock[],
Tuple{Int, Int}[],
0,
zeros(Int, length(ir.cfg.blocks)),
Vector{Int}(),
BitSet(),
BitSet(),
ir.cfg
)
end
# Tells the inliner that we're now inlining into block `block`, meaning
# all previous blocks have been processed and can be added to the new cfg
function inline_into_block!(state::CFGInliningState, block::Int)
if state.first_bb != block
new_range = state.first_bb+1:block
l = length(state.new_cfg_blocks)
state.bb_rename[new_range] = (l+1:l+length(new_range))
append!(state.new_cfg_blocks, (copy(block) for block in state.cfg.blocks[new_range]))
push!(state.merged_orig_blocks, last(new_range))
end
state.first_bb = block
return
end
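# A small worked example (illustrative): with original blocks 1..4, calling
# `inline_into_block!(state, 3)` while `first_bb == 1` copies blocks 2..3 into
# `new_cfg_blocks` and records their new positions in `bb_rename[2:3]`, so the
# later edge fixups can translate original block ids into the new numbering.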
function cfg_inline_item!(ir::IRCode, idx::Int, todo::InliningTodo, state::CFGInliningState, from_unionsplit::Bool=false)
inlinee_cfg = todo.ir.cfg
# Figure out if we need to split the BB
need_split_before = false
need_split = true
block = block_for_inst(ir, idx)
inline_into_block!(state, block)
if !isempty(inlinee_cfg.blocks[1].preds)
need_split_before = true
end
last_block_idx = last(state.cfg.blocks[block].stmts)
if false # TODO: ((idx+1) == last_block_idx && isa(ir[SSAValue(last_block_idx)], GotoNode))
need_split = false
post_bb_id = -ir[SSAValue(last_block_idx)][:stmt].label
else
post_bb_id = length(state.new_cfg_blocks) + length(inlinee_cfg.blocks) + (need_split_before ? 1 : 0)
need_split = true #!(idx == last_block_idx)
end
need_split || delete!(state.merged_orig_blocks, last(new_range))
push!(state.todo_bbs, (length(state.new_cfg_blocks) - 1 + (need_split_before ? 1 : 0), post_bb_id))
from_unionsplit || delete!(state.split_targets, length(state.new_cfg_blocks))
local orig_succs
need_split && (orig_succs = copy(state.new_cfg_blocks[end].succs))
empty!(state.new_cfg_blocks[end].succs)
if need_split_before
l = length(state.new_cfg_blocks)
bb_rename_range = (1+l:length(inlinee_cfg.blocks)+l)
push!(state.new_cfg_blocks[end].succs, length(state.new_cfg_blocks)+1)
append!(state.new_cfg_blocks, inlinee_cfg.blocks)
else
# Merge the last block that was already there with the first block we're adding
l = length(state.new_cfg_blocks)
bb_rename_range = (l:length(inlinee_cfg.blocks)+l-1)
append!(state.new_cfg_blocks[end].succs, inlinee_cfg.blocks[1].succs)
append!(state.new_cfg_blocks, inlinee_cfg.blocks[2:end])
end
if need_split
push!(state.new_cfg_blocks, BasicBlock(state.cfg.blocks[block].stmts,
Int[], orig_succs))
from_unionsplit || push!(state.split_targets, length(state.new_cfg_blocks))
end
new_block_range = (length(state.new_cfg_blocks)-length(inlinee_cfg.blocks)+1):length(state.new_cfg_blocks)
# Fixup the edges of the newly added blocks
for (old_block, new_block) in enumerate(bb_rename_range)
if old_block != 1 || need_split_before
p = state.new_cfg_blocks[new_block].preds
let bb_rename_range = bb_rename_range
map!(p, p) do old_pred_block
return old_pred_block == 0 ? 0 : bb_rename_range[old_pred_block]
end
end
end
if new_block != last(new_block_range)
s = state.new_cfg_blocks[new_block].succs
let bb_rename_range = bb_rename_range
map!(s, s) do old_succ_block
return bb_rename_range[old_succ_block]
end
end
end
end
if need_split_before
push!(state.new_cfg_blocks[first(bb_rename_range)].preds, first(bb_rename_range)-1)
end
any_edges = false
for (old_block, new_block) in enumerate(bb_rename_range)
if (length(state.new_cfg_blocks[new_block].succs) == 0)
terminator_idx = last(inlinee_cfg.blocks[old_block].stmts)
terminator = todo.ir[SSAValue(terminator_idx)][:stmt]
if isa(terminator, ReturnNode) && isdefined(terminator, :val)
any_edges = true
push!(state.new_cfg_blocks[new_block].succs, post_bb_id)
if need_split
push!(state.new_cfg_blocks[post_bb_id].preds, new_block)
end
end
end
end
any_edges || push!(state.dead_blocks, post_bb_id)
return nothing
end
function cfg_inline_unionsplit!(ir::IRCode, idx::Int, union_split::UnionSplit,
state::CFGInliningState, params::OptimizationParams)
(; handled_all_cases, fully_covered, #=atype,=# cases, bbs) = union_split
inline_into_block!(state, block_for_inst(ir, idx))
from_bbs = Int[]
delete!(state.split_targets, length(state.new_cfg_blocks))
orig_succs = copy(state.new_cfg_blocks[end].succs)
empty!(state.new_cfg_blocks[end].succs)
for i in 1:length(cases)
# The condition gets sunk into the previous block
# Add a block for the union-split body
push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx)))
cond_bb = length(state.new_cfg_blocks)-1
push!(state.new_cfg_blocks[end].preds, cond_bb)
push!(state.new_cfg_blocks[cond_bb].succs, cond_bb+1)
case = cases[i].item
if isa(case, InliningTodo)
if !case.linear_inline_eligible
cfg_inline_item!(ir, idx, case, state, true)
end
end
push!(from_bbs, length(state.new_cfg_blocks))
if !(i == length(cases) && (handled_all_cases && fully_covered))
# This block will have the next condition or the final else case
push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx)))
push!(state.new_cfg_blocks[cond_bb].succs, length(state.new_cfg_blocks))
push!(state.new_cfg_blocks[end].preds, cond_bb)
push!(bbs, length(state.new_cfg_blocks))
end
end
# The edge from the fallback block.
# NOTE This edge is only required for `!handled_all_cases` and not `!fully_covered`,
# since in the latter case we inline `Core.throw_methoderror` into the fallback
# block, which is must-throw, making the subsequent code path unreachable.
!handled_all_cases && push!(from_bbs, length(state.new_cfg_blocks))
# This block will be the block everyone returns to
push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx), from_bbs, orig_succs))
join_bb = length(state.new_cfg_blocks)
push!(state.split_targets, join_bb)
push!(bbs, join_bb)
for bb in from_bbs
push!(state.new_cfg_blocks[bb].succs, join_bb)
end
end
function finish_cfg_inline!(state::CFGInliningState)
new_range = (state.first_bb + 1):length(state.cfg.blocks)
state.bb_rename[new_range] = let
l = length(state.new_cfg_blocks)
l+1:l+length(new_range)
end
append!(state.new_cfg_blocks, state.cfg.blocks[new_range])
# Rename the edges of the original bbs
for (orig_bb, bb) in pairs(state.bb_rename)
p, s = state.new_cfg_blocks[bb].preds, state.new_cfg_blocks[bb].succs
map!(p, p) do pred_bb
pred_bb == length(state.bb_rename) && return length(state.new_cfg_blocks)
return state.bb_rename[pred_bb + 1] - 1
end
if !(orig_bb in state.merged_orig_blocks)
map!(s, s) do succ_bb
return state.bb_rename[succ_bb]
end
end
end
for bb in collect(state.split_targets)
s = state.new_cfg_blocks[bb].succs
map!(s, s) do succ_bb
return state.bb_rename[succ_bb]
end
end
# Rename any annotated original bb references
for bb in 1:length(state.new_cfg_blocks)
s = state.new_cfg_blocks[bb].succs
map!(s, s) do succ_bb
return succ_bb < 0 ? state.bb_rename[-succ_bb] : succ_bb
end
end
# Kill dead blocks
for block in state.dead_blocks
for succ in state.new_cfg_blocks[block].succs
kill_edge!(state.new_cfg_blocks, block, succ)
end
end
end
# TODO append `inlinee_debuginfo` to inner linetable when `inlined_at[2] ≠ 0`
function ir_inline_linetable!(debuginfo::DebugInfoStream, inlinee_debuginfo::DebugInfo, inlined_at::NTuple{3,Int32})
# Append the linetable of the inlined function to our edges table
linetable_offset = 1
while true
if linetable_offset > length(debuginfo.edges)
push!(debuginfo.edges, inlinee_debuginfo)
break
elseif debuginfo.edges[linetable_offset] === inlinee_debuginfo
break
end
linetable_offset += 1
end
return (inlined_at[1], Int32(linetable_offset), Int32(0))
end
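# A hedged reading of the returned triple: it is the new `inlined_at` location
# for the inlinee's statements, keeping the caller's statement index in the
# first slot, recording which entry of `debuginfo.edges` now holds the
# inlinee's DebugInfo in the second, and leaving the within-inlinee statement
# slot zero (it is filled in per-statement during inlining; see
# `ir_inline_item!`).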
function ir_prepare_inlining!(insert_node!::Inserter, inline_target::Union{IRCode, IncrementalCompact},
ir::IRCode, spec_info::SpecInfo, di::DebugInfo, mi::MethodInstance,
inlined_at::NTuple{3,Int32}, argexprs::Vector{Any})
def = mi.def::Method
debuginfo = inline_target isa IRCode ? inline_target.debuginfo : inline_target.ir.debuginfo
topline = new_inlined_at = ir_inline_linetable!(debuginfo, di, inlined_at)
if should_insert_coverage(def.module, di)
insert_node!(NewInstruction(Expr(:code_coverage_effect), Nothing, topline))
end
spvals_ssa = nothing
if !validate_sparams(mi.sparam_vals)
# N.B. This works on the caller-side argexprs, (i.e. before the va fixup below)
spvals_ssa = insert_node!(
removable_if_unused(NewInstruction(Expr(:call, Core._compute_sparams, def, argexprs...), SimpleVector, topline)))
end
if spec_info.isva
nargs_def = spec_info.nargs
if nargs_def > 0
argexprs = fix_va_argexprs!(insert_node!, inline_target, argexprs, nargs_def, topline)
end
end
if def.is_for_opaque_closure
# Replace the first argument by a load of the capture environment
argexprs[1] = insert_node!(
NewInstruction(Expr(:call, GlobalRef(Core, :getfield), argexprs[1], QuoteNode(:captures)),
ir.argtypes[1], topline))
end
return SSASubstitute(mi, argexprs, spvals_ssa, new_inlined_at)
end
function adjust_boundscheck!(inline_compact::IncrementalCompact, idx′::Int, stmt::Expr, boundscheck::Symbol)
if boundscheck === :off
isempty(stmt.args) && push!(stmt.args, false)
elseif boundscheck !== :propagate
isempty(stmt.args) && push!(stmt.args, true)
end
return nothing
end
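# For example (a sketch of the behavior above): when inlining under `@inbounds`
# (`boundscheck === :off`), a bare `Expr(:boundscheck)` in the inlinee becomes
# `Expr(:boundscheck, false)`, so guarded bounds checks fold away; under any
# mode other than `:off` and `:propagate` it is pinned to `true`, insulating
# the inlinee from the caller's inbounds state, while `:propagate` leaves the
# expression untouched.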
function ir_inline_item!(compact::IncrementalCompact, idx::Int, argexprs::Vector{Any},
item::InliningTodo, boundscheck::Symbol, todo_bbs::Vector{Tuple{Int, Int}})
# Ok, do the inlining here
inlined_at = compact.result[idx][:line]
ssa_substitute = ir_prepare_inlining!(InsertHere(compact), compact, item.ir, item.spec_info, item.di, item.mi, inlined_at, argexprs)
boundscheck = has_flag(compact.result[idx], IR_FLAG_INBOUNDS) ? :off : boundscheck
# If the iterator already moved on to the next basic block,
# temporarily re-open it again.
local return_value
# Special case inlining that maintains the current basic block if there's only one BB in the target
new_new_offset = length(compact.new_new_nodes)
late_fixup_offset = length(compact.late_fixup)
if item.linear_inline_eligible
#compact[idx] = nothing
inline_compact = IncrementalCompact(compact, item.ir, compact.result_idx)
@assert isempty(inline_compact.perm) && isempty(inline_compact.pending_perm) "linetable not in canonical form (missing compact call)"
for ((lineidx, idx′), stmt′) in inline_compact
# This dance is done to maintain accurate usage counts in the
# face of rename_arguments! mutating in place - should figure out
# something better eventually.
inline_compact[idx′] = nothing
# alter the line number information for InsertBefore to point to the current instruction in the new linetable
inline_compact[SSAValue(idx′)][:line] = (ssa_substitute.inlined_at[1], ssa_substitute.inlined_at[2], Int32(lineidx))
insert_node! = InsertBefore(inline_compact, SSAValue(idx′))
stmt′ = ssa_substitute_op!(insert_node!, inline_compact[SSAValue(idx′)], stmt′, ssa_substitute)
if isa(stmt′, ReturnNode)
val = stmt′.val
return_value = SSAValue(idx′)
inline_compact[idx′] = val
inline_compact.result[idx′][:type] =
argextype(val, isa(val, Argument) || isa(val, Expr) ? compact : inline_compact)
# Everything legal in value position is guaranteed to be effect free in stmt position
inline_compact.result[idx′][:flag] = IR_FLAGS_REMOVABLE
break
elseif isexpr(stmt′, :boundscheck)
adjust_boundscheck!(inline_compact, idx′, stmt′, boundscheck)
end
inline_compact[idx′] = stmt′
end
just_fixup!(inline_compact, new_new_offset, late_fixup_offset)
compact.result_idx = inline_compact.result_idx
else
bb_offset, post_bb_id = popfirst!(todo_bbs)
# This implements the need_split_before flag above
need_split_before = !isempty(item.ir.cfg.blocks[1].preds)
if need_split_before
finish_current_bb!(compact, 0)
end
pn = PhiNode()
#compact[idx] = nothing
inline_compact = IncrementalCompact(compact, item.ir, compact.result_idx)
@assert isempty(inline_compact.perm) && isempty(inline_compact.pending_perm) "linetable not in canonical form (missing compact call)"
for ((lineidx, idx′), stmt′) in inline_compact
inline_compact[idx′] = nothing
inline_compact[SSAValue(idx′)][:line] = (ssa_substitute.inlined_at[1], ssa_substitute.inlined_at[2], Int32(lineidx))
insert_node! = InsertBefore(inline_compact, SSAValue(idx′))
stmt′ = ssa_substitute_op!(insert_node!, inline_compact[SSAValue(idx′)], stmt′, ssa_substitute)
if isa(stmt′, ReturnNode)
if isdefined(stmt′, :val)
val = stmt′.val
@assert !isa(val, Expr) # GlobalRefs with side-effects are disallowed in value position in IRCode
push!(pn.edges, inline_compact.active_result_bb-1)
push!(pn.values, val)
stmt′ = GotoNode(post_bb_id)
end
elseif isa(stmt′, GotoNode)
stmt′ = GotoNode(stmt′.label + bb_offset)
elseif isa(stmt′, EnterNode)
stmt′ = EnterNode(stmt′, stmt′.catch_dest == 0 ? 0 : stmt′.catch_dest + bb_offset)
elseif isa(stmt′, GotoIfNot)
stmt′ = GotoIfNot(stmt′.cond, stmt′.dest + bb_offset)
elseif isa(stmt′, PhiNode)
stmt′ = PhiNode(Int32[edge+bb_offset for edge in stmt′.edges], stmt′.values)
elseif isexpr(stmt′, :boundscheck)
adjust_boundscheck!(inline_compact, idx′, stmt′, boundscheck)
end
inline_compact[idx′] = stmt′
end
just_fixup!(inline_compact, new_new_offset, late_fixup_offset)
compact.result_idx = inline_compact.result_idx
compact.active_result_bb = inline_compact.active_result_bb
if length(pn.edges) == 1
return_value = pn.values[1]
else
return_value = insert_node_here!(compact,
NewInstruction(pn, argextype(SSAValue(idx), compact), compact.result[idx][:line]))
end
end
return_value
end
function fix_va_argexprs!(insert_node!::Inserter, inline_target::Union{IRCode, IncrementalCompact},
argexprs::Vector{Any}, nargs_def::Int, line_idx::NTuple{3,Int32})
newargexprs = argexprs[1:(nargs_def-1)]
tuple_call = Expr(:call, TOP_TUPLE)
tuple_typs = Any[]
for i in nargs_def:length(argexprs)
arg = argexprs[i]
push!(tuple_call.args, arg)
push!(tuple_typs, argextype(arg, inline_target))
end
tuple_typ = tuple_tfunc(SimpleInferenceLattice.instance, tuple_typs)
tuple_inst = NewInstruction(tuple_call, tuple_typ, line_idx)
push!(newargexprs, insert_node!(tuple_inst))
return newargexprs
end
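# A hedged sketch of the vararg fixup: when inlining a definition `f(x, ys...)`
# at a call site `f(a, b, c)`, the trailing arguments are packed into a fresh
#     %tup = Core.tuple(b, c)
# (typed via `tuple_tfunc`) and the argument list is rewritten to end with
# `a, %tup`, so the inlinee sees `ys` bound to a single tuple argument.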
"""
ir_inline_unionsplit!
The primary purpose of this function is to emulate the dispatch behavior by generating flat
`isa`-checks that correspond to the signatures of union-split dispatch candidates.
These checks allow us to inline the method bodies into the respective `isa`-conditional blocks.
Note that two pre-conditions are required for this emulation to work correctly:
1. Ordered Dispatch Candidates
The dispatch candidates must be processed in order of their specificity.
The generated `isa`-checks should reflect this order,
especially since the method signatures may not be concrete.
For instance, with the methods:
f(x::Int) = ...
f(x::Number) = ...
f(x::Any) = ...
A correct `isa`-based dispatch emulation for the call site `f(x::Any)` would look like:
if isa(x, Int)
[inlined/resolved f(x::Int)]
elseif isa(x, Number)
[inlined/resolved f(x::Number)]
else
[inlined/resolved f(x::Any)]
end
`ml_matches` should already sort the matched method candidates correctly,
except in ambiguous cases, which we've already excluded at this stage.
2. Type Equality Constraints
Another factor is the type equality constraint imposed by type variables.
Simple `isa`-checks are insufficient to capture the semantics in some cases.
For example, given the following method definition:
g(x::T, y::T) where T<:Integer = ...
it is _invalid_ to optimize a call site like `g(x::Any, y::Any)` into:
if isa(x, Integer) && isa(y, Integer)
[inlined/resolved g(x::Integer, y::Integer)]
else
g(x, y) # fallback dynamic dispatch
end
since we also need to check that `x` and `y` are equal types.
But we've already excluded such cases at this point,
mainly by filtering out `case.sig::UnionAll`,
so there is no need to worry about type equality here.
In essence, we can process the dispatch candidates sequentially,
assuming their order stays the same post-discovery in `ml_matches`.
"""
function ir_inline_unionsplit!(compact::IncrementalCompact, idx::Int, argexprs::Vector{Any},
union_split::UnionSplit, boundscheck::Symbol,
todo_bbs::Vector{Tuple{Int,Int}}, interp::AbstractInterpreter)
(; handled_all_cases, fully_covered, atype, cases, bbs) = union_split
stmt, typ, line = compact.result[idx][:stmt], compact.result[idx][:type], compact.result[idx][:line]
join_bb = bbs[end]
pn = PhiNode()
local bb = compact.active_result_bb
ncases = length(cases)
@assert length(bbs) >= ncases
for i = 1:ncases
ithcase = cases[i]
mtype = ithcase.sig::DataType # checked within `handle_cases!`
case = ithcase.item
next_cond_bb = bbs[i]
cond = true
nparams = fieldcount(atype)
@assert nparams == fieldcount(mtype)
if !(i == ncases && fully_covered && handled_all_cases)
for i = 1:nparams
aft, mft = fieldtype(atype, i), fieldtype(mtype, i)
# If this is always true, we don't need to check for it
aft <: mft && continue
# Generate isa check
isa_expr = Expr(:call, isa, argexprs[i], mft)
isa_type = isa_tfunc(optimizer_lattice(interp), argextype(argexprs[i], compact), Const(mft))
ssa = insert_node_here!(compact, NewInstruction(isa_expr, isa_type, line))
if cond === true
cond = ssa
else
and_expr = Expr(:call, and_int, cond, ssa)
and_type = and_int_tfunc(optimizer_lattice(interp), argextype(cond, compact), isa_type)
cond = insert_node_here!(compact, NewInstruction(and_expr, and_type, line))
end
end
insert_node_here!(compact, NewInstruction(GotoIfNot(cond, next_cond_bb), Any, line))
end
bb = next_cond_bb - 1
finish_current_bb!(compact, 0)
argexprs′ = argexprs
if !isa(case, ConstantCase)
argexprs′ = copy(argexprs)
for i = 1:nparams
argex = argexprs[i]
(isa(argex, SSAValue) || isa(argex, Argument)) || continue
aft, mft = fieldtype(atype, i), fieldtype(mtype, i)
if !(aft <: mft)
𝕃ₒ = optimizer_lattice(interp)
narrowed_type = tmeet(𝕃ₒ, argextype(argex, compact), mft)
argexprs′[i] = insert_node_here!(compact,
NewInstruction(PiNode(argex, mft), narrowed_type, line))
end
end
end
if isa(case, InliningTodo)
val = ir_inline_item!(compact, idx, argexprs′, case, boundscheck, todo_bbs)
elseif isa(case, InvokeCase)
invoke_stmt = Expr(:invoke, case.invoke, argexprs′...)
flag = flags_for_effects(case.effects)
val = insert_node_here!(compact, NewInstruction(invoke_stmt, typ, case.info, line, flag))
else
case = case::ConstantCase
val = case.val
end
if !isempty(compact.cfg_transform.result_bbs[bb].preds)
push!(pn.edges, bb)
push!(pn.values, val)
insert_node_here!(compact,
NewInstruction(GotoNode(join_bb), Any, line))
else
insert_node_here!(compact,
NewInstruction(ReturnNode(), Union{}, line))
end
finish_current_bb!(compact, 0)
end
bb += 1
# We're now in the fall through block, decide what to do
if !handled_all_cases
ssa = insert_node_here!(compact, NewInstruction(stmt, typ, line))
push!(pn.edges, bb)
push!(pn.values, ssa)
insert_node_here!(compact, NewInstruction(GotoNode(join_bb), Any, line))
finish_current_bb!(compact, 0)
elseif !fully_covered
insert_node_here!(compact, NewInstruction(Expr(:call, GlobalRef(Core, :throw_methoderror), argexprs...), Union{}, line))
insert_node_here!(compact, NewInstruction(ReturnNode(), Union{}, line))
finish_current_bb!(compact, 0)
ncases == 0 && return insert_node_here!(compact, NewInstruction(nothing, Any, line))
end
# We're now in the join block.
return insert_node_here!(compact, NewInstruction(pn, typ, line))
end
function batch_inline!(ir::IRCode, todo::Vector{Pair{Int,Any}}, propagate_inbounds::Bool, interp::AbstractInterpreter)
params = OptimizationParams(interp)
# Compute the new CFG first (modulo statement ranges, which will be computed below)
state = CFGInliningState(ir)
for (idx, item) in todo
if isa(item, UnionSplit)
cfg_inline_unionsplit!(ir, idx, item, state, params)
else
item = item::InliningTodo
# A linear inline does not modify the CFG
item.linear_inline_eligible && continue
cfg_inline_item!(ir, idx, item, state, false)
end
end
finish_cfg_inline!(state)
boundscheck = propagate_inbounds ? :propagate : :default
let compact = IncrementalCompact(ir, CFGTransformState!(state.new_cfg_blocks, false))
# This need only be a lower bound on the final size; it is used as a size hint
nn = 0
for (_, item) in todo
if isa(item, InliningTodo)
nn += (length(item.ir.stmts) + length(item.ir.new_nodes))
end
end
nnewnodes = length(compact.result) + nn
resize!(compact, nnewnodes)
(inline_idx, item) = popfirst!(todo)
for ((old_idx, idx), stmt) in compact
if old_idx == inline_idx
stmt = stmt::Expr
if stmt.head === :invoke
argexprs = stmt.args[2:end]
else
@assert stmt.head === :call
argexprs = copy(stmt.args)
end
refinish = false
if compact.result_idx == first(compact.cfg_transform.result_bbs[compact.active_result_bb].stmts)
compact.active_result_bb -= 1
refinish = true
end
# It is possible for GlobalRefs and Exprs to be in argument position
# at this point in the IR, though in that case they are required
# to be effect-free. However, we must still move them out of argument
# position, since `Argument` is allowed in PhiNodes, but `GlobalRef`
# and `Expr` are not, so a substitution could anger the verifier.
for aidx in 1:length(argexprs)
aexpr = argexprs[aidx]
if isa(aexpr, Expr) || isa(aexpr, GlobalRef)
ninst = removable_if_unused(NewInstruction(aexpr, argextype(aexpr, compact), compact.result[idx][:line]))
argexprs[aidx] = insert_node_here!(compact, ninst)
end
end
if isa(item, InliningTodo)
compact.ssa_rename[old_idx] = ir_inline_item!(compact, idx, argexprs, item, boundscheck, state.todo_bbs)
elseif isa(item, UnionSplit)
compact.ssa_rename[old_idx] = ir_inline_unionsplit!(compact, idx, argexprs, item, boundscheck, state.todo_bbs, interp)
end
compact[idx] = nothing
refinish && finish_current_bb!(compact, 0)
if !isempty(todo)
(inline_idx, item) = popfirst!(todo)
else
inline_idx = -1
end
elseif isa(stmt, GotoNode)
compact[idx] = GotoNode(state.bb_rename[stmt.label])
elseif isa(stmt, EnterNode)
compact[idx] = EnterNode(stmt, stmt.catch_dest == 0 ? 0 : state.bb_rename[stmt.catch_dest])
elseif isa(stmt, GotoIfNot)
compact[idx] = GotoIfNot(stmt.cond, state.bb_rename[stmt.dest])
elseif isa(stmt, PhiNode)
compact[idx] = PhiNode(Int32[edge == length(state.bb_rename) ? length(state.new_cfg_blocks) : state.bb_rename[edge+1]-1 for edge in stmt.edges], stmt.values)
end
end
ir = finish(compact)
end
return ir
end
# This assumes the caller has verified that all arguments to the _apply_iterate call are Tuples.
function rewrite_apply_exprargs!(todo::Vector{Pair{Int,Any}},
ir::IRCode, idx::Int, stmt::Expr, argtypes::Vector{Any},
arginfos::Vector{MaybeAbstractIterationInfo}, arg_start::Int, istate::InliningState)
flag = ir.stmts[idx][:flag]
argexprs = stmt.args
new_argexprs = Any[argexprs[arg_start]]
new_argtypes = Any[argtypes[arg_start]]
# loop over original arguments and flatten any known iterators
for i in (arg_start+1):length(argexprs)
def = argexprs[i]
def_type = argtypes[i]
thisarginfo = arginfos[i-arg_start]
if thisarginfo === nothing || !thisarginfo.complete
if def_type isa PartialStruct
# def_type.typ <: Tuple is assumed
def_argtypes = def_type.fields
else
def_argtypes = Any[]
if isa(def_type, Const) # && isa(def_type.val, Union{Tuple, SimpleVector}) is implied
for p in def_type.val
push!(def_argtypes, Const(p))
end
else
ti = widenconst(def_type)::DataType # checked by `is_valid_type_for_apply_rewrite`
if ti.name === _NAMEDTUPLE_NAME
ti = ti.parameters[2]::DataType # checked by `is_valid_type_for_apply_rewrite`
end
for p in ti.parameters
if issingletontype(p)
# replace singleton types with their equivalent Const object
p = Const(p.instance)
elseif isconstType(p)
p = Const(p.parameters[1])
end
push!(def_argtypes, p)
end
end
end
# now push flattened types into new_argtypes and getfield exprs into new_argexprs
for j in 1:length(def_argtypes)
def_atype = def_argtypes[j]
if isa(def_atype, Const) && is_inlineable_constant(def_atype.val)
new_argexpr = quoted(def_atype.val)
else
new_call = Expr(:call, GlobalRef(Core, :getfield), def, j)
new_argexpr = insert_node!(ir, idx, NewInstruction(new_call, def_atype))
end
push!(new_argexprs, new_argexpr)
push!(new_argtypes, def_atype)
end
else
state = Core.svec()
for i = 1:length(thisarginfo.each)
call = thisarginfo.each[i]
new_stmt = Expr(:call, argexprs[2], def, state...)
state1 = insert_node!(ir, idx, NewInstruction(new_stmt, call.rt))
new_sig = call_sig(ir, new_stmt)::Signature
new_info = call.info
# See if we can inline this call to `iterate`
handle_call!(todo, ir, state1.id, new_stmt, new_info, flag, new_sig, istate)
if i != length(thisarginfo.each)
valT = getfield_tfunc(optimizer_lattice(istate.interp), call.rt, Const(1))
val_extracted = insert_node!(ir, idx, NewInstruction(
Expr(:call, GlobalRef(Core, :getfield), state1, 1),
valT))
push!(new_argexprs, val_extracted)
push!(new_argtypes, valT)
state_extracted = insert_node!(ir, idx, NewInstruction(
Expr(:call, GlobalRef(Core, :getfield), state1, 2),
getfield_tfunc(optimizer_lattice(istate.interp), call.rt, Const(2))))
state = Core.svec(state_extracted)
end
end
end
end
stmt.args = new_argexprs
return new_argtypes
end
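# A hedged sketch of the rewrite above: a statement like
#     Core._apply_iterate(iterate, g, (a, b))
# has its argument list flattened into direct `getfield` extractions,
#     %1 = Core.getfield((a, b), 1)
#     %2 = Core.getfield((a, b), 2)
# yielding a plain call `g(%1, %2)` when the tuple type is known precisely;
# for arguments with complete iteration info the `iterate` calls are
# materialized explicitly instead (the `thisarginfo` branch above).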
function compileable_specialization(mi::MethodInstance, effects::Effects,
et::InliningEdgeTracker, @nospecialize(info::CallInfo); compilesig_invokes::Bool=true)
mi_invoke = mi
method, atype, sparams = mi.def::Method, mi.specTypes, mi.sparam_vals
if compilesig_invokes
new_atype = get_compileable_sig(method, atype, sparams)
new_atype === nothing && return nothing
if atype !== new_atype
sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), new_atype, method.sig)::SimpleVector
if sparams === sp_[2]::SimpleVector
mi_invoke = specialize_method(method, new_atype, sparams)
mi_invoke === nothing && return nothing
end
end
else
# If this caller does not want us to optimize calls to use their
# declared compilesig, then it is also likely they would handle sparams
# incorrectly if there were any unknown typevars, so we conservatively return nothing
if any(@nospecialize(t)->isa(t, TypeVar), mi.sparam_vals)
return nothing
end
end
add_inlining_backedge!(et, mi) # to the dispatch lookup
mi_invoke !== mi && push!(et.edges, method.sig, mi_invoke) # add_inlining_backedge to the invoke call, if that is different
return InvokeCase(mi_invoke, effects, info)
end
function compileable_specialization(match::MethodMatch, effects::Effects,
et::InliningEdgeTracker, @nospecialize(info::CallInfo); compilesig_invokes::Bool=true)
mi = specialize_method(match)
return compileable_specialization(mi, effects, et, info; compilesig_invokes)
end
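# A hedged example of the compilesig widening above: for a method declared
# `f(@nospecialize x)`, an instance specialized as `Tuple{typeof(f), Int}` may
# be redirected to the broader, compileable instance for
# `Tuple{typeof(f), Any}`, so the emitted `:invoke` targets a specialization
# the compiler is actually willing to generate and cache.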
struct InferredResult
src::Any # CodeInfo or IRCode
effects::Effects
InferredResult(@nospecialize(src), effects::Effects) = new(src, effects)
end
@inline function get_cached_result(state::InliningState, mi::MethodInstance)
code = get(code_cache(state), mi, nothing)
if code isa CodeInstance
if use_const_api(code)
# in this case function can be inlined to a constant
return ConstantCase(quoted(code.rettype_const))
end
return code
end
return nothing
end
@inline function get_local_result(inf_result::InferenceResult)
effects = inf_result.ipo_effects
if is_foldable_nothrow(effects)
res = inf_result.result
if isa(res, Const) && is_inlineable_constant(res.val)
# use constant calling convention
return ConstantCase(quoted(res.val))
end
end
return InferredResult(inf_result.src, effects)
end
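# For example (sketch): if local inference proved the call returns `Const(42)`
# with foldable, nothrow effects, the call can be replaced outright by the
# quoted constant via `ConstantCase`; otherwise the locally inferred source is
# handed back for ordinary inlining.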
# the general resolver for usual and const-prop'ed calls
function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult,VolatileInferenceResult},
@nospecialize(info::CallInfo), flag::UInt32, state::InliningState;
invokesig::Union{Nothing,Vector{Any}}=nothing)
et = InliningEdgeTracker(state, invokesig)
preserve_local_sources = true
if isa(result, InferenceResult)
inferred_result = get_local_result(result)
elseif isa(result, VolatileInferenceResult)
inferred_result = get_local_result(result.inf_result)
# volatile inference result can be inlined destructively
preserve_local_sources = !result.inf_result.is_src_volatile | OptimizationParams(state.interp).preserve_local_sources
else
inferred_result = get_cached_result(state, mi)
end
if inferred_result isa ConstantCase
add_inlining_backedge!(et, mi)
return inferred_result
elseif inferred_result isa InferredResult
(; src, effects) = inferred_result
elseif inferred_result isa CodeInstance
src = @atomic :monotonic inferred_result.inferred
effects = decode_effects(inferred_result.ipo_purity_bits)
else # there is no cached source available, bail out
return compileable_specialization(mi, Effects(), et, info;
compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
end
# this check may have already been done within `analyze_method!`, but we still
# need it here since we may reach this point directly from a constant-prop' result
if !OptimizationParams(state.interp).inlining || is_stmt_noinline(flag)
return compileable_specialization(mi, effects, et, info;
compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
end
src_inlining_policy(state.interp, src, info, flag) ||
return compileable_specialization(mi, effects, et, info;
compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
add_inlining_backedge!(et, mi)
if inferred_result isa CodeInstance
ir, spec_info, debuginfo = retrieve_ir_for_inlining(inferred_result, src)
else
ir, spec_info, debuginfo = retrieve_ir_for_inlining(mi, src, preserve_local_sources)
end
return InliningTodo(mi, ir, spec_info, debuginfo, effects)
end
# the special resolver for :invoke-d call
function resolve_todo(mi::MethodInstance, @nospecialize(info::CallInfo), flag::UInt32,
state::InliningState)
if !OptimizationParams(state.interp).inlining || is_stmt_noinline(flag)
return nothing
end
et = InliningEdgeTracker(state)
cached_result = get_cached_result(state, mi)
if cached_result isa ConstantCase
add_inlining_backedge!(et, mi)
return cached_result
elseif cached_result isa CodeInstance
src = @atomic :monotonic cached_result.inferred
effects = decode_effects(cached_result.ipo_purity_bits)
else # there is no cached source available, bail out
return nothing
end
src_inlining_policy(state.interp, src, info, flag) || return nothing
ir, spec_info, debuginfo = retrieve_ir_for_inlining(cached_result, src)
add_inlining_backedge!(et, mi)
return InliningTodo(mi, ir, spec_info, debuginfo, effects)
end
function validate_sparams(sparams::SimpleVector)
for i = 1:length(sparams)
spᵢ = sparams[i]
(isa(spᵢ, TypeVar) || isvarargtype(spᵢ)) && return false
end
return true
end
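# For example (illustrative):
#     validate_sparams(Core.svec(Int, Float64)) # true
#     validate_sparams(Core.svec(TypeVar(:T))) # false
# an unresolved static parameter cannot be substituted statically and would
# have to be recomputed at runtime (see the `Core._compute_sparams` fallback
# in `ir_prepare_inlining!`).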
function may_have_fcalls(m::Method)
isdefined(m, :source) || return true
src = m.source
isa(src, MaybeCompressed) || return true
return ccall(:jl_ir_flag_has_fcall, Bool, (Any,), src)
end
function analyze_method!(match::MethodMatch, argtypes::Vector{Any},
@nospecialize(info::CallInfo), flag::UInt32, state::InliningState;
allow_typevars::Bool, invokesig::Union{Nothing,Vector{Any}}=nothing,
volatile_inf_result::Union{Nothing,VolatileInferenceResult}=nothing)
method = match.method
spec_types = match.spec_types
# Check that we have the correct number of arguments
na = Int(method.nargs)
npassedargs = length(argtypes)
if na != npassedargs && !(na > 0 && method.isva)
# we have a method match only because an earlier
# inference step shortened our call args list, even
# though we have too many arguments to actually
# call this function
return nothing
end
if !match.fully_covers
# type-intersection was not able to give us a simple list of types, so
# ir_inline_unionsplit won't be able to deal with inlining this
if !(spec_types isa DataType && length(spec_types.parameters) == npassedargs &&
!isvarargtype(spec_types.parameters[end]))
return nothing
end
end
if !validate_sparams(match.sparams)
(allow_typevars && !may_have_fcalls(match.method)) || return nothing
end
# Get the specialization for this method signature
# (later we will decide what to do with it)
mi = specialize_method(match)
return resolve_todo(mi, volatile_inf_result, info, flag, state; invokesig)
end
function retrieve_ir_for_inlining(cached_result::CodeInstance, src::String)
src = _uncompressed_ir(cached_result, src)
return inflate_ir!(src, cached_result.def), SpecInfo(src), src.debuginfo
end
function retrieve_ir_for_inlining(cached_result::CodeInstance, src::CodeInfo)
return inflate_ir!(copy(src), cached_result.def), SpecInfo(src), src.debuginfo
end
function retrieve_ir_for_inlining(mi::MethodInstance, src::CodeInfo, preserve_local_sources::Bool)
if preserve_local_sources
src = copy(src)
end
return inflate_ir!(src, mi), SpecInfo(src), src.debuginfo
end
function retrieve_ir_for_inlining(mi::MethodInstance, ir::IRCode, preserve_local_sources::Bool)
if preserve_local_sources
ir = copy(ir)
end
# COMBAK this is not correct, we should make `InferenceResult` propagate `SpecInfo`
spec_info = let m = mi.def::Method
SpecInfo(Int(m.nargs), m.isva, false, nothing)
end
ir.debuginfo.def = mi
return ir, spec_info, DebugInfo(ir.debuginfo, length(ir.stmts))
end
function handle_single_case!(todo::Vector{Pair{Int,Any}},
ir::IRCode, idx::Int, stmt::Expr, @nospecialize(case),
isinvoke::Bool = false)