diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl
index 21303f527e3f2..12eb9a03e323b 100644
--- a/base/compiler/abstractinterpretation.jl
+++ b/base/compiler/abstractinterpretation.jl
@@ -1181,7 +1181,7 @@ function const_prop_methodinstance_heuristic(interp::AbstractInterpreter,
         if isa(code, CodeInstance)
             inferred = @atomic :monotonic code.inferred
             # TODO propagate a specific `CallInfo` that conveys information about this call
-            if inlining_policy(interp, inferred, NoCallInfo(), IR_FLAG_NULL) !== nothing
+            if src_inlining_policy(interp, inferred, NoCallInfo(), IR_FLAG_NULL)
                 return true
             end
         end
diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl
index 0cc79cb92df6b..663fd78c90dba 100644
--- a/base/compiler/inferencestate.jl
+++ b/base/compiler/inferencestate.jl
@@ -800,7 +800,7 @@ function IRInterpretationState(interp::AbstractInterpreter,
     @assert code.def === mi
     src = @atomic :monotonic code.inferred
     if isa(src, String)
-        src = _uncompressed_ir(mi.def, src)
+        src = _uncompressed_ir(code, src)
     else
         isa(src, CodeInfo) || return nothing
     end
diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl
index 3172c44c81158..d3c680f9308d3 100644
--- a/base/compiler/optimize.jl
+++ b/base/compiler/optimize.jl
@@ -107,21 +107,23 @@ is_declared_noinline(@nospecialize src::MaybeCompressed) =
 # OptimizationState #
 #####################

-function inlining_policy(interp::AbstractInterpreter,
+# return whether this src should be inlined. If so, retrieve_ir_for_inlining must return an IRCode from it
+function src_inlining_policy(interp::AbstractInterpreter,
     @nospecialize(src), @nospecialize(info::CallInfo), stmt_flag::UInt32)
     if isa(src, MaybeCompressed)
         src_inlineable = is_stmt_inline(stmt_flag) || is_inlineable(src)
-        return src_inlineable ? src : nothing
+        return src_inlineable
     elseif isa(src, IRCode)
-        return src
+        return true
     elseif isa(src, SemiConcreteResult)
-        return src
-    elseif isa(src, CodeInstance)
-        return inlining_policy(interp, src.inferred, info, stmt_flag)
+        return true
     end
-    return nothing
+    @assert !isa(src, CodeInstance) # handled by caller
+    return false
 end

+function inlining_policy end # deprecated legacy name used by Cthulhu
+
 struct InliningState{Interp<:AbstractInterpreter}
     edges::Vector{Any}
     world::UInt
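For context, the rename above changes the contract from "return the inlineable source or `nothing`" to a plain Bool. A minimal sketch of what a downstream override looks like once this patch is applied; the `MyInterp` type is hypothetical and only mirrors the test updates further down in this patch:

    const CC = Core.Compiler

    struct MyInterp <: CC.AbstractInterpreter end  # hypothetical interpreter, for illustration only

    function CC.src_inlining_policy(interp::MyInterp,
        @nospecialize(src), @nospecialize(info::CC.CallInfo), stmt_flag::UInt32)
        # a real interpreter could veto inlining here by returning false;
        # otherwise defer to the default boolean policy
        return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter,
            src::Any, info::CC.CallInfo, stmt_flag::UInt32)
    end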
diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl
index d4228cf3c4454..a89583494ee13 100644
--- a/base/compiler/ssair/inlining.jl
+++ b/base/compiler/ssair/inlining.jl
@@ -838,7 +838,7 @@ function compileable_specialization(match::MethodMatch, effects::Effects,
 end

 struct InferredResult
-    src::Any
+    src::Any # CodeInfo or IRCode
     effects::Effects
     InferredResult(@nospecialize(src), effects::Effects) = new(src, effects)
 end
@@ -849,11 +849,9 @@ end
             # in this case function can be inlined to a constant
             return ConstantCase(quoted(code.rettype_const))
         end
-        src = @atomic :monotonic code.inferred
-        effects = decode_effects(code.ipo_purity_bits)
-        return InferredResult(src, effects)
+        return code
     end
-    return InferredResult(nothing, Effects())
+    return nothing
 end

 @inline function get_local_result(inf_result::InferenceResult)
     effects = inf_result.ipo_effects
@@ -887,7 +885,15 @@ function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult,
         add_inlining_backedge!(et, mi)
         return inferred_result
     end
-    (; src, effects) = inferred_result
+    if inferred_result isa InferredResult
+        (; src, effects) = inferred_result
+    elseif inferred_result isa CodeInstance
+        src = @atomic :monotonic inferred_result.inferred
+        effects = decode_effects(inferred_result.ipo_purity_bits)
+    else
+        src = nothing
+        effects = Effects()
+    end

     # the duplicated check might have been done already within `analyze_method!`, but still
     # we need it here too since we may come here directly using a constant-prop' result
@@ -896,12 +902,13 @@ function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult,
             compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
     end

-    src = inlining_policy(state.interp, src, info, flag)
-    src === nothing && return compileable_specialization(mi, effects, et, info;
-        compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
+    src_inlining_policy(state.interp, src, info, flag) ||
+        return compileable_specialization(mi, effects, et, info;
+            compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)

     add_inlining_backedge!(et, mi)
-    ir = retrieve_ir_for_inlining(mi, src, preserve_local_sources)
+    ir = inferred_result isa CodeInstance ? retrieve_ir_for_inlining(inferred_result, src) :
+        retrieve_ir_for_inlining(mi, src, preserve_local_sources)
     return InliningTodo(mi, ir, effects)
 end
@@ -919,14 +926,22 @@ function resolve_todo(mi::MethodInstance, @nospecialize(info::CallInfo), flag::U
         add_inlining_backedge!(et, mi)
         return cached_result
     end
-    (; src, effects) = cached_result
-
-    src = inlining_policy(state.interp, src, info, flag)
-
-    src === nothing && return nothing
+    if cached_result isa InferredResult
+        (; src, effects) = cached_result
+    elseif cached_result isa CodeInstance
+        src = @atomic :monotonic cached_result.inferred
+        effects = decode_effects(cached_result.ipo_purity_bits)
+    else
+        src = nothing
+        effects = Effects()
+    end
+    preserve_local_sources = true
+    src_inlining_policy(state.interp, src, info, flag) || return nothing
+    ir = cached_result isa CodeInstance ? retrieve_ir_for_inlining(cached_result, src) :
+        retrieve_ir_for_inlining(mi, src, preserve_local_sources)
     add_inlining_backedge!(et, mi)
-    return InliningTodo(mi, retrieve_ir_for_inlining(mi, src), effects)
+    return InliningTodo(mi, ir, effects)
 end

 function validate_sparams(sparams::SimpleVector)
@@ -979,17 +994,17 @@ function analyze_method!(match::MethodMatch, argtypes::Vector{Any},
     return resolve_todo(mi, volatile_inf_result, info, flag, state; invokesig)
 end

-function retrieve_ir_for_inlining(mi::MethodInstance, src::String, ::Bool=true)
-    src = _uncompressed_ir(mi.def, src)
-    return inflate_ir!(src, mi)
+function retrieve_ir_for_inlining(cached_result::CodeInstance, src::MaybeCompressed)
+    src = _uncompressed_ir(cached_result, src)::CodeInfo
+    return inflate_ir!(src, cached_result.def)
 end
-function retrieve_ir_for_inlining(mi::MethodInstance, src::CodeInfo, preserve_local_sources::Bool=true)
+function retrieve_ir_for_inlining(mi::MethodInstance, src::CodeInfo, preserve_local_sources::Bool)
     if preserve_local_sources
         src = copy(src)
     end
     return inflate_ir!(src, mi)
 end
-function retrieve_ir_for_inlining(::MethodInstance, ir::IRCode, preserve_local_sources::Bool=true)
+function retrieve_ir_for_inlining(mi::MethodInstance, ir::IRCode, preserve_local_sources::Bool)
     if preserve_local_sources
         ir = copy(ir)
     end
@@ -1494,13 +1509,13 @@ function semiconcrete_result_item(result::SemiConcreteResult,
         return compileable_specialization(mi, result.effects, et, info;
             compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
     end
-    ir = inlining_policy(state.interp, result.ir, info, flag)
-    ir === nothing && return compileable_specialization(mi, result.effects, et, info;
-        compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
+    src_inlining_policy(state.interp, result.ir, info, flag) ||
+        return compileable_specialization(mi, result.effects, et, info;
+            compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes)
     add_inlining_backedge!(et, mi)
     preserve_local_sources = OptimizationParams(state.interp).preserve_local_sources
-    ir = retrieve_ir_for_inlining(mi, ir, preserve_local_sources)
+    ir = retrieve_ir_for_inlining(mi, result.ir, preserve_local_sources)
     return InliningTodo(mi, ir, result.effects)
 end
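The policy and the retrieval step are now paired: `src_inlining_policy` only answers yes or no, and the matching `retrieve_ir_for_inlining` method turns the approved source into an `IRCode`. A rough sketch of the flow for a globally cached result, assuming this patch; the helper name `ir_for_cached_result` and its arguments are illustrative, not part of the patch:

    const CC = Core.Compiler

    function ir_for_cached_result(interp, codeinst::Core.CodeInstance, info, flag::UInt32)
        src = @atomic :monotonic codeinst.inferred
        # boolean policy decision first (false for `nothing` and unknown wrappers) ...
        CC.src_inlining_policy(interp, src, info, flag) || return nothing
        # ... then decompress/inflate against the CodeInstance, which also records its context
        return CC.retrieve_ir_for_inlining(codeinst, src)
    end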
diff --git a/base/compiler/ssair/legacy.jl b/base/compiler/ssair/legacy.jl
index b1ce14f28cf14..3e9a4e2a746dc 100644
--- a/base/compiler/ssair/legacy.jl
+++ b/base/compiler/ssair/legacy.jl
@@ -55,6 +55,8 @@ Mainly used for testing or interactive use.
 inflate_ir(ci::CodeInfo, linfo::MethodInstance) = inflate_ir!(copy(ci), linfo)
 inflate_ir(ci::CodeInfo, sptypes::Vector{VarState}, argtypes::Vector{Any}) = inflate_ir!(copy(ci), sptypes, argtypes)
 function inflate_ir(ci::CodeInfo)
+    parent = ci.parent
+    isa(parent, MethodInstance) && return inflate_ir(ci, parent)
     # XXX the length of `ci.slotflags` may be different from the actual number of call
     # arguments, but we really don't know that information in this case
     argtypes = Any[ Any for i = 1:length(ci.slotflags) ]
diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl
index 5ea21c85427be..0d855f38cd286 100644
--- a/base/compiler/ssair/passes.jl
+++ b/base/compiler/ssair/passes.jl
@@ -1492,12 +1492,11 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int,
         end
         src = @atomic :monotonic code.inferred
     else
-        src = nothing
+        return false
     end

-    src = inlining_policy(inlining.interp, src, info, IR_FLAG_NULL)
-    src === nothing && return false
-    src = retrieve_ir_for_inlining(mi, src)
+    src_inlining_policy(inlining.interp, src, info, IR_FLAG_NULL) || return false
+    src = retrieve_ir_for_inlining(code, src)

     # For now: Require finalizer to only have one basic block
     length(src.cfg.blocks) == 1 || return false
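Because the lookup and decompression paths in this patch now stamp `CodeInfo.parent`, the single-argument `inflate_ir` above can recover the precise argument types instead of widening every slot to `Any`. A small usage sketch, assuming this patch is applied; whether `parent` is populated depends on how the `CodeInfo` was obtained:

    ci, _ = only(code_typed(sin, (Float64,)))
    # after this patch, inflate_ir consults ci.parent (a MethodInstance) when it is
    # available; when parent is `nothing` it still falls back to Any-typed slots as before
    ir = Core.Compiler.inflate_ir(ci)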
diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl
index 783bea60e3575..5af074498ca7e 100644
--- a/base/compiler/typeinfer.jl
+++ b/base/compiler/typeinfer.jl
@@ -310,11 +310,20 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult;
         end
     end
     relocatability = 0x0
+    owner = cache_owner(interp)
     if const_flags == 0x3 && can_discard_trees
         inferred_result = nothing
         relocatability = 0x1
     else
         inferred_result = transform_result_for_cache(interp, result.linfo, result.valid_worlds, result, can_discard_trees)
+        if inferred_result isa CodeInfo
+            uncompressed = inferred_result
+            inferred_result = maybe_compress_codeinfo(interp, result.linfo, inferred_result, can_discard_trees)
+            result.is_src_volatile |= uncompressed !== inferred_result
+        elseif owner === nothing
+            # The global cache can only handle objects that codegen understands
+            inferred_result = nothing
+        end
         if isa(inferred_result, String)
             t = @_gc_preserve_begin inferred_result
             relocatability = unsafe_load(unsafe_convert(Ptr{UInt8}, inferred_result), Core.sizeof(inferred_result))
@@ -323,8 +332,8 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult;
             relocatability = 0x1
         end
     end
-    # relocatability = isa(inferred_result, String) ? inferred_result[end] : UInt8(0)
-    return CodeInstance(result.linfo, cache_owner(interp),
+    # n.b. relocatability = (isa(inferred_result, String) && inferred_result[end]) || inferred_result === nothing
+    return CodeInstance(result.linfo, owner,
         widenconst(result_type), widenconst(result.exc_result), rettype_const, inferred_result,
         const_flags, first(result.valid_worlds), last(result.valid_worlds),
         # TODO: Actually do something with non-IPO effects
@@ -332,6 +341,12 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult;
         relocatability)
 end

+function transform_result_for_cache(interp::AbstractInterpreter,
+        linfo::MethodInstance, valid_worlds::WorldRange, result::InferenceResult,
+        can_discard_trees::Bool=may_discard_trees(interp))
+    return result.src
+end
+
 function maybe_compress_codeinfo(interp::AbstractInterpreter, linfo::MethodInstance, ci::CodeInfo,
                                  can_discard_trees::Bool=may_discard_trees(interp))
     def = linfo.def
@@ -354,22 +369,6 @@ function maybe_compress_codeinfo(interp::AbstractInterpreter, linfo::MethodInsta
     end
 end

-function transform_result_for_cache(interp::AbstractInterpreter,
-        linfo::MethodInstance, valid_worlds::WorldRange, result::InferenceResult,
-        can_discard_trees::Bool=may_discard_trees(interp))
-    inferred_result = result.src
-    if inferred_result isa CodeInfo
-        uncompressed = inferred_result
-        inferred_result = maybe_compress_codeinfo(interp, linfo, inferred_result, can_discard_trees)
-        result.is_src_volatile |= uncompressed !== inferred_result
-    end
-    # The global cache can only handle objects that codegen understands
-    if !isa(inferred_result, MaybeCompressed)
-        inferred_result = nothing
-    end
-    return inferred_result
-end
-
 function cache_result!(interp::AbstractInterpreter, result::InferenceResult)
     if last(result.valid_worlds) == get_world_counter()
         # if we've successfully recorded all of the backedges in the global reverse-cache,
@@ -874,7 +873,7 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
             exc_bestguess = refine_exception_type(frame.exc_bestguess, effects)
             # propagate newly inferred source to the inliner, allowing efficient inlining w/o deserialization:
            # note that this result is cached globally exclusively, we can use this local result destructively
-            volatile_inf_result = isinferred && (force_inline || inlining_policy(interp, result.src, NoCallInfo(), IR_FLAG_NULL) !== nothing) ?
+            volatile_inf_result = isinferred && (force_inline || src_inlining_policy(interp, result.src, NoCallInfo(), IR_FLAG_NULL)) ?
                VolatileInferenceResult(result) : nothing
            return EdgeCallResult(frame.bestguess, exc_bestguess, edge, effects, volatile_inf_result)
        elseif frame === true
@@ -930,6 +929,7 @@ function codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, @no
     tree.linetable = LineInfoNode[LineInfoNode(method.module, method.name, method.file, method.line, Int32(0))]
     tree.ssaflags = UInt32[0]
     set_inlineable!(tree, true)
+    tree.parent = mi
     return tree
 end

@@ -1091,6 +1091,8 @@ function ci_meets_requirement(code::CodeInstance, source_mode::UInt8, ci_is_cach
     return false
 end

+_uncompressed_ir(ci::Core.CodeInstance, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any, Any), ci.def.def::Method, ci, s)::CodeInfo
+
 # compute (and cache) an inferred AST and return type
 function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mode::UInt8)
     start_time = ccall(:jl_typeinf_timing_begin, UInt64, ())
diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl
index 66b5d3d15624b..c2c36920bd574 100644
--- a/base/compiler/utilities.jl
+++ b/base/compiler/utilities.jl
@@ -143,12 +143,13 @@ function retrieve_code_info(linfo::MethodInstance, world::UInt)
             # can happen in images built with --strip-ir
             return nothing
         elseif isa(src, String)
-            c = _uncompressed_ir(def, src)
+            c = ccall(:jl_uncompress_ir, Any, (Any, Ptr{Cvoid}, Any), def, C_NULL, src)
         else
            c = copy(src::CodeInfo)
        end
    end
    if c isa CodeInfo
+        c.parent = linfo
        return c
    end
    return nothing
end
diff --git a/base/reflection.jl b/base/reflection.jl
index 7f7d3ccf0a144..7a7d60ba25f1e 100644
--- a/base/reflection.jl
+++ b/base/reflection.jl
@@ -1313,12 +1313,14 @@ function length(mt::Core.MethodTable)
 end
 isempty(mt::Core.MethodTable) = (mt.defs === nothing)

-uncompressed_ir(m::Method) = isdefined(m, :source) ? _uncompressed_ir(m, m.source) :
+uncompressed_ir(m::Method) = isdefined(m, :source) ? _uncompressed_ir(m) :
                              isdefined(m, :generator) ? error("Method is @generated; try `code_lowered` instead.") :
                              error("Code for this Method is not available.")
-_uncompressed_ir(m::Method, s::CodeInfo) = copy(s)
-_uncompressed_ir(m::Method, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any), m, s)::CodeInfo
-_uncompressed_ir(ci::Core.CodeInstance, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any), ci.def.def::Method, s)::CodeInfo
+function _uncompressed_ir(m::Method)
+    s = m.source
+    s isa String && (s = ccall(:jl_uncompress_ir, Any, (Any, Ptr{Cvoid}, Any), m, C_NULL, s))
+    return s::CodeInfo
+end
 # for backwards compat
 const uncompressed_ast = uncompressed_ir
 const _uncompressed_ast = _uncompressed_ir
@@ -1632,7 +1634,7 @@ function get_oc_code_rt(@nospecialize(oc::Core.OpaqueClosure))
     ccall(:jl_is_in_pure_context, Bool, ()) && error("code reflection cannot be used from generated functions")
     m = oc.source
     if isa(m, Method)
-        code = _uncompressed_ir(m, m.source)
+        code = _uncompressed_ir(m)
         return Pair{CodeInfo,Any}(code, typeof(oc).parameters[2])
     else
         error("encountered invalid Core.OpaqueClosure object")
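On the reflection side the helpers now take just the `Method` (or a `CodeInstance` plus its compressed string), so callers stop passing `m.source` themselves. A usage sketch:

    m = which(sin, (Float64,))
    src = Base.uncompressed_ir(m)    # decompresses m.source when it is stored as a String
    @assert src isa Core.CodeInfo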
error("Method is @generated; try `code_lowered` instead.") : error("Code for this Method is not available.") -_uncompressed_ir(m::Method, s::CodeInfo) = copy(s) -_uncompressed_ir(m::Method, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any), m, s)::CodeInfo -_uncompressed_ir(ci::Core.CodeInstance, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any), ci.def.def::Method, s)::CodeInfo +function _uncompressed_ir(m::Method) + s = m.source + s isa String && (s = ccall(:jl_uncompress_ir, Any, (Any, Ptr{Cvoid}, Any), m, C_NULL, s)) + return s::CodeInfo +end # for backwards compat const uncompressed_ast = uncompressed_ir const _uncompressed_ast = _uncompressed_ir @@ -1632,7 +1634,7 @@ function get_oc_code_rt(@nospecialize(oc::Core.OpaqueClosure)) ccall(:jl_is_in_pure_context, Bool, ()) && error("code reflection cannot be used from generated functions") m = oc.source if isa(m, Method) - code = _uncompressed_ir(m, m.source) + code = _uncompressed_ir(m) return Pair{CodeInfo,Any}(code, typeof(oc).parameters[2]) else error("encountered invalid Core.OpaqueClosure object") diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 54c92fb688d30..c07f7bf32780a 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -301,7 +301,7 @@ static void jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_instance if ((jl_value_t*)*src_out == jl_nothing) *src_out = NULL; if (*src_out && jl_is_method(def)) - *src_out = jl_uncompress_ir(def, (jl_value_t*)*src_out); + *src_out = jl_uncompress_ir(def, codeinst, (jl_value_t*)*src_out); } if (*src_out == NULL || !jl_is_code_info(*src_out)) { if (cgparams.lookup != jl_rettype_inferred_addr) { @@ -1950,7 +1950,7 @@ extern "C" JL_DLLEXPORT_CODEGEN jl_code_info_t *jl_gdbdumpcode(jl_method_instanc src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred); if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method)) { JL_GC_PUSH2(&codeinst, &src); - src = jl_uncompress_ir(mi->def.method, (jl_value_t*)src); + src = jl_uncompress_ir(mi->def.method, codeinst, (jl_value_t*)src); JL_GC_POP(); } } @@ -1989,7 +1989,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz } if (src) { if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method)) - src = jl_uncompress_ir(mi->def.method, (jl_value_t*)src); + src = jl_uncompress_ir(mi->def.method, codeinst, (jl_value_t*)src); } // emit this function into a new llvm module diff --git a/src/codegen.cpp b/src/codegen.cpp index 93fc1b9a680e2..c8e1ba9d4bdc7 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6015,7 +6015,7 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met if (it == ctx.emission_context.compiled_functions.end()) { ++EmittedOpaqueClosureFunctions; - jl_code_info_t *ir = jl_uncompress_ir(closure_method, (jl_value_t*)inferred); + jl_code_info_t *ir = jl_uncompress_ir(closure_method, ci, (jl_value_t*)inferred); JL_GC_PUSH1(&ir); // TODO: Emit this inline and outline it late using LLVM's coroutine support. 
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 93fc1b9a680e2..c8e1ba9d4bdc7 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6015,7 +6015,7 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met
     if (it == ctx.emission_context.compiled_functions.end()) {
         ++EmittedOpaqueClosureFunctions;
-        jl_code_info_t *ir = jl_uncompress_ir(closure_method, (jl_value_t*)inferred);
+        jl_code_info_t *ir = jl_uncompress_ir(closure_method, ci, (jl_value_t*)inferred);
         JL_GC_PUSH1(&ir);
         // TODO: Emit this inline and outline it late using LLVM's coroutine support.
         orc::ThreadSafeModule closure_m = jl_create_ts_module(
@@ -9570,7 +9570,7 @@ jl_llvm_functions_t jl_emit_codeinst(
         return jl_emit_oc_wrapper(m, params, codeinst->def, codeinst->rettype);
     }
     if (src && (jl_value_t*)src != jl_nothing && jl_is_method(def))
-        src = jl_uncompress_ir(def, (jl_value_t*)src);
+        src = jl_uncompress_ir(def, codeinst, (jl_value_t*)src);
     if (!src || !jl_is_code_info(src)) {
         JL_GC_POP();
         m = orc::ThreadSafeModule();
diff --git a/src/interpreter.c b/src/interpreter.c
index ffc8c6d8c12ea..5102d1417c939 100644
--- a/src/interpreter.c
+++ b/src/interpreter.c
@@ -696,7 +696,7 @@ jl_code_info_t *jl_code_for_interpreter(jl_method_instance_t *mi, size_t world)
     }
     if (src && (jl_value_t*)src != jl_nothing) {
         JL_GC_PUSH1(&src);
-        src = jl_uncompress_ir(mi->def.method, (jl_value_t*)src);
+        src = jl_uncompress_ir(mi->def.method, NULL, (jl_value_t*)src);
         jl_atomic_store_release(&mi->uninferred, (jl_value_t*)src);
         jl_gc_wb(mi, src);
         JL_GC_POP();
@@ -758,7 +758,7 @@ JL_DLLEXPORT const jl_callptr_t jl_fptr_interpret_call_addr = &jl_fptr_interpret
 jl_value_t *jl_interpret_opaque_closure(jl_opaque_closure_t *oc, jl_value_t **args, size_t nargs)
 {
     jl_method_t *source = oc->source;
-    jl_code_info_t *code = jl_uncompress_ir(source, (jl_value_t*)source->source);
+    jl_code_info_t *code = jl_uncompress_ir(source, NULL, (jl_value_t*)source->source);
     interpreter_state *s;
     unsigned nroots = jl_source_nslots(code) + jl_source_nssavalues(code) + 2;
     jl_task_t *ct = jl_current_task;
diff --git a/src/ircode.c b/src/ircode.c
index 46d2fd82253de..90dab5f63d494 100644
--- a/src/ircode.c
+++ b/src/ircode.c
@@ -894,7 +894,7 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code)
     return v;
 }

-JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_string_t *data)
+JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t *metadata, jl_string_t *data)
 {
     if (jl_is_code_info(data))
         return (jl_code_info_t*)data;
@@ -969,6 +969,9 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_string_t *data)
     jl_gc_enable(en);
     JL_UNLOCK(&m->writelock); // Might GC
     JL_GC_POP();
+    if (metadata) {
+        code->parent = metadata->def;
+    }
     return code;
 }
diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp
index 3c2a6dd285af4..c3ccb5746e37a 100644
--- a/src/jitlayers.cpp
+++ b/src/jitlayers.cpp
@@ -498,7 +498,7 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
         if (jl_is_method(def)) {
             src = (jl_code_info_t*)def->source;
             if (src && (jl_value_t*)src != jl_nothing)
-                src = jl_uncompress_ir(def, (jl_value_t*)src);
+                src = jl_uncompress_ir(def, NULL, (jl_value_t*)src);
         }
         else {
             src = (jl_code_info_t*)jl_atomic_load_relaxed(&unspec->def->uninferred);
diff --git a/src/jltypes.c b/src/jltypes.c
index 0ecf00651438a..1d743e175cec9 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -3142,7 +3142,7 @@ void jl_init_types(void) JL_GC_DISABLED
     jl_code_info_type =
         jl_new_datatype(jl_symbol("CodeInfo"), core,
                         jl_any_type, jl_emptysvec,
-                        jl_perm_symsvec(19,
+                        jl_perm_symsvec(20,
                             "code",
                             "codelocs",
                             "ssavaluetypes",
@@ -3151,6 +3151,7 @@ void jl_init_types(void) JL_GC_DISABLED
                             "slotnames",
                             "slotflags",
                             "slottypes",
+                            "parent",
                             "method_for_inference_limit_heuristics",
                             "edges",
                             "min_world",
@@ -3162,7 +3163,7 @@ void jl_init_types(void) JL_GC_DISABLED
                             "constprop",
                             "purity",
                             "inlining_cost"),
-                        jl_svec(19,
+                        jl_svec(20,
                             jl_array_any_type,
                             jl_array_int32_type,
                             jl_any_type,
@@ -3173,6 +3174,7 @@ void jl_init_types(void) JL_GC_DISABLED
                             jl_any_type,
                             jl_any_type,
                             jl_any_type,
+                            jl_any_type,
                             jl_ulong_type,
                             jl_ulong_type,
                             jl_bool_type,
@@ -3183,7 +3185,7 @@ void jl_init_types(void) JL_GC_DISABLED
                             jl_uint16_type,
                             jl_uint16_type),
                         jl_emptysvec,
-                        0, 1, 19);
+                        0, 1, 20);

     jl_method_type =
         jl_new_datatype(jl_symbol("Method"), core,
diff --git a/src/julia.h b/src/julia.h
index de5146aed7065..0505b1e88d4bb 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -294,6 +294,7 @@ typedef struct _jl_code_info_t {
     jl_array_t *slotflags; // local var bit flags
     // the following are optional transient properties (not preserved by compression--as they typically get stored elsewhere):
     jl_value_t *slottypes; // inferred types of slots
+    jl_method_instance_t *parent; // context (after inference, otherwise nothing)

     // These may be used by generated functions to further constrain the resulting inputs.
     // They are not used by any other part of the system and may be moved elsewhere in the
@@ -2134,7 +2135,7 @@ JL_DLLEXPORT jl_value_t *jl_copy_ast(jl_value_t *expr JL_MAYBE_UNROOTED);

 // IR representation
 JL_DLLEXPORT jl_value_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code);
-JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_value_t *data);
+JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t *metadata, jl_value_t *data);
 JL_DLLEXPORT uint8_t jl_ir_flag_inlining(jl_value_t *data) JL_NOTSAFEPOINT;
 JL_DLLEXPORT uint8_t jl_ir_flag_has_fcall(jl_value_t *data) JL_NOTSAFEPOINT;
 JL_DLLEXPORT uint16_t jl_ir_inlining_cost(jl_value_t *data) JL_NOTSAFEPOINT;
diff --git a/src/method.c b/src/method.c
index 15d2fd673e047..ab9487bbecad4 100644
--- a/src/method.c
+++ b/src/method.c
@@ -507,6 +507,7 @@ JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void)
     src->slotflags = NULL;
     src->slotnames = NULL;
     src->slottypes = jl_nothing;
+    src->parent = (jl_method_instance_t*)jl_nothing;
     src->min_world = 1;
     src->max_world = ~(size_t)0;
     src->edges = jl_nothing;
diff --git a/src/staticdata.c b/src/staticdata.c
index eab19607d9eba..f899899aff242 100644
--- a/src/staticdata.c
+++ b/src/staticdata.c
@@ -2344,7 +2344,7 @@ static jl_value_t *strip_codeinfo_meta(jl_method_t *m, jl_value_t *ci_, int orig
     int compressed = 0;
     if (!jl_is_code_info(ci_)) {
         compressed = 1;
-        ci = jl_uncompress_ir(m, (jl_value_t*)ci_);
+        ci = jl_uncompress_ir(m, NULL, (jl_value_t*)ci_);
     }
     else {
         ci = (jl_code_info_t*)ci_;
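A quick sanity sketch for the new layout, assuming this patch is applied: `parent` sits right after `slottypes` among the (now 20) `CodeInfo` fields, and a freshly allocated `CodeInfo` has it set to `nothing`, as `jl_new_code_info_uninit` initializes it above:

    @assert :parent in fieldnames(Core.CodeInfo)
    ci = ccall(:jl_new_code_info_uninit, Ref{Core.CodeInfo}, ())
    @assert ci.parent === nothing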
diff --git a/stdlib/Serialization/src/Serialization.jl b/stdlib/Serialization/src/Serialization.jl
index 9e83fbe38f490..7f122142c1fff 100644
--- a/stdlib/Serialization/src/Serialization.jl
+++ b/stdlib/Serialization/src/Serialization.jl
@@ -7,7 +7,7 @@ Provide serialization of Julia objects via the functions
 """
 module Serialization

-import Base: GMP, Bottom, unsafe_convert, uncompressed_ast
+import Base: GMP, Bottom, unsafe_convert
 import Core: svec, SimpleVector
 using Base: unaliascopy, unwrap_unionall, require_one_based_indexing, ntupleany
 using Core.IR
@@ -447,7 +447,7 @@ function serialize(s::AbstractSerializer, meth::Method)
     serialize(s, meth.constprop)
     serialize(s, meth.purity)
     if isdefined(meth, :source)
-        serialize(s, Base._uncompressed_ast(meth, meth.source))
+        serialize(s, Base._uncompressed_ast(meth))
     else
         serialize(s, nothing)
     end
@@ -1212,25 +1212,27 @@ function deserialize(s::AbstractSerializer, ::Type{CodeInfo})
         ci.slottypes = deserialize(s)
         if format_version(s) <= 26
             deserialize(s) # rettype
-            deserialize(s) # parent
+            ci.parent = deserialize(s)
             world_or_edges = deserialize(s)
-            pre_13 = isa(world_or_edges, Integer)
+            pre_13 = isa(world_or_edges, Union{UInt, Int})
             if pre_13
-                ci.min_world = world_or_edges
+                ci.min_world = reinterpret(UInt, world_or_edges)
+                ci.max_world = reinterpret(UInt, deserialize(s))
             else
                 ci.edges = world_or_edges
-                ci.min_world = reinterpret(UInt, deserialize(s))
-                ci.max_world = reinterpret(UInt, deserialize(s))
+                ci.min_world = deserialize(s)::UInt
+                ci.max_world = deserialize(s)::UInt
             end
         else
+            ci.parent = deserialize(s)
             ci.method_for_inference_limit_heuristics = deserialize(s)
             ci.edges = deserialize(s)
-            ci.min_world = reinterpret(UInt, deserialize(s))
-            ci.max_world = reinterpret(UInt, deserialize(s))
+            ci.min_world = deserialize(s)::UInt
+            ci.max_world = deserialize(s)::UInt
         end
     end
     if format_version(s) <= 26
-        deserialize(s) # inferred
+        deserialize(s)::Bool # inferred
     end
     if format_version(s) < 22
         inlining_cost = deserialize(s)
diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl
index 6518d7fbe3d30..80288bac486ff 100644
--- a/test/compiler/AbstractInterpreter.jl
+++ b/test/compiler/AbstractInterpreter.jl
@@ -334,12 +334,12 @@ function CC.abstract_call(interp::NoinlineInterpreter,
     end
     return ret
 end
-function CC.inlining_policy(interp::NoinlineInterpreter,
+function CC.src_inlining_policy(interp::NoinlineInterpreter,
     @nospecialize(src), @nospecialize(info::CallInfo), stmt_flag::UInt32)
     if isa(info, NoinlineCallInfo)
-        return nothing
+        return false
     end
-    return @invoke CC.inlining_policy(interp::CC.AbstractInterpreter,
+    return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter,
         src::Any, info::CallInfo, stmt_flag::UInt32)
 end

@@ -485,14 +485,18 @@ function CC.transform_result_for_cache(interp::CustomDataInterp,
     mi::Core.MethodInstance, valid_worlds::CC.WorldRange, result::CC.InferenceResult)
     return CustomData(inferred_result)
 end
-function CC.inlining_policy(interp::CustomDataInterp, @nospecialize(src),
+function CC.src_inlining_policy(interp::CustomDataInterp, @nospecialize(src),
                             @nospecialize(info::CC.CallInfo), stmt_flag::UInt32)
     if src isa CustomData
         src = src.inferred
     end
-    return @invoke CC.inlining_policy(interp::CC.AbstractInterpreter, src::Any,
-                                      info::CC.CallInfo, stmt_flag::UInt32)
+    return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter, src::Any,
+                                          info::CC.CallInfo, stmt_flag::UInt32)
 end
+CC.retrieve_ir_for_inlining(cached_result::CodeInstance, src::CustomData) =
+    CC.retrieve_ir_for_inlining(cached_result, src.inferred)
+CC.retrieve_ir_for_inlining(mi::MethodInstance, src::CustomData, preserve_local_sources::Bool) =
+    CC.retrieve_ir_for_inlining(mi, src.inferred, preserve_local_sources)
 let src = code_typed((Int,); interp=CustomDataInterp()) do x
         return sin(x) + cos(x)
     end |> only |> first
diff --git a/test/precompile.jl b/test/precompile.jl
index a68d8936d1ed1..9a035997ce9f0 100644
--- a/test/precompile.jl
+++ b/test/precompile.jl
@@ -1794,14 +1794,18 @@ let newinterp_path = abspath("compiler/newinterp.jl")
         mi::Core.MethodInstance, valid_worlds::CC.WorldRange, result::CC.InferenceResult)
         return CustomData(inferred_result)
     end
-    function CC.inlining_policy(interp::PrecompileInterpreter, @nospecialize(src),
+    function CC.src_inlining_policy(interp::PrecompileInterpreter, @nospecialize(src),
                                 @nospecialize(info::CC.CallInfo), stmt_flag::UInt32)
         if src isa CustomData
             src = src.inferred
         end
-        return @invoke CC.inlining_policy(interp::CC.AbstractInterpreter, src::Any,
-                                          info::CC.CallInfo, stmt_flag::UInt32)
+        return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter, src::Any,
+                                              info::CC.CallInfo, stmt_flag::UInt32)
     end
+    CC.retrieve_ir_for_inlining(cached_result::Core.CodeInstance, src::CustomData) =
+        CC.retrieve_ir_for_inlining(cached_result, src.inferred)
+    CC.retrieve_ir_for_inlining(mi::Core.MethodInstance, src::CustomData, preserve_local_sources::Bool) =
+        CC.retrieve_ir_for_inlining(mi, src.inferred, preserve_local_sources)
 end
 Base.return_types((Float64,)) do x