From 749eaa96b20fb8ffd4fcd021280c424a129c8639 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 6 May 2024 05:22:41 +0000 Subject: [PATCH 1/6] Restructure MethodInstance --- base/boot.jl | 2 + base/compiler/abstractinterpretation.jl | 4 +- base/compiler/inferencestate.jl | 4 +- base/compiler/ssair/inlining.jl | 16 +++---- base/compiler/typeinfer.jl | 2 +- base/compiler/types.jl | 6 +-- base/reflection.jl | 2 +- src/Makefile | 2 +- src/builtins.c | 1 + src/ccall.cpp | 6 +-- src/codegen.cpp | 26 +++++++----- src/gf.c | 26 ++++++------ src/interpreter.c | 2 +- src/jl_exported_data.inc | 1 + src/jltypes.c | 52 ++++++++++++++--------- src/julia.h | 19 +++++++-- src/method.c | 30 +++++++++---- src/precompile_utils.c | 2 +- src/staticdata.c | 9 ++-- src/staticdata_utils.c | 2 +- stdlib/InteractiveUtils/src/codeview.jl | 6 +-- stdlib/Serialization/src/Serialization.jl | 5 ++- 22 files changed, 136 insertions(+), 89 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index 4a59bf6279d63..4aafe17dc14b8 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -486,6 +486,8 @@ eval(Core, quote PartialOpaque(@nospecialize(typ), @nospecialize(env), parent::MethodInstance, source) = $(Expr(:new, :PartialOpaque, :typ, :env, :parent, :source)) InterConditional(slot::Int, @nospecialize(thentype), @nospecialize(elsetype)) = $(Expr(:new, :InterConditional, :slot, :thentype, :elsetype)) MethodMatch(@nospecialize(spec_types), sparams::SimpleVector, method::Method, fully_covers::Bool) = $(Expr(:new, :MethodMatch, :spec_types, :sparams, :method, :fully_covers)) + DefaultSpecialization(sparam_vals::SimpleVector, inInference::Bool, cache_with_orig::Bool, precompiled::Bool) = + $(Expr(:new, DefaultSpecialization, :sparam_vals, :inInference, :cache_with_orig, :precompiled)) end) const NullDebugInfo = DebugInfo(:none) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 3c0f3336d6435..0e798ff351565 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -2299,9 +2299,9 @@ function sp_type_rewrap(@nospecialize(T), mi::MethodInstance, isreturn::Bool) if isa(mi.def, Method) spsig = mi.def.sig if isa(spsig, UnionAll) - if !isempty(mi.sparam_vals) + if !isempty(mi.data.sparam_vals) sparam_vals = Any[isvarargtype(v) ? 
TypeVar(:N, Union{}, Any) : - v for v in mi.sparam_vals] + v for v in mi.data.sparam_vals] T = ccall(:jl_instantiate_type_in_env, Any, (Any, Any, Ptr{Any}), T, spsig, sparam_vals) isref && isreturn && T === Any && return Bottom # catch invalid return Ref{T} where T = Any for v in sparam_vals diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 75e947af6dac2..40ef6aa22c2f2 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -691,7 +691,7 @@ function sptypes_from_meth_instance(mi::MethodInstance) def = mi.def isa(def, Method) || return EMPTY_SPTYPES # toplevel sig = def.sig - if isempty(mi.sparam_vals) + if isempty(mi.data.sparam_vals) isa(sig, UnionAll) || return EMPTY_SPTYPES # mi is unspecialized spvals = Any[] @@ -701,7 +701,7 @@ function sptypes_from_meth_instance(mi::MethodInstance) sig′ = sig′.body end else - spvals = mi.sparam_vals + spvals = mi.data.sparam_vals end nvals = length(spvals) sptypes = Vector{VarState}(undef, nvals) diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index a77a67ab262de..0934ccc340386 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -325,7 +325,7 @@ function ir_prepare_inlining!(insert_node!::Inserter, inline_target::Union{IRCod insert_node!(NewInstruction(Expr(:code_coverage_effect), Nothing, topline)) end spvals_ssa = nothing - if !validate_sparams(mi.sparam_vals) + if !validate_sparams(mi.data.sparam_vals) # N.B. This works on the caller-side argexprs, (i.e. before the va fixup below) spvals_ssa = insert_node!( removable_if_unused(NewInstruction(Expr(:call, Core._compute_sparams, def, argexprs...), SimpleVector, topline))) @@ -775,7 +775,7 @@ end function compileable_specialization(mi::MethodInstance, effects::Effects, et::InliningEdgeTracker, @nospecialize(info::CallInfo); compilesig_invokes::Bool=true) mi_invoke = mi - method, atype, sparams = mi.def::Method, mi.specTypes, mi.sparam_vals + method, atype, sparams = mi.def::Method, mi.specTypes, mi.data.sparam_vals if compilesig_invokes new_atype = get_compileable_sig(method, atype, sparams) new_atype === nothing && return nothing @@ -790,7 +790,7 @@ function compileable_specialization(mi::MethodInstance, effects::Effects, # If this caller does not want us to optimize calls to use their # declared compilesig, then it is also likely they would handle sparams # incorrectly if there were any unknown typevars, so we conservatively return nothing - if any(@nospecialize(t)->isa(t, TypeVar), mi.sparam_vals) + if any(@nospecialize(t)->isa(t, TypeVar), mi.data.sparam_vals) return nothing end end @@ -1173,7 +1173,7 @@ function handle_invoke_call!(todo::Vector{Pair{Int,Any}}, argtypes = invoke_rewrite(sig.argtypes) if isa(result, ConstPropResult) mi = result.result.linfo - validate_sparams(mi.sparam_vals) || return nothing + validate_sparams(mi.data.sparam_vals) || return nothing if Union{} !== argtypes_to_type(argtypes) <: mi.def.sig item = resolve_todo(mi, result.result, info, flag, state; invokesig) handle_single_case!(todo, ir, idx, stmt, item, true) @@ -1430,7 +1430,7 @@ function handle_const_prop_result!(cases::Vector{InliningCase}, result::ConstPro allow_typevars::Bool) mi = result.result.linfo spec_types = match.spec_types - if !validate_sparams(mi.sparam_vals) + if !validate_sparams(mi.data.sparam_vals) (allow_typevars && !may_have_fcalls(mi.def::Method)) || return false end item = resolve_todo(mi, result.result, info, flag, state) @@ -1466,7 +1466,7 @@ function 
handle_semi_concrete_result!(cases::Vector{InliningCase}, result::SemiC match::MethodMatch, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState) mi = result.mi spec_types = match.spec_types - validate_sparams(mi.sparam_vals) || return false + validate_sparams(mi.data.sparam_vals) || return false item = semiconcrete_result_item(result, info, flag, state) item === nothing && return false push!(cases, InliningCase(spec_types, item)) @@ -1521,7 +1521,7 @@ function handle_opaque_closure_call!(todo::Vector{Pair{Int,Any}}, result = info.result if isa(result, ConstPropResult) mi = result.result.linfo - validate_sparams(mi.sparam_vals) || return nothing + validate_sparams(mi.data.sparam_vals) || return nothing item = resolve_todo(mi, result.result, info, flag, state) elseif isa(result, ConcreteResult) item = concrete_result_item(result, info, state) @@ -1793,7 +1793,7 @@ function ssa_substitute_op!(insert_node!::Inserter, subst_inst::Instruction, @no if isa(val, Expr) e = val::Expr head = e.head - sparam_vals = ssa_substitute.mi.sparam_vals + sparam_vals = ssa_substitute.mi.data.sparam_vals if head === :static_parameter spidx = e.args[1]::Int val = sparam_vals[spidx] diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index ee3e93806f853..80400ad3847f3 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -348,7 +348,7 @@ function maybe_compress_codeinfo(interp::AbstractInterpreter, mi::MethodInstance isa(def, Method) || return ci # don't compress toplevel code cache_the_tree = true if can_discard_trees - cache_the_tree = is_inlineable(ci) || isa_compileable_sig(mi.specTypes, mi.sparam_vals, def) + cache_the_tree = is_inlineable(ci) || isa_compileable_sig(mi.specTypes, mi.data.sparam_vals, def) end if cache_the_tree if may_compress(interp) diff --git a/base/compiler/types.jl b/base/compiler/types.jl index a6f5488ef6703..9b328520602a3 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -409,7 +409,7 @@ For the `NativeInterpreter`, we don't need to do an actual cache query to know i was already inferred. If we reach this point, but the inference flag has been turned off, then it's in the cache. This is purely for a performance optimization. """ -already_inferred_quick_test(interp::NativeInterpreter, mi::MethodInstance) = !mi.inInference +already_inferred_quick_test(interp::NativeInterpreter, mi::MethodInstance) = !mi.data.inInference already_inferred_quick_test(interp::AbstractInterpreter, mi::MethodInstance) = false """ @@ -424,13 +424,13 @@ already includes detection and restriction on recursion, so it is hopefully most benign problem, since it should really only happen during the first phase of bootstrapping that we encounter this flag. """ -lock_mi_inference(::NativeInterpreter, mi::MethodInstance) = (mi.inInference = true; nothing) +lock_mi_inference(::NativeInterpreter, mi::MethodInstance) = (ccall(:jl_lock_mi, Cvoid, (Any,), mi); nothing) lock_mi_inference(::AbstractInterpreter, ::MethodInstance) = return """ See `lock_mi_inference`. 
""" -unlock_mi_inference(::NativeInterpreter, mi::MethodInstance) = (mi.inInference = false; nothing) +unlock_mi_inference(::NativeInterpreter, mi::MethodInstance) = (ccall(:jl_unlock_mi, Cvoid, (Any,), mi); nothing) unlock_mi_inference(::AbstractInterpreter, ::MethodInstance) = return """ diff --git a/base/reflection.jl b/base/reflection.jl index 3b6e69801aa13..da69e2442527e 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1507,7 +1507,7 @@ Unlike normal functions, the compilation heuristics still can't generate good di in some cases, but this may still allow inference not to fall over in some limited cases. """ function may_invoke_generator(mi::MethodInstance) - return may_invoke_generator(mi.def::Method, mi.specTypes, mi.sparam_vals) + return may_invoke_generator(mi.def::Method, mi.specTypes, mi.data.sparam_vals) end function may_invoke_generator(method::Method, @nospecialize(atype), sparams::SimpleVector) # If we have complete information, we may always call the generator diff --git a/src/Makefile b/src/Makefile index 3463a0cf7a16b..70bf44bb584ae 100644 --- a/src/Makefile +++ b/src/Makefile @@ -547,7 +547,7 @@ $(addprefix clang-tidy-,$(CODEGEN_SRCS)): DEBUGFLAGS_CLANG += -DJL_LIBRARY_EXPOR # Add C files as a target of `analyzesrc` and `analyzegc` and `tidysrc` tidysrc: $(addprefix clang-tidy-,$(CODEGEN_SRCS) $(SRCS)) analyzesrc: $(addprefix clang-sa-,$(CODEGEN_SRCS) $(SRCS)) -analyzegc: $(addprefix clang-sagc-,$(filter-out $(basename $(SKIP_GC_CHECK)),$(CODEGEN_SRCS) $(SRCS))) +analyzegc: $(addprefix clang-sagc-,$(filter-out $(basename $(SKIP_GC_CHECK)),$(SRCS))) analyze: analyzesrc analyzegc tidysrc clean-analyzegc: diff --git a/src/builtins.c b/src/builtins.c index dc62d7e4d4eba..658422c63ff61 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -2494,6 +2494,7 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin("IntrinsicFunction", (jl_value_t*)jl_intrinsic_type); add_builtin("Function", (jl_value_t*)jl_function_type); add_builtin("Builtin", (jl_value_t*)jl_builtin_type); + add_builtin("DefaultSpecialization", (jl_value_t*)jl_default_specialization_type); add_builtin("MethodInstance", (jl_value_t*)jl_method_instance_type); add_builtin("CodeInfo", (jl_value_t*)jl_code_info_type); add_builtin("LLVMPtr", (jl_value_t*)jl_llvmpointer_type); diff --git a/src/ccall.cpp b/src/ccall.cpp index acea9db93fd8d..116c2bc2ff4a0 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -1531,7 +1531,7 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) std::string err = verify_ccall_sig( /* inputs: */ rt, at, unionall, - ctx.spvals_ptr == NULL ? ctx.linfo->sparam_vals : NULL, + ctx.spvals_ptr == NULL ? jl_mi_default_spec_data(ctx.linfo)->sparam_vals : NULL, &ctx.emission_context, /* outputs: */ lrt, ctx.builder.getContext(), @@ -1982,8 +1982,8 @@ jl_cgval_t function_sig_t::emit_a_ccall( // so that the julia_to_native type checks are more likely to be doable (e.g. 
concrete types) at compile-time jl_value_t *jargty_in_env = jargty; if (ctx.spvals_ptr == NULL && !toboxed && unionall_env && jl_has_typevar_from_unionall(jargty, unionall_env) && - jl_svec_len(ctx.linfo->sparam_vals) > 0) { - jargty_in_env = jl_instantiate_type_in_env(jargty_in_env, unionall_env, jl_svec_data(ctx.linfo->sparam_vals)); + jl_svec_len(jl_mi_default_spec_data(ctx.linfo)->sparam_vals) > 0) { + jargty_in_env = jl_instantiate_type_in_env(jargty_in_env, unionall_env, jl_svec_data(jl_mi_default_spec_data(ctx.linfo)->sparam_vals)); if (jargty_in_env != jargty) jargty_in_env = jl_ensure_rooted(ctx, jargty_in_env); } diff --git a/src/codegen.cpp b/src/codegen.cpp index 0939d6a6b9b5f..c8efeb75d85ed 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2804,10 +2804,11 @@ static std::pair uses_specsig(jl_method_instance_t *lam, jl_value_t jl_value_t *sig = lam->specTypes; bool needsparams = false; if (jl_is_method(lam->def.method)) { - if ((size_t)jl_subtype_env_size(lam->def.method->sig) != jl_svec_len(lam->sparam_vals)) + jl_svec_t *sparams = jl_mi_default_spec_data(lam)->sparam_vals; + if ((size_t)jl_subtype_env_size(lam->def.method->sig) != jl_svec_len(sparams)) needsparams = true; - for (size_t i = 0; i < jl_svec_len(lam->sparam_vals); ++i) { - if (jl_is_typevar(jl_svecref(lam->sparam_vals, i))) + for (size_t i = 0; i < jl_svec_len(sparams); ++i) { + if (jl_is_typevar(jl_svecref(sparams, i))) needsparams = true; } } @@ -2987,8 +2988,9 @@ static jl_value_t *static_eval(jl_codectx_t &ctx, jl_value_t *ex) } else if (e->head == jl_static_parameter_sym) { size_t idx = jl_unbox_long(jl_exprarg(e, 0)); - if (idx <= jl_svec_len(ctx.linfo->sparam_vals)) { - jl_value_t *e = jl_svecref(ctx.linfo->sparam_vals, idx - 1); + jl_svec_t *sparams = jl_mi_default_spec_data(ctx.linfo)->sparam_vals; + if (idx <= jl_svec_len(sparams)) { + jl_value_t *e = jl_svecref(sparams, idx - 1); if (jl_is_typevar(e)) return NULL; return e; @@ -5523,8 +5525,9 @@ static jl_cgval_t emit_checked_var(jl_codectx_t &ctx, Value *bp, jl_sym_t *name, static jl_cgval_t emit_sparam(jl_codectx_t &ctx, size_t i) { - if (jl_svec_len(ctx.linfo->sparam_vals) > 0) { - jl_value_t *e = jl_svecref(ctx.linfo->sparam_vals, i); + jl_svec_t *sparams = jl_mi_default_spec_data(ctx.linfo)->sparam_vals; + if (jl_svec_len(sparams) > 0) { + jl_value_t *e = jl_svecref(sparams, i); if (!jl_is_typevar(e)) { return mark_julia_const(ctx, e); } @@ -5579,8 +5582,9 @@ static jl_cgval_t emit_isdefined(jl_codectx_t &ctx, jl_value_t *sym) else if (jl_is_expr(sym)) { assert(((jl_expr_t*)sym)->head == jl_static_parameter_sym && "malformed isdefined expression"); size_t i = jl_unbox_long(jl_exprarg(sym, 0)) - 1; - if (jl_svec_len(ctx.linfo->sparam_vals) > 0) { - jl_value_t *e = jl_svecref(ctx.linfo->sparam_vals, i); + jl_svec_t *sparams = jl_mi_default_spec_data(ctx.linfo)->sparam_vals; + if (jl_svec_len(sparams) > 0) { + jl_value_t *e = jl_svecref(sparams, i); if (!jl_is_typevar(e)) { return mark_julia_const(ctx, jl_true); } @@ -7457,8 +7461,8 @@ static jl_cgval_t emit_cfunction(jl_codectx_t &ctx, jl_value_t *output_type, con ? 
(jl_unionall_t*)ctx.linfo->def.method->sig : NULL; jl_svec_t *sparam_vals = NULL; - if (ctx.spvals_ptr == NULL && jl_svec_len(ctx.linfo->sparam_vals) > 0) - sparam_vals = ctx.linfo->sparam_vals; + if (ctx.spvals_ptr == NULL && jl_svec_len(jl_mi_default_spec_data(ctx.linfo)->sparam_vals) > 0) + sparam_vals = jl_mi_default_spec_data(ctx.linfo)->sparam_vals; jl_value_t *rt = declrt; if (jl_is_abstract_ref_type(declrt)) { diff --git a/src/gf.c b/src/gf.c index b8d54ce5995da..eb2fad8cc5465 100644 --- a/src/gf.c +++ b/src/gf.c @@ -259,7 +259,7 @@ jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi) { jl_method_t *m = mi->def.method; jl_value_t *type = mi->specTypes; - jl_svec_t *sparams = mi->sparam_vals; + jl_svec_t *sparams = jl_mi_default_spec_data(mi)->sparam_vals; return jl_specializations_get_linfo_(m, type, sparams, mi); } @@ -354,7 +354,7 @@ jl_code_instance_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int fo jl_code_instance_t *ci = NULL; #ifdef ENABLE_INFERENCE - if (mi->inInference && !force) + if (jl_mi_default_spec_data(mi)->inInference && !force) return NULL; JL_TIMING(INFERENCE, INFERENCE); jl_value_t **fargs; @@ -380,7 +380,7 @@ jl_code_instance_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int fo ct->ptls->in_pure_callback = 0; size_t last_age = ct->world_age; ct->world_age = jl_typeinf_world; - mi->inInference = 1; + jl_mi_default_spec_data(mi)->inInference = 1; // first bit is for reentrant timing, // so adding 1 to the bit above performs // inference reentrancy counter addition. @@ -415,7 +415,7 @@ jl_code_instance_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int fo ct->world_age = last_age; ct->reentrant_timing -= 0b10; ct->ptls->in_pure_callback = last_pure; - mi->inInference = 0; + jl_mi_default_spec_data(mi)->inInference = 0; #ifdef _OS_WINDOWS_ SetLastError(last_error); #endif @@ -1326,7 +1326,7 @@ static jl_method_instance_t *cache_method( } // TODO: maybe assert(jl_isa_compileable_sig(compilationsig, sparams, definition)); newmeth = jl_specializations_get_linfo(definition, (jl_value_t*)compilationsig, sparams); - if (newmeth->cache_with_orig) + if (jl_mi_default_spec_data(newmeth)->cache_with_orig) cache_with_orig = 1; jl_tupletype_t *cachett = tt; @@ -1396,7 +1396,7 @@ static jl_method_instance_t *cache_method( } else { // do not revisit this decision - newmeth->cache_with_orig = 1; + jl_mi_default_spec_data(newmeth)->cache_with_orig = 1; } } @@ -2658,11 +2658,11 @@ jl_value_t *jl_fptr_args(jl_value_t *f, jl_value_t **args, uint32_t nargs, jl_co return invoke(f, args, nargs); } -jl_value_t *jl_fptr_sparam(jl_value_t *f, jl_value_t **args, uint32_t nargs, jl_code_instance_t *m) +jl_value_t *jl_fptr_sparam(jl_value_t *f, jl_value_t **args, uint32_t nargs, jl_code_instance_t *ci) { - jl_svec_t *sparams = m->def->sparam_vals; + jl_svec_t *sparams = jl_mi_default_spec_data(ci->def)->sparam_vals; assert(sparams != jl_emptysvec); - jl_fptr_sparam_t invoke = jl_atomic_load_relaxed(&m->specptr.fptr3); + jl_fptr_sparam_t invoke = jl_atomic_load_relaxed(&ci->specptr.fptr3); assert(invoke && "Forgot to set specptr for jl_fptr_sparam!"); return invoke(f, args, nargs, sparams); } @@ -2728,7 +2728,7 @@ jl_method_instance_t *jl_normalize_to_compilable_mi(jl_method_instance_t *mi JL_ jl_methtable_t *mt = jl_method_get_table(def); if ((jl_value_t*)mt == jl_nothing) return mi; - jl_value_t *compilationsig = jl_normalize_to_compilable_sig(mt, (jl_datatype_t*)mi->specTypes, mi->sparam_vals, def, 1); + jl_value_t 
*compilationsig = jl_normalize_to_compilable_sig(mt, (jl_datatype_t*)mi->specTypes, jl_mi_default_spec_data(mi)->sparam_vals, def, 1); if (compilationsig == jl_nothing || jl_egal(compilationsig, mi->specTypes)) return mi; jl_svec_t *env = NULL; @@ -2898,7 +2898,7 @@ static void jl_compile_now(jl_method_instance_t *mi) JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tupletype_t *types, size_t world) { size_t tworld = jl_typeinf_world; - jl_atomic_store_relaxed(&mi->precompiled, 1); + jl_atomic_store_relaxed(&jl_mi_default_spec_data(mi)->precompiled, 1); if (jl_generating_output()) { jl_compile_now(mi); // In addition to full compilation of the compilation-signature, if `types` is more specific (e.g. due to nospecialize), @@ -2913,7 +2913,7 @@ JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tuplet types2 = jl_type_intersection_env((jl_value_t*)types, (jl_value_t*)mi->def.method->sig, &tpenv2); jl_method_instance_t *mi2 = jl_specializations_get_linfo(mi->def.method, (jl_value_t*)types2, tpenv2); JL_GC_POP(); - jl_atomic_store_relaxed(&mi2->precompiled, 1); + jl_atomic_store_relaxed(&jl_mi_default_spec_data(mi2)->precompiled, 1); if (jl_rettype_inferred_native(mi2, world, world) == jl_nothing) (void)jl_type_infer(mi2, world, 1, SOURCE_MODE_NOT_REQUIRED); if (jl_typeinf_func && jl_atomic_load_relaxed(&mi->def.method->primary_world) <= tworld) { @@ -3742,7 +3742,7 @@ static jl_value_t *ml_matches(jl_methtable_t *mt, env.match.ti = unw; } else if (jl_egal((jl_value_t*)type, mi->specTypes)) { - env.match.env = mi->sparam_vals; + env.match.env = jl_mi_default_spec_data(mi)->sparam_vals; env.match.ti = mi->specTypes; } else { diff --git a/src/interpreter.c b/src/interpreter.c index 5760386324527..a898a4a1ae34a 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -778,7 +778,7 @@ jl_value_t *NOINLINE jl_fptr_interpret_call(jl_value_t *f, jl_value_t **args, ui s->locals[defargs - 1] = jl_f_tuple(NULL, &args[defargs - 2], nargs + 2 - defargs); } } - s->sparam_vals = mi->sparam_vals; + s->sparam_vals = jl_mi_default_spec_data(mi)->sparam_vals; s->preevaluation = 0; s->continue_at = 0; s->mi = mi; diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc index 79ff437841879..cfcfbd377e127 100644 --- a/src/jl_exported_data.inc +++ b/src/jl_exported_data.inc @@ -84,6 +84,7 @@ XX(jl_memoryref_uint8_type) \ XX(jl_methoderror_type) \ XX(jl_method_instance_type) \ + XX(jl_default_specialization_type) \ XX(jl_method_match_type) \ XX(jl_method_type) \ XX(jl_methtable_type) \ diff --git a/src/jltypes.c b/src/jltypes.c index da69686f60695..cfdf801ab385b 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3284,33 +3284,44 @@ void jl_init_types(void) JL_GC_DISABLED const static uint32_t method_atomicfields[1] = { 0x00000030 }; // (1<<4)|(1<<5) jl_method_type->name->atomicfields = method_atomicfields; + jl_default_specialization_type = + jl_new_datatype(jl_symbol("DefaultSpecialization"), core, + jl_any_type, jl_emptysvec, + jl_perm_symsvec(4, + "sparam_vals", + "inInference", + "cache_with_orig", + "precompiled"), + jl_svec(4, + jl_simplevector_type, + jl_bool_type, + jl_bool_type, + jl_bool_type), + jl_emptysvec, 0, 0, 4); + jl_method_instance_type = jl_new_datatype(jl_symbol("MethodInstance"), core, jl_any_type, jl_emptysvec, - jl_perm_symsvec(8, + jl_perm_symsvec(6, "def", "specTypes", - "sparam_vals", "backedges", "cache", - "inInference", - "cache_with_orig", - "precompiled"), - jl_svec(8, + "next", + "data"), + jl_svec(6, 
jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type), jl_any_type, - jl_simplevector_type, jl_array_any_type, - jl_any_type/*jl_code_instance_type*/, - jl_bool_type, - jl_bool_type, - jl_bool_type), + jl_any_type,/*jl_code_instance_type*/ + jl_any_type,/*jl_method_instance_type*/ + jl_default_specialization_type), jl_emptysvec, - 0, 1, 3); + 0, 1, 2); // These fields should be constant, but Serialization wants to mutate them in initialization - //const static uint32_t method_instance_constfields[1] = { 0x00000007 }; // (1<<0)|(1<<1)|(1<<2); - const static uint32_t method_instance_atomicfields[1] = { 0x0000090 }; // (1<<4)|(1<<7); - //Fields 4 and 5 must be protected by method->write_lock, and thus all operations on jl_method_instance_t are threadsafe. TODO: except inInference + //const static uint32_t method_instance_constfields[1] = { 0x00000007 }; // (1<<0)|(1<<1); + const static uint32_t method_instance_atomicfields[1] = { 0x0000008 }; // (1<<3) + //Fields 3 and 4 must be protected by method->write_lock, and thus all operations on jl_method_instance_t are threadsafe. TODO: except inInference //jl_method_instance_type->name->constfields = method_instance_constfields; jl_method_instance_type->name->atomicfields = method_instance_atomicfields; @@ -3498,7 +3509,8 @@ void jl_init_types(void) JL_GC_DISABLED jl_svecset(jl_methtable_type->types, 10, jl_uint8_type); jl_svecset(jl_method_type->types, 13, jl_method_instance_type); //jl_svecset(jl_debuginfo_type->types, 0, jl_method_instance_type); // union(jl_method_instance_type, jl_method_type, jl_symbol_type) - jl_svecset(jl_method_instance_type->types, 4, jl_code_instance_type); + jl_svecset(jl_method_instance_type->types, 3, jl_code_instance_type); + jl_svecset(jl_method_instance_type->types, 4, jl_method_instance_type); jl_svecset(jl_code_instance_type->types, 16, jl_voidpointer_type); jl_svecset(jl_code_instance_type->types, 17, jl_voidpointer_type); jl_svecset(jl_binding_type->types, 1, jl_globalref_type); @@ -3509,10 +3521,7 @@ void jl_init_types(void) JL_GC_DISABLED jl_compute_field_offsets(jl_uniontype_type); jl_compute_field_offsets(jl_tvar_type); jl_compute_field_offsets(jl_methtable_type); - jl_compute_field_offsets(jl_method_instance_type); - jl_compute_field_offsets(jl_code_instance_type); jl_compute_field_offsets(jl_unionall_type); - jl_compute_field_offsets(jl_simplevector_type); jl_compute_field_offsets(jl_symbol_type); // override ismutationfree for builtin types that are mutable for identity @@ -3523,6 +3532,11 @@ void jl_init_types(void) JL_GC_DISABLED assert(((jl_datatype_t*)jl_array_any_type)->ismutationfree == 0); assert(((jl_datatype_t*)jl_array_uint8_type)->ismutationfree == 0); + jl_compute_field_offsets(jl_simplevector_type); + jl_compute_field_offsets(jl_default_specialization_type); + jl_compute_field_offsets(jl_method_instance_type); + jl_compute_field_offsets(jl_code_instance_type); + // Technically not ismutationfree, but there's a separate system to deal // with mutations for global state. 
jl_module_type->ismutationfree = 1; diff --git a/src/julia.h b/src/julia.h index 0d46f15776610..2019651f99779 100644 --- a/src/julia.h +++ b/src/julia.h @@ -400,6 +400,14 @@ typedef struct _jl_method_t { jl_mutex_t writelock; } jl_method_t; +typedef struct _jl_mi_default_spec_t { + JL_DATA_TYPE + jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sig + uint8_t inInference; // flags to tell if inference is running on this object + uint8_t cache_with_orig; // !cache_with_specTypes + _Atomic(uint8_t) precompiled; // true if this instance was generated by an explicit `precompile(...)` call +} jl_mi_default_spec_t; + // This type is a placeholder to cache data for a specType signature specialization of a Method // can can be used as a unique dictionary key representation of a call to a particular Method // with a particular set of argument types @@ -411,12 +419,14 @@ struct _jl_method_instance_t { jl_method_t *method; // method this is specialized from } def; // pointer back to the context for this code jl_value_t *specTypes; // argument types this was specialized for - jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sig jl_array_t *backedges; // list of method-instances which call this method-instance; `invoke` records (invokesig, caller) pairs _Atomic(struct _jl_code_instance_t*) cache; - uint8_t inInference; // flags to tell if inference is running on this object - uint8_t cache_with_orig; // !cache_with_specTypes - _Atomic(uint8_t) precompiled; // true if this instance was generated by an explicit `precompile(...)` call + _Atomic(struct _jl_method_instance_t*) next; + /* Data follows inline here */ +}; + +static inline jl_mi_default_spec_t *jl_mi_default_spec_data(jl_method_instance_t *mi) JL_NOTSAFEPOINT { + return (jl_mi_default_spec_t*)(&mi[1]); }; // OpaqueClosure @@ -841,6 +851,7 @@ extern JL_DLLIMPORT jl_unionall_t *jl_opaque_closure_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_typename_t *jl_opaque_closure_typename JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_value_t *jl_bottom_type JL_GLOBALLY_ROOTED; +extern JL_DLLIMPORT jl_datatype_t *jl_default_specialization_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_method_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_code_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_code_info_type JL_GLOBALLY_ROOTED; diff --git a/src/method.c b/src/method.c index 59c24671f46f3..3f8b997b9c452 100644 --- a/src/method.c +++ b/src/method.c @@ -609,19 +609,30 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void) { jl_task_t *ct = jl_current_task; jl_method_instance_t *mi = - (jl_method_instance_t*)jl_gc_alloc(ct->ptls, sizeof(jl_method_instance_t), + (jl_method_instance_t*)jl_gc_alloc(ct->ptls, jl_datatype_size(jl_method_instance_type), jl_method_instance_type); mi->def.value = NULL; mi->specTypes = NULL; - mi->sparam_vals = jl_emptysvec; + mi->next = NULL; + jl_mi_default_spec_data(mi)->sparam_vals = jl_emptysvec; mi->backedges = NULL; jl_atomic_store_relaxed(&mi->cache, NULL); - mi->inInference = 0; - mi->cache_with_orig = 0; - jl_atomic_store_relaxed(&mi->precompiled, 0); + jl_mi_default_spec_data(mi)->inInference = 0; + jl_mi_default_spec_data(mi)->cache_with_orig = 0; + jl_atomic_store_relaxed(&jl_mi_default_spec_data(mi)->precompiled, 0); return mi; } +JL_DLLEXPORT void jl_lock_mi(jl_method_instance_t *mi) +{ + jl_mi_default_spec_data(mi)->inInference = 1; +} + +JL_DLLEXPORT void jl_unlock_mi(jl_method_instance_t 
*mi) +{ + jl_mi_default_spec_data(mi)->inInference = 0; +} + JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void) { jl_task_t *ct = jl_current_task; @@ -758,17 +769,18 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t // invoke code generator jl_tupletype_t *ttdt = (jl_tupletype_t*)jl_unwrap_unionall(tt); - ex = jl_call_staged(def, generator, world, mi->sparam_vals, jl_svec_data(ttdt->parameters), jl_nparams(ttdt)); + jl_svec_t *sparams = jl_mi_default_spec_data(mi)->sparam_vals; + ex = jl_call_staged(def, generator, world, sparams, jl_svec_data(ttdt->parameters), jl_nparams(ttdt)); // do some post-processing if (jl_is_code_info(ex)) { func = (jl_code_info_t*)ex; jl_array_t *stmts = (jl_array_t*)func->code; - jl_resolve_globals_in_ir(stmts, def->module, mi->sparam_vals, 1); + jl_resolve_globals_in_ir(stmts, def->module, sparams, 1); } else { // Lower the user's expression and resolve references to the type parameters - func = jl_expand_and_resolve(ex, def->module, mi->sparam_vals); + func = jl_expand_and_resolve(ex, def->module, sparams); if (!jl_is_code_info(func)) { if (jl_is_expr(func) && ((jl_expr_t*)func)->head == jl_error_sym) { ct->ptls->in_pure_callback = 0; @@ -852,7 +864,7 @@ jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_s jl_method_instance_t *new_linfo = jl_new_method_instance_uninit(); new_linfo->def.method = m; new_linfo->specTypes = types; - new_linfo->sparam_vals = sp; + jl_mi_default_spec_data(new_linfo)->sparam_vals = sp; return new_linfo; } diff --git a/src/precompile_utils.c b/src/precompile_utils.c index 5a4f599d1f0eb..d5f70dea5aa59 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -251,7 +251,7 @@ static void *jl_precompile_(jl_array_t *m, int external_linkage) mi = (jl_method_instance_t*)item; size_t min_world = 0; size_t max_world = ~(size_t)0; - if (mi != jl_atomic_load_relaxed(&mi->def.method->unspecialized) && !jl_isa_compileable_sig((jl_tupletype_t*)mi->specTypes, mi->sparam_vals, mi->def.method)) + if (mi != jl_atomic_load_relaxed(&mi->def.method->unspecialized) && !jl_isa_compileable_sig((jl_tupletype_t*)mi->specTypes, jl_mi_default_spec_data(mi)->sparam_vals, mi->def.method)) mi = jl_get_specialization1((jl_tupletype_t*)mi->specTypes, jl_atomic_load_acquire(&jl_world_counter), &min_world, &max_world, 0); if (mi) jl_array_ptr_1d_push(m2, (jl_value_t*)mi); diff --git a/src/staticdata.c b/src/staticdata.c index f6461e0b4285f..c8cda471eda21 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -100,7 +100,7 @@ extern "C" { // TODO: put WeakRefs on the weak_refs list during deserialization // TODO: handle finalizers -#define NUM_TAGS 190 +#define NUM_TAGS 191 // An array of references that need to be restored from the sysimg // This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C. 
@@ -126,6 +126,7 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_string_type); INSERT_TAG(jl_module_type); INSERT_TAG(jl_tvar_type); + INSERT_TAG(jl_default_specialization_type); INSERT_TAG(jl_method_instance_type); INSERT_TAG(jl_method_type); INSERT_TAG(jl_code_instance_type); @@ -803,7 +804,7 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_ // we only need 3 specific fields of this (the rest are not used) jl_queue_for_serialization(s, mi->def.value); jl_queue_for_serialization(s, mi->specTypes); - jl_queue_for_serialization(s, (jl_value_t*)mi->sparam_vals); + jl_queue_for_serialization(s, (jl_value_t*)jl_mi_default_spec_data(mi)->sparam_vals); goto done_fields; } else if (jl_is_method(def) && jl_object_in_image(def)) { @@ -1325,7 +1326,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED jl_method_instance_t *mi = (jl_method_instance_t*)v; write_pointerfield(s, mi->def.value); write_pointerfield(s, mi->specTypes); - write_pointerfield(s, (jl_value_t*)mi->sparam_vals); + write_pointerfield(s, (jl_value_t*)jl_mi_default_spec_data(mi)->sparam_vals); continue; } else if (jl_is_datatype(v)) { @@ -1624,7 +1625,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED else if (jl_is_method_instance(v)) { assert(f == s->s); jl_method_instance_t *newmi = (jl_method_instance_t*)&f->buf[reloc_offset]; - jl_atomic_store_relaxed(&newmi->precompiled, 0); + jl_atomic_store_relaxed(&jl_mi_default_spec_data(newmi)->precompiled, 0); } else if (jl_is_code_instance(v)) { assert(f == s->s); diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 439dd79f76a5e..e3164e6b292e1 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -159,7 +159,7 @@ static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited, if (jl_is_method(mod)) mod = ((jl_method_t*)mod)->module; assert(jl_is_module(mod)); - if (jl_atomic_load_relaxed(&mi->precompiled) || !jl_object_in_image((jl_value_t*)mod) || type_in_worklist(mi->specTypes)) { + if (jl_atomic_load_relaxed(&jl_mi_default_spec_data(mi)->precompiled) || !jl_object_in_image((jl_value_t*)mod) || type_in_worklist(mi->specTypes)) { return 1; } if (!mi->backedges) { diff --git a/stdlib/InteractiveUtils/src/codeview.jl b/stdlib/InteractiveUtils/src/codeview.jl index 015269d4d8dc0..8834b2cbb3924 100644 --- a/stdlib/InteractiveUtils/src/codeview.jl +++ b/stdlib/InteractiveUtils/src/codeview.jl @@ -83,14 +83,14 @@ function print_warntype_mi(io::IO, mi::Core.MethodInstance) println(io, mi) print(io, " from ") println(io, mi.def) - if !isempty(mi.sparam_vals) + if !isempty(mi.data.sparam_vals) println(io, "Static Parameters") sig = mi.def.sig warn_color = Base.warn_color() # more mild user notification - for i = 1:length(mi.sparam_vals) + for i = 1:length(mi.data.sparam_vals) sig = sig::UnionAll name = sig.var.name - val = mi.sparam_vals[i] + val = mi.data.sparam_vals[i] print_highlighted(io::IO, v::String, color::Symbol) = if highlighting[:warntype] Base.printstyled(io, v; color) diff --git a/stdlib/Serialization/src/Serialization.jl b/stdlib/Serialization/src/Serialization.jl index c8b5314fe719d..a260580237371 100644 --- a/stdlib/Serialization/src/Serialization.jl +++ b/stdlib/Serialization/src/Serialization.jl @@ -471,7 +471,7 @@ function serialize(s::AbstractSerializer, linfo::Core.MethodInstance) serialize_cycle(s, linfo) && return writetag(s.io, METHODINSTANCE_TAG) serialize(s, nothing) # for backwards compat - serialize(s, linfo.sparam_vals) + serialize(s, 
linfo.data.sparam_vals) serialize(s, Any) # for backwards compat serialize(s, linfo.specTypes) serialize(s, linfo.def) @@ -1138,10 +1138,11 @@ function deserialize(s::AbstractSerializer, ::Type{Core.MethodInstance}) # for reading files prior to v1.2 handle_deserialize(s, tag) end - linfo.sparam_vals = deserialize(s)::SimpleVector + sparam_vals = deserialize(s)::SimpleVector _rettype = deserialize(s) # for backwards compat linfo.specTypes = deserialize(s) linfo.def = deserialize(s) + linfo.data = DefaultSpecialization(sparam_vals, false, false, false) return linfo end From 51892437387dd25b4d10683c45c86939ef4ec903 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 6 May 2024 06:38:43 +0000 Subject: [PATCH 2/6] MethodInstance -> MethodSpecialization{D} --- base/boot.jl | 4 +-- src/builtins.c | 3 +- src/jl_exported_data.inc | 3 +- src/jltypes.c | 43 +++++++++++++---------- src/julia.h | 3 +- src/rtutils.c | 7 +++- src/staticdata.c | 5 +-- stdlib/Serialization/src/Serialization.jl | 2 +- 8 files changed, 43 insertions(+), 27 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index 4aafe17dc14b8..b43f2c226c79e 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -486,8 +486,8 @@ eval(Core, quote PartialOpaque(@nospecialize(typ), @nospecialize(env), parent::MethodInstance, source) = $(Expr(:new, :PartialOpaque, :typ, :env, :parent, :source)) InterConditional(slot::Int, @nospecialize(thentype), @nospecialize(elsetype)) = $(Expr(:new, :InterConditional, :slot, :thentype, :elsetype)) MethodMatch(@nospecialize(spec_types), sparams::SimpleVector, method::Method, fully_covers::Bool) = $(Expr(:new, :MethodMatch, :spec_types, :sparams, :method, :fully_covers)) - DefaultSpecialization(sparam_vals::SimpleVector, inInference::Bool, cache_with_orig::Bool, precompiled::Bool) = - $(Expr(:new, DefaultSpecialization, :sparam_vals, :inInference, :cache_with_orig, :precompiled)) + DefaultSpec(sparam_vals::SimpleVector, inInference::Bool, cache_with_orig::Bool, precompiled::Bool) = + $(Expr(:new, DefaultSpec, :sparam_vals, :inInference, :cache_with_orig, :precompiled)) end) const NullDebugInfo = DebugInfo(:none) diff --git a/src/builtins.c b/src/builtins.c index 658422c63ff61..a3ff0c2845c09 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -2494,7 +2494,8 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin("IntrinsicFunction", (jl_value_t*)jl_intrinsic_type); add_builtin("Function", (jl_value_t*)jl_function_type); add_builtin("Builtin", (jl_value_t*)jl_builtin_type); - add_builtin("DefaultSpecialization", (jl_value_t*)jl_default_specialization_type); + add_builtin("DefaultSpec", (jl_value_t*)jl_default_spec_type); + add_builtin("MethodSpecialization", (jl_value_t*)jl_method_specialization_type); add_builtin("MethodInstance", (jl_value_t*)jl_method_instance_type); add_builtin("CodeInfo", (jl_value_t*)jl_code_info_type); add_builtin("LLVMPtr", (jl_value_t*)jl_llvmpointer_type); diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc index cfcfbd377e127..8a505e0ecd430 100644 --- a/src/jl_exported_data.inc +++ b/src/jl_exported_data.inc @@ -84,7 +84,8 @@ XX(jl_memoryref_uint8_type) \ XX(jl_methoderror_type) \ XX(jl_method_instance_type) \ - XX(jl_default_specialization_type) \ + XX(jl_method_specialization_type) \ + XX(jl_default_spec_type) \ XX(jl_method_match_type) \ XX(jl_method_type) \ XX(jl_methtable_type) \ diff --git a/src/jltypes.c b/src/jltypes.c index cfdf801ab385b..93776964060ed 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -2273,12 +2273,12 @@ static jl_svec_t *inst_ftypes(jl_svec_t 
*p, jl_typeenv_t *env, jl_typestack_t *s size_t i; size_t lp = jl_svec_len(p); jl_svec_t *np = jl_alloc_svec(lp); - jl_value_t *pi = NULL; + jl_value_t *orig_pi = NULL, *pi = NULL; JL_GC_PUSH2(&np, &pi); for (i = 0; i < lp; i++) { - pi = jl_svecref(p, i); + orig_pi = jl_svecref(p, i); JL_TRY { - pi = inst_type_w_(pi, env, stack, 1); + pi = inst_type_w_(orig_pi, env, stack, 1); if (!jl_is_type(pi) && !jl_is_typevar(pi)) { pi = jl_bottom_type; } @@ -2286,7 +2286,9 @@ static jl_svec_t *inst_ftypes(jl_svec_t *p, jl_typeenv_t *env, jl_typestack_t *s JL_CATCH { pi = jl_bottom_type; } - jl_value_t *globalpi = jl_as_global_root(pi, cacheable); + jl_value_t *globalpi = NULL; + if (orig_pi != pi) + globalpi = jl_as_global_root(pi, cacheable); jl_svecset(np, i, globalpi ? globalpi : pi); } JL_GC_POP(); @@ -3284,8 +3286,8 @@ void jl_init_types(void) JL_GC_DISABLED const static uint32_t method_atomicfields[1] = { 0x00000030 }; // (1<<4)|(1<<5) jl_method_type->name->atomicfields = method_atomicfields; - jl_default_specialization_type = - jl_new_datatype(jl_symbol("DefaultSpecialization"), core, + jl_default_spec_type = + jl_new_datatype(jl_symbol("DefaultSpec"), core, jl_any_type, jl_emptysvec, jl_perm_symsvec(4, "sparam_vals", @@ -3299,9 +3301,10 @@ void jl_init_types(void) JL_GC_DISABLED jl_bool_type), jl_emptysvec, 0, 0, 4); - jl_method_instance_type = - jl_new_datatype(jl_symbol("MethodInstance"), core, - jl_any_type, jl_emptysvec, + tv = jl_svec1(tvar("D")); + jl_datatype_t *jl_meth_spec_type = + jl_new_datatype(jl_symbol("MethodSpecialization"), core, + jl_any_type, tv, jl_perm_symsvec(6, "def", "specTypes", @@ -3314,16 +3317,18 @@ void jl_init_types(void) JL_GC_DISABLED jl_any_type, jl_array_any_type, jl_any_type,/*jl_code_instance_type*/ - jl_any_type,/*jl_method_instance_type*/ - jl_default_specialization_type), + jl_any_type,/*jl_method_specialization_type*/ + jl_svecref(tv, 0)), jl_emptysvec, 0, 1, 2); + jl_method_specialization_type = + (jl_unionall_t*)jl_meth_spec_type->name->wrapper; // These fields should be constant, but Serialization wants to mutate them in initialization //const static uint32_t method_instance_constfields[1] = { 0x00000007 }; // (1<<0)|(1<<1); const static uint32_t method_instance_atomicfields[1] = { 0x0000008 }; // (1<<3) //Fields 3 and 4 must be protected by method->write_lock, and thus all operations on jl_method_instance_t are threadsafe. 
TODO: except inInference //jl_method_instance_type->name->constfields = method_instance_constfields; - jl_method_instance_type->name->atomicfields = method_instance_atomicfields; + jl_meth_spec_type->name->atomicfields = method_instance_atomicfields; jl_code_instance_type = jl_new_datatype(jl_symbol("CodeInstance"), core, @@ -3345,7 +3350,7 @@ void jl_init_types(void) JL_GC_DISABLED "specsigflags", "precompile", "relocatability", "invoke", "specptr"), // function object decls jl_svec(18, - jl_method_instance_type, + jl_method_specialization_type, jl_any_type, jl_any_type, jl_ulong_type, @@ -3483,7 +3488,7 @@ void jl_init_types(void) JL_GC_DISABLED jl_partial_opaque_type = jl_new_datatype(jl_symbol("PartialOpaque"), core, jl_any_type, jl_emptysvec, jl_perm_symsvec(4, "typ", "env", "parent", "source"), - jl_svec(4, jl_type_type, jl_any_type, jl_method_instance_type, jl_any_type), + jl_svec(4, jl_type_type, jl_any_type, jl_method_specialization_type, jl_any_type), jl_emptysvec, 0, 0, 4); // complete builtin type metadata @@ -3507,10 +3512,9 @@ void jl_init_types(void) JL_GC_DISABLED jl_svecset(jl_methtable_type->types, 8, jl_long_type); // uint32_t plus alignment jl_svecset(jl_methtable_type->types, 9, jl_uint8_type); jl_svecset(jl_methtable_type->types, 10, jl_uint8_type); - jl_svecset(jl_method_type->types, 13, jl_method_instance_type); //jl_svecset(jl_debuginfo_type->types, 0, jl_method_instance_type); // union(jl_method_instance_type, jl_method_type, jl_symbol_type) - jl_svecset(jl_method_instance_type->types, 3, jl_code_instance_type); - jl_svecset(jl_method_instance_type->types, 4, jl_method_instance_type); + jl_svecset(jl_meth_spec_type->types, 3, jl_code_instance_type); + jl_svecset(jl_meth_spec_type->types, 4, jl_method_specialization_type); jl_svecset(jl_code_instance_type->types, 16, jl_voidpointer_type); jl_svecset(jl_code_instance_type->types, 17, jl_voidpointer_type); jl_svecset(jl_binding_type->types, 1, jl_globalref_type); @@ -3532,8 +3536,11 @@ void jl_init_types(void) JL_GC_DISABLED assert(((jl_datatype_t*)jl_array_any_type)->ismutationfree == 0); assert(((jl_datatype_t*)jl_array_uint8_type)->ismutationfree == 0); + jl_method_instance_type = (jl_datatype_t*)jl_apply_type1((jl_value_t*)jl_method_specialization_type, (jl_value_t*)jl_default_spec_type); + jl_svecset(jl_method_type->types, 13, jl_method_instance_type); + jl_compute_field_offsets(jl_simplevector_type); - jl_compute_field_offsets(jl_default_specialization_type); + jl_compute_field_offsets(jl_default_spec_type); jl_compute_field_offsets(jl_method_instance_type); jl_compute_field_offsets(jl_code_instance_type); diff --git a/src/julia.h b/src/julia.h index 2019651f99779..6d2f216b72a66 100644 --- a/src/julia.h +++ b/src/julia.h @@ -851,7 +851,8 @@ extern JL_DLLIMPORT jl_unionall_t *jl_opaque_closure_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_typename_t *jl_opaque_closure_typename JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_value_t *jl_bottom_type JL_GLOBALLY_ROOTED; -extern JL_DLLIMPORT jl_datatype_t *jl_default_specialization_type JL_GLOBALLY_ROOTED; +extern JL_DLLIMPORT jl_datatype_t *jl_default_spec_type JL_GLOBALLY_ROOTED; +extern JL_DLLIMPORT jl_unionall_t *jl_method_specialization_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_method_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_code_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_code_info_type JL_GLOBALLY_ROOTED; diff --git a/src/rtutils.c b/src/rtutils.c index f18a1ac11291a..cbe14464fb286 
100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -800,7 +800,12 @@ static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v, jl_datatype_t *vt } else if (vt == jl_method_type) { jl_method_t *m = (jl_method_t*)v; - n += jl_static_show_func_sig(out, m->sig); + if (m->sig == (jl_value_t*)jl_anytuple_type) { + jl_printf(out, "Builtin Method for "); + jl_static_show_x(out, (jl_value_t*)m->name, depth, ctx); + } else { + n += jl_static_show_func_sig(out, m->sig); + } } else if (vt == jl_method_instance_type) { jl_method_instance_t *li = (jl_method_instance_t*)v; diff --git a/src/staticdata.c b/src/staticdata.c index c8cda471eda21..281c8274bfe57 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -100,7 +100,7 @@ extern "C" { // TODO: put WeakRefs on the weak_refs list during deserialization // TODO: handle finalizers -#define NUM_TAGS 191 +#define NUM_TAGS 192 // An array of references that need to be restored from the sysimg // This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C. @@ -126,7 +126,8 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_string_type); INSERT_TAG(jl_module_type); INSERT_TAG(jl_tvar_type); - INSERT_TAG(jl_default_specialization_type); + INSERT_TAG(jl_default_spec_type); + INSERT_TAG(jl_method_specialization_type); INSERT_TAG(jl_method_instance_type); INSERT_TAG(jl_method_type); INSERT_TAG(jl_code_instance_type); diff --git a/stdlib/Serialization/src/Serialization.jl b/stdlib/Serialization/src/Serialization.jl index a260580237371..cbc4194187c57 100644 --- a/stdlib/Serialization/src/Serialization.jl +++ b/stdlib/Serialization/src/Serialization.jl @@ -1142,7 +1142,7 @@ function deserialize(s::AbstractSerializer, ::Type{Core.MethodInstance}) _rettype = deserialize(s) # for backwards compat linfo.specTypes = deserialize(s) linfo.def = deserialize(s) - linfo.data = DefaultSpecialization(sparam_vals, false, false, false) + linfo.data = DefaultSpec(sparam_vals, false, false, false) return linfo end From 3144f8e89cdc0ff8c7c4f078819e986f3ea7d09c Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 6 May 2024 07:44:17 +0000 Subject: [PATCH 3/6] rm CodeInstance owner --- base/boot.jl | 6 +++--- base/compiler/cicache.jl | 8 +++---- base/compiler/typeinfer.jl | 6 +++--- base/compiler/utilities.jl | 2 +- src/builtins.c | 1 - src/gf.c | 33 +++++++++++----------------- src/interpreter.c | 5 ++--- src/jitlayers.cpp | 3 +-- src/jl_exported_data.inc | 1 + src/jltypes.c | 28 ++++++++++++++++-------- src/julia.h | 4 ++-- src/julia_internal.h | 6 +++--- src/method.c | 44 ++++++++++++++++++++++++++------------ src/opaque_closure.c | 2 +- src/precompile_utils.c | 5 +---- src/rtutils.c | 2 +- src/staticdata.c | 3 ++- src/staticdata_utils.c | 7 ++---- src/toplevel.c | 13 +++++++---- 19 files changed, 96 insertions(+), 83 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index b43f2c226c79e..497e80c7b519e 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -503,13 +503,13 @@ end function CodeInstance( - mi::MethodInstance, owner, @nospecialize(rettype), @nospecialize(exctype), @nospecialize(inferred_const), + mi::MethodInstance, @nospecialize(rettype), @nospecialize(exctype), @nospecialize(inferred_const), @nospecialize(inferred), const_flags::Int32, min_world::UInt, max_world::UInt, ipo_effects::UInt32, effects::UInt32, @nospecialize(analysis_results), relocatability::UInt8, edges::DebugInfo) return ccall(:jl_new_codeinst, Ref{CodeInstance}, - (Any, Any, Any, Any, Any, Any, Int32, UInt, UInt, UInt32, UInt32, Any, 
UInt8, Any), - mi, owner, rettype, exctype, inferred_const, inferred, const_flags, min_world, max_world, + (Any, Any, Any, Any, Any, Int32, UInt, UInt, UInt32, UInt32, Any, UInt8, Any), + mi, rettype, exctype, inferred_const, inferred, const_flags, min_world, max_world, ipo_effects, effects, analysis_results, relocatability, edges) end GlobalRef(m::Module, s::Symbol) = ccall(:jl_module_globalref, Ref{GlobalRef}, (Any, Any), m, s) diff --git a/base/compiler/cicache.jl b/base/compiler/cicache.jl index a6ed18fe5105f..777ad819fb612 100644 --- a/base/compiler/cicache.jl +++ b/base/compiler/cicache.jl @@ -8,11 +8,9 @@ that have been created for the given method instance, stratified by world age ranges. This struct abstracts over access to this cache. """ struct InternalCodeCache - owner::Any # `jl_egal` is used for comparison end function setindex!(cache::InternalCodeCache, ci::CodeInstance, mi::MethodInstance) - @assert ci.owner === cache.owner ccall(:jl_mi_cache_insert, Cvoid, (Any, Any), mi, ci) return cache end @@ -50,11 +48,11 @@ WorldView(wvc::WorldView, wr::WorldRange) = WorldView(wvc.cache, wr) WorldView(wvc::WorldView, args...) = WorldView(wvc.cache, args...) function haskey(wvc::WorldView{InternalCodeCache}, mi::MethodInstance) - return ccall(:jl_rettype_inferred, Any, (Any, Any, UInt, UInt), wvc.cache.owner, mi, first(wvc.worlds), last(wvc.worlds)) !== nothing + return ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), mi, first(wvc.worlds), last(wvc.worlds)) !== nothing end function get(wvc::WorldView{InternalCodeCache}, mi::MethodInstance, default) - r = ccall(:jl_rettype_inferred, Any, (Any, Any, UInt, UInt), wvc.cache.owner, mi, first(wvc.worlds), last(wvc.worlds)) + r = ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), mi, first(wvc.worlds), last(wvc.worlds)) if r === nothing return default end @@ -73,7 +71,7 @@ function setindex!(wvc::WorldView{InternalCodeCache}, ci::CodeInstance, mi::Meth end function code_cache(interp::AbstractInterpreter) - cache = InternalCodeCache(cache_owner(interp)) + cache = InternalCodeCache() worlds = WorldRange(get_inference_world(interp)) return WorldView(cache, worlds) end diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 80400ad3847f3..de78880b14ad2 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -328,7 +328,7 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult; if !@isdefined edges edges = DebugInfo(result.linfo) end - return CodeInstance(result.linfo, owner, + return CodeInstance(result.linfo, widenconst(result_type), widenconst(result.exc_result), rettype_const, inferred_result, const_flags, first(result.valid_worlds), last(result.valid_worlds), # TODO: Actually do something with non-IPO effects @@ -935,7 +935,7 @@ more details. 
""" function codeinstance_for_const_with_code(interp::AbstractInterpreter, code::CodeInstance) src = codeinfo_for_const(interp, code.def, code.rettype_const) - return CodeInstance(code.def, cache_owner(interp), code.rettype, code.exctype, code.rettype_const, src, + return CodeInstance(code.def, code.rettype, code.exctype, code.rettype_const, src, Int32(0x3), code.min_world, code.max_world, code.ipo_purity_bits, code.purity_bits, code.analysis_results, code.relocatability, src.debuginfo) @@ -1109,7 +1109,7 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mod if ccall(:jl_get_module_infer, Cint, (Any,), def.module) == 0 && !generating_output(#=incremental=#false) src = retrieve_code_info(mi, get_inference_world(interp)) src isa CodeInfo || return nothing - return CodeInstance(mi, cache_owner(interp), Any, Any, nothing, src, Int32(0), + return CodeInstance(mi, Any, Any, nothing, src, Int32(0), get_inference_world(interp), get_inference_world(interp), UInt32(0), UInt32(0), nothing, UInt8(0), src.debuginfo) end diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl index 65563dab795fb..0734756696cd7 100644 --- a/base/compiler/utilities.jl +++ b/base/compiler/utilities.jl @@ -148,7 +148,7 @@ function get_staged(mi::MethodInstance, world::UInt) end function get_cached_uninferred(mi::MethodInstance, world::UInt) - ccall(:jl_cached_uninferred, Any, (Any, UInt), mi.cache, world)::CodeInstance + ccall(:jl_cached_uninferred, Any, (Any, UInt), mi, world)::CodeInstance end function retrieve_code_info(mi::MethodInstance, world::UInt) diff --git a/src/builtins.c b/src/builtins.c index a3ff0c2845c09..b038772ff773e 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -2380,7 +2380,6 @@ jl_fptr_args_t jl_get_builtin_fptr(jl_datatype_t *dt) jl_typemap_entry_t *entry = (jl_typemap_entry_t*)jl_atomic_load_relaxed(&dt->name->mt->defs); jl_method_instance_t *mi = jl_atomic_load_relaxed(&entry->func.method->unspecialized); jl_code_instance_t *ci = jl_atomic_load_relaxed(&mi->cache); - assert(ci->owner == jl_nothing); return jl_atomic_load_relaxed(&ci->specptr.fptr1); } diff --git a/src/gf.c b/src/gf.c index eb2fad8cc5465..521a9991e18bb 100644 --- a/src/gf.c +++ b/src/gf.c @@ -318,7 +318,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a jl_atomic_store_relaxed(&m->unspecialized, mi); jl_gc_wb(m, mi); - jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, + jl_code_instance_t *codeinst = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, jl_nothing, jl_nothing, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0, NULL); jl_mi_cache_insert(mi, codeinst); @@ -444,12 +444,10 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype, size_t min_world, size_t max_world, jl_debuginfo_t *edges) { - jl_value_t *owner = jl_nothing; // TODO: owner should be arg jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); while (codeinst) { if (jl_atomic_load_relaxed(&codeinst->min_world) == min_world && jl_atomic_load_relaxed(&codeinst->max_world) == max_world && - jl_egal(codeinst->owner, owner) && jl_egal(codeinst->rettype, rettype)) { if (edges == NULL) return codeinst; @@ -464,7 +462,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( codeinst = jl_atomic_load_relaxed(&codeinst->next); } codeinst = jl_new_codeinst( - mi, owner, rettype, (jl_value_t*)jl_any_type, NULL, NULL, + mi, rettype, (jl_value_t*)jl_any_type, NULL, NULL, 0, min_world, 
max_world, 0, 0, jl_nothing, 0, edges); jl_mi_cache_insert(mi, codeinst); return codeinst; @@ -483,7 +481,7 @@ JL_DLLEXPORT int jl_mi_cache_has_ci(jl_method_instance_t *mi, } JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( - jl_method_instance_t *mi, jl_value_t *owner, + jl_method_instance_t *mi, jl_value_t *rettype, jl_value_t *exctype, jl_value_t *inferred_const, jl_value_t *inferred, int32_t const_flags, size_t min_world, size_t max_world, @@ -496,7 +494,6 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( jl_code_instance_t *codeinst = (jl_code_instance_t*)jl_gc_alloc(ct->ptls, sizeof(jl_code_instance_t), jl_code_instance_type); codeinst->def = mi; - codeinst->owner = owner; jl_atomic_store_relaxed(&codeinst->min_world, min_world); jl_atomic_store_relaxed(&codeinst->max_world, max_world); codeinst->rettype = rettype; @@ -2353,13 +2350,12 @@ jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT) return unspec; } -STATIC_INLINE jl_value_t *_jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +STATIC_INLINE jl_value_t *_jl_rettype_inferred(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT { jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); while (codeinst) { if (jl_atomic_load_relaxed(&codeinst->min_world) <= min_world && - max_world <= jl_atomic_load_relaxed(&codeinst->max_world) && - jl_egal(codeinst->owner, owner)) { + max_world <= jl_atomic_load_relaxed(&codeinst->max_world)) { jl_value_t *code = jl_atomic_load_relaxed(&codeinst->inferred); if (code) @@ -2370,14 +2366,14 @@ STATIC_INLINE jl_value_t *_jl_rettype_inferred(jl_value_t *owner, jl_method_inst return (jl_value_t*)jl_nothing; } -JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT { - return (jl_value_t*)_jl_rettype_inferred(owner, mi, min_world, max_world); + return (jl_value_t*)_jl_rettype_inferred(mi, min_world, max_world); } JL_DLLEXPORT jl_value_t *jl_rettype_inferred_native(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT { - return (jl_value_t*)_jl_rettype_inferred(jl_nothing, mi, min_world, max_world); + return (jl_value_t*)_jl_rettype_inferred(mi, min_world, max_world); } JL_DLLEXPORT jl_value_t *(*const jl_rettype_inferred_addr)(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT = jl_rettype_inferred_native; @@ -2386,8 +2382,6 @@ jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi, size_t world) { jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); for (; codeinst; codeinst = jl_atomic_load_relaxed(&codeinst->next)) { - if (codeinst->owner != jl_nothing) - continue; if (jl_atomic_load_relaxed(&codeinst->min_world) <= world && world <= jl_atomic_load_relaxed(&codeinst->max_world)) { if (jl_atomic_load_relaxed(&codeinst->invoke) != NULL) return codeinst; @@ -2401,9 +2395,6 @@ jl_code_instance_t *jl_method_inferred_with_abi(jl_method_instance_t *mi JL_PROP { jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); for (; codeinst; codeinst = jl_atomic_load_relaxed(&codeinst->next)) { - if (codeinst->owner != jl_nothing) - continue; - if (jl_atomic_load_relaxed(&codeinst->min_world) <= world && world <= jl_atomic_load_relaxed(&codeinst->max_world)) { jl_value_t *code = 
jl_atomic_load_relaxed(&codeinst->inferred); if (code && (code != jl_nothing || (jl_atomic_load_relaxed(&codeinst->invoke) != NULL))) @@ -2521,7 +2512,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_code_instance_t *unspec = jl_atomic_load_relaxed(&unspecmi->cache); jl_callptr_t unspec_invoke = NULL; if (unspec && (unspec_invoke = jl_atomic_load_acquire(&unspec->invoke))) { - jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, + jl_code_instance_t *codeinst = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0, NULL); void *unspec_fptr = jl_atomic_load_relaxed(&unspec->specptr.fptr); @@ -2548,7 +2539,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t compile_option == JL_OPTIONS_COMPILE_MIN) { jl_code_info_t *src = jl_code_for_interpreter(mi, world); if (!jl_code_requires_compiler(src, 0)) { - jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, + jl_code_instance_t *codeinst = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0, NULL); jl_atomic_store_release(&codeinst->invoke, jl_fptr_interpret_call); @@ -2610,7 +2601,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_callptr_t ucache_invoke = jl_atomic_load_acquire(&ucache->invoke); if (ucache_invoke == NULL) { if ((!jl_is_method(def) || def->source == jl_nothing) && - !jl_cached_uninferred(jl_atomic_load_relaxed(&ucache->def->cache), world)) { + !jl_cached_uninferred(ucache->def, world)) { jl_throw(jl_new_struct(jl_missingcodeerror_type, (jl_value_t*)mi)); } jl_generate_fptr_for_unspecialized(ucache); @@ -2623,7 +2614,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_typeinf_timing_end(start, is_recompile); return ucache; } - codeinst = jl_new_codeinst(mi, jl_nothing, + codeinst = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0, NULL); void *unspec_fptr = jl_atomic_load_relaxed(&ucache->specptr.fptr); diff --git a/src/interpreter.c b/src/interpreter.c index a898a4a1ae34a..ab9731bc6966b 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -704,8 +704,7 @@ jl_value_t *jl_code_or_ci_for_interpreter(jl_method_instance_t *mi, size_t world ret = (jl_value_t*)src; } else { - jl_code_instance_t *cache = jl_atomic_load_relaxed(&mi->cache); - jl_code_instance_t *uninferred = jl_cached_uninferred(cache, world); + jl_code_instance_t *uninferred = jl_cached_uninferred(mi, world); if (!uninferred) { assert(mi->def.method->generator); src = jl_code_for_staged(mi, world, &uninferred); @@ -715,7 +714,7 @@ jl_value_t *jl_code_or_ci_for_interpreter(jl_method_instance_t *mi, size_t world } } else { - jl_code_instance_t *uninferred = jl_cached_uninferred(jl_atomic_load_relaxed(&mi->cache), world); + jl_code_instance_t *uninferred = jl_cached_uninferred(mi, world); ret = (jl_value_t*)uninferred; if (ret) { src = (jl_code_info_t*)jl_atomic_load_relaxed(&uninferred->inferred); diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index a5792de5eb501..143dee020c4b2 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -518,8 +518,7 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec) } else { jl_method_instance_t *mi = unspec->def; - jl_code_instance_t *uninferred = jl_cached_uninferred( - jl_atomic_load_relaxed(&mi->cache), 1); + jl_code_instance_t 
*uninferred = jl_cached_uninferred(mi, 1); assert(uninferred); src = (jl_code_info_t*)jl_atomic_load_relaxed(&uninferred->inferred); assert(src); diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc index 8a505e0ecd430..273f8e4347c44 100644 --- a/src/jl_exported_data.inc +++ b/src/jl_exported_data.inc @@ -86,6 +86,7 @@ XX(jl_method_instance_type) \ XX(jl_method_specialization_type) \ XX(jl_default_spec_type) \ + XX(jl_method_uninferred_spec_type) \ XX(jl_method_match_type) \ XX(jl_method_type) \ XX(jl_methtable_type) \ diff --git a/src/jltypes.c b/src/jltypes.c index 93776964060ed..dd4ce5c35c9b8 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3301,6 +3301,13 @@ void jl_init_types(void) JL_GC_DISABLED jl_bool_type), jl_emptysvec, 0, 0, 4); + jl_datatype_t *uninferred_spec_type = + jl_new_datatype(jl_symbol("UninferredSpec"), core, + jl_any_type, jl_emptysvec, + jl_emptysvec, + jl_emptysvec, + jl_emptysvec, 0, 0, 0); + tv = jl_svec1(tvar("D")); jl_datatype_t *jl_meth_spec_type = jl_new_datatype(jl_symbol("MethodSpecialization"), core, @@ -3313,7 +3320,7 @@ void jl_init_types(void) JL_GC_DISABLED "next", "data"), jl_svec(6, - jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type), + jl_any_type, // Union{Method, Module, MethodSpecialization} jl_any_type, jl_array_any_type, jl_any_type,/*jl_code_instance_type*/ @@ -3333,9 +3340,8 @@ void jl_init_types(void) JL_GC_DISABLED jl_code_instance_type = jl_new_datatype(jl_symbol("CodeInstance"), core, jl_any_type, jl_emptysvec, - jl_perm_symsvec(18, + jl_perm_symsvec(17, "def", - "owner", "next", "min_world", "max_world", @@ -3349,10 +3355,9 @@ void jl_init_types(void) JL_GC_DISABLED "analysis_results", "specsigflags", "precompile", "relocatability", "invoke", "specptr"), // function object decls - jl_svec(18, + jl_svec(17, jl_method_specialization_type, jl_any_type, - jl_any_type, jl_ulong_type, jl_ulong_type, jl_any_type, @@ -3369,9 +3374,9 @@ void jl_init_types(void) JL_GC_DISABLED jl_any_type, jl_any_type), // fptrs jl_emptysvec, 0, 1, 1); - jl_svecset(jl_code_instance_type->types, 2, jl_code_instance_type); - const static uint32_t code_instance_constfields[1] = { 0b000001010011100011 }; // Set fields 1, 2, 6-8, 11, 13 as const - const static uint32_t code_instance_atomicfields[1] = { 0b110110101100011100 }; // Set fields 3-5, 9, 10, 12, 14-15, 17-18 as atomic + jl_svecset(jl_code_instance_type->types, 1, jl_code_instance_type); + const static uint32_t code_instance_constfields[1] = { 0b00000101001110001 }; // Set fields 1, 2, 6-8, 11, 13 as const + const static uint32_t code_instance_atomicfields[1] = { 0b11011010110001110 }; // Set fields 3-5, 9, 10, 12, 14-15, 17-18 as atomic //Fields 4-5 are only operated on by construction and deserialization, so are const at runtime //Fields 13 and 17 must be protected by locks, and thus all operations on jl_code_instance_t are threadsafe //Except for field 9 (inferred), which is volatile unless you know which other places are currently using it @@ -3513,10 +3518,13 @@ void jl_init_types(void) JL_GC_DISABLED jl_svecset(jl_methtable_type->types, 9, jl_uint8_type); jl_svecset(jl_methtable_type->types, 10, jl_uint8_type); //jl_svecset(jl_debuginfo_type->types, 0, jl_method_instance_type); // union(jl_method_instance_type, jl_method_type, jl_symbol_type) + jl_svecset(jl_meth_spec_type->types, 0, + jl_new_struct(jl_uniontype_type, jl_meth_spec_type, + jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type))); jl_svecset(jl_meth_spec_type->types, 3, jl_code_instance_type); 
jl_svecset(jl_meth_spec_type->types, 4, jl_method_specialization_type); + jl_svecset(jl_code_instance_type->types, 15, jl_voidpointer_type); jl_svecset(jl_code_instance_type->types, 16, jl_voidpointer_type); - jl_svecset(jl_code_instance_type->types, 17, jl_voidpointer_type); jl_svecset(jl_binding_type->types, 1, jl_globalref_type); jl_svecset(jl_binding_type->types, 2, jl_binding_type); @@ -3539,6 +3547,8 @@ void jl_init_types(void) JL_GC_DISABLED jl_method_instance_type = (jl_datatype_t*)jl_apply_type1((jl_value_t*)jl_method_specialization_type, (jl_value_t*)jl_default_spec_type); jl_svecset(jl_method_type->types, 13, jl_method_instance_type); + jl_method_uninferred_spec_type = (jl_datatype_t*)jl_apply_type1((jl_value_t*)jl_method_specialization_type, (jl_value_t*)uninferred_spec_type); + jl_compute_field_offsets(jl_simplevector_type); jl_compute_field_offsets(jl_default_spec_type); jl_compute_field_offsets(jl_method_instance_type); diff --git a/src/julia.h b/src/julia.h index 6d2f216b72a66..ac7d1ca801b5b 100644 --- a/src/julia.h +++ b/src/julia.h @@ -427,7 +427,7 @@ struct _jl_method_instance_t { static inline jl_mi_default_spec_t *jl_mi_default_spec_data(jl_method_instance_t *mi) JL_NOTSAFEPOINT { return (jl_mi_default_spec_t*)(&mi[1]); -}; +} // OpaqueClosure typedef struct _jl_opaque_closure_t { @@ -443,7 +443,6 @@ typedef struct _jl_opaque_closure_t { typedef struct _jl_code_instance_t { JL_DATA_TYPE jl_method_instance_t *def; // method this is specialized from - jl_value_t *owner; // Compiler token this belongs to, `jl_nothing` is reserved for native _Atomic(struct _jl_code_instance_t*) next; // pointer to the next cache entry // world range for which this object is valid to use @@ -854,6 +853,7 @@ extern JL_DLLIMPORT jl_value_t *jl_bottom_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_default_spec_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_unionall_t *jl_method_specialization_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_method_instance_type JL_GLOBALLY_ROOTED; +extern JL_DLLIMPORT jl_datatype_t *jl_method_uninferred_spec_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_code_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_code_info_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_debuginfo_type JL_GLOBALLY_ROOTED; diff --git a/src/julia_internal.h b/src/julia_internal.h index 0e87eb5f07fe7..a34c426014f03 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -661,7 +661,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT); JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( - jl_method_instance_t *mi, jl_value_t *owner, + jl_method_instance_t *mi, jl_value_t *rettype, jl_value_t *exctype, jl_value_t *inferred_const, jl_value_t *inferred, int32_t const_flags, size_t min_world, size_t max_world, @@ -1032,7 +1032,7 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t*); JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module); JL_DLLEXPORT jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world, size_t *min_valid, size_t *max_valid, int mt_cache); jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_svec_t *sp); -JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *li JL_PROPAGATES_ROOT, size_t min_world, size_t max_world); +JL_DLLEXPORT jl_value_t 
*jl_rettype_inferred(jl_method_instance_t *li JL_PROPAGATES_ROOT, size_t min_world, size_t max_world); JL_DLLEXPORT jl_value_t *jl_rettype_inferred_native(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_code_instance_t *jl_method_inferred_with_abi(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT; @@ -1048,7 +1048,7 @@ JL_DLLEXPORT int jl_mi_try_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT, jl_code_instance_t *expected_ci, jl_code_instance_t *ci JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED); JL_DLLEXPORT int jl_mi_cache_has_ci(jl_method_instance_t *mi, jl_code_instance_t *ci) JL_NOTSAFEPOINT; -JL_DLLEXPORT jl_code_instance_t *jl_cached_uninferred(jl_code_instance_t *codeinst, size_t world); +JL_DLLEXPORT jl_code_instance_t *jl_cached_uninferred(jl_method_instance_t *uninferred_mi, size_t world); JL_DLLEXPORT jl_code_instance_t *jl_cache_uninferred(jl_method_instance_t *mi, jl_code_instance_t *checked, size_t world, jl_code_instance_t *newci JL_MAYBE_UNROOTED); JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_for_uninferred(jl_method_instance_t *mi, jl_code_info_t *src); JL_DLLEXPORT extern jl_value_t *(*const jl_rettype_inferred_addr)(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t min_world, size_t max_world) JL_NOTSAFEPOINT; diff --git a/src/method.c b/src/method.c index 3f8b997b9c452..f1a84bcd1e43c 100644 --- a/src/method.c +++ b/src/method.c @@ -700,13 +700,15 @@ JL_DLLEXPORT jl_code_info_t *jl_expand_and_resolve(jl_value_t *ex, jl_module_t * return func; } -JL_DLLEXPORT jl_code_instance_t *jl_cached_uninferred(jl_code_instance_t *codeinst, size_t world) +JL_DLLEXPORT jl_code_instance_t *jl_cached_uninferred(jl_method_instance_t *mi, size_t world) { - for (; codeinst; codeinst = jl_atomic_load_relaxed(&codeinst->next)) { - if (codeinst->owner != (void*)jl_uninferred_sym) + for (; mi; mi = jl_atomic_load_relaxed(&mi->next)) { + if (jl_typeof(mi) != (jl_value_t*)jl_method_uninferred_spec_type) continue; - if (jl_atomic_load_relaxed(&codeinst->min_world) <= world && world <= jl_atomic_load_relaxed(&codeinst->max_world)) { - return codeinst; + for (jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); codeinst; codeinst = jl_atomic_load_relaxed(&codeinst->next)) { + if (jl_atomic_load_relaxed(&codeinst->min_world) <= world && world <= jl_atomic_load_relaxed(&codeinst->max_world)) { + return codeinst; + } } } return NULL; @@ -714,26 +716,40 @@ JL_DLLEXPORT jl_code_instance_t *jl_cached_uninferred(jl_code_instance_t *codein JL_DLLEXPORT jl_code_instance_t *jl_cache_uninferred(jl_method_instance_t *mi, jl_code_instance_t *checked, size_t world, jl_code_instance_t *newci) { - while (!jl_mi_try_insert(mi, checked, newci)) { - jl_code_instance_t *new_checked = jl_atomic_load_relaxed(&mi->cache); + jl_method_instance_t *lastmi = mi; + jl_method_instance_t *uninferred_mi = mi; + while (1) { + for (; uninferred_mi; uninferred_mi = jl_atomic_load_relaxed(&uninferred_mi->next)) { + lastmi = uninferred_mi; + if (jl_typeof(uninferred_mi) != (jl_value_t*)jl_method_uninferred_spec_type) + continue; + } + if (uninferred_mi) { + break; + } + jl_method_instance_t *newmi = (jl_method_instance_t*)jl_new_struct_uninit(jl_method_uninferred_spec_type); + newmi->def = mi->def; + newmi->specTypes = mi->specTypes; + if (jl_atomic_cmpswap_acqrel(&lastmi->next, &uninferred_mi, newmi)) { + uninferred_mi = 
newmi; + break; + } + } + while (!jl_mi_try_insert(uninferred_mi, NULL, newci)) { // Check if another thread inserted a CodeInstance that covers this world - jl_code_instance_t *other = jl_cached_uninferred(new_checked, world); + jl_code_instance_t *other = jl_cached_uninferred(uninferred_mi, world); if (other) return other; - checked = new_checked; } // Successfully inserted return newci; } - - // Return a newly allocated CodeInfo for the function signature // effectively described by the tuple (specTypes, env, Method) inside linfo JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t world, jl_code_instance_t **cache) { - jl_code_instance_t *cache_ci = jl_atomic_load_relaxed(&mi->cache); - jl_code_instance_t *uninferred_ci = jl_cached_uninferred(cache_ci, world); + jl_code_instance_t *uninferred_ci = jl_cached_uninferred(mi, world); if (uninferred_ci) { // The uninferred code is in `inferred`, but that is a bit of a misnomer here. // This is the cached output the generated function (or top-level thunk). @@ -825,7 +841,7 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t } } - jl_code_instance_t *cached_ci = jl_cache_uninferred(mi, cache_ci, world, ci); + jl_code_instance_t *cached_ci = jl_cache_uninferred(mi, NULL, world, ci); if (cached_ci != ci) { func = (jl_code_info_t*)jl_copy_ast(jl_atomic_load_relaxed(&cached_ci->inferred)); assert(jl_is_code_info(func)); diff --git a/src/opaque_closure.c b/src/opaque_closure.c index c9f94f6b15746..15092dcc955cc 100644 --- a/src/opaque_closure.c +++ b/src/opaque_closure.c @@ -149,7 +149,7 @@ JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tuplet if (isinferred) { sigtype = jl_argtype_with_function(env, (jl_value_t*)argt); jl_method_instance_t *mi = jl_specializations_get_linfo((jl_method_t*)root, sigtype, jl_emptysvec); - inst = jl_new_codeinst(mi, jl_nothing, rt_ub, (jl_value_t*)jl_any_type, NULL, (jl_value_t*)ci, + inst = jl_new_codeinst(mi, rt_ub, (jl_value_t*)jl_any_type, NULL, (jl_value_t*)ci, 0, world, world, 0, 0, jl_nothing, 0, ci->debuginfo); jl_mi_cache_insert(mi, inst); } diff --git a/src/precompile_utils.c b/src/precompile_utils.c index d5f70dea5aa59..2258acab260ff 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -182,10 +182,7 @@ static int precompile_enq_specialization_(jl_method_instance_t *mi, void *closur jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); while (codeinst) { int do_compile = 0; - if (codeinst->owner != jl_nothing) { - // TODO(vchuravy) native code caching for foreign interpreters - } - else if (jl_atomic_load_relaxed(&codeinst->invoke) != jl_fptr_const_return) { + if (jl_atomic_load_relaxed(&codeinst->invoke) != jl_fptr_const_return) { jl_value_t *inferred = jl_atomic_load_relaxed(&codeinst->inferred); if (inferred && inferred != jl_nothing && diff --git a/src/rtutils.c b/src/rtutils.c index cbe14464fb286..3ae5600d5a5db 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -818,7 +818,7 @@ static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v, jl_datatype_t *vt n += jl_static_show_x(out, (jl_value_t*)li->def.module, depth, ctx); n += jl_printf(out, ". 
-> "); n += jl_static_show_x(out, jl_atomic_load_relaxed(&jl_cached_uninferred( - jl_atomic_load_relaxed(&li->cache), 1)->inferred), depth, ctx); + li, 1)->inferred), depth, ctx); } } else if (vt == jl_typename_type) { diff --git a/src/staticdata.c b/src/staticdata.c index 281c8274bfe57..492a51c616fa4 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -100,7 +100,7 @@ extern "C" { // TODO: put WeakRefs on the weak_refs list during deserialization // TODO: handle finalizers -#define NUM_TAGS 192 +#define NUM_TAGS 193 // An array of references that need to be restored from the sysimg // This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C. @@ -129,6 +129,7 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_default_spec_type); INSERT_TAG(jl_method_specialization_type); INSERT_TAG(jl_method_instance_type); + INSERT_TAG(jl_method_uninferred_spec_type); INSERT_TAG(jl_method_type); INSERT_TAG(jl_code_instance_type); INSERT_TAG(jl_linenumbernode_type); diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index e3164e6b292e1..e1c2b41c5af16 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -1204,7 +1204,7 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets, jl_a assert(jl_atomic_load_relaxed(&ci->min_world) == 1); assert(jl_atomic_load_relaxed(&ci->max_world) == ~(size_t)0); jl_method_instance_t *caller = ci->def; - if (jl_atomic_load_relaxed(&ci->inferred) && jl_rettype_inferred(ci->owner, caller, minworld, ~(size_t)0) == jl_nothing) { + if (jl_atomic_load_relaxed(&ci->inferred) && jl_rettype_inferred(caller, minworld, ~(size_t)0) == jl_nothing) { jl_mi_cache_insert(caller, ci); } //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)caller); @@ -1251,16 +1251,13 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets, jl_a jl_code_instance_t *next_ci = jl_atomic_load_relaxed(&codeinst->next); jl_atomic_store_relaxed(&codeinst->next, NULL); - jl_value_t *owner = codeinst->owner; - JL_GC_PROMISE_ROOTED(owner); - assert(jl_atomic_load_relaxed(&codeinst->min_world) == minworld); // See #53586, #53109 // assert(jl_atomic_load_relaxed(&codeinst->max_world) == WORLD_AGE_REVALIDATION_SENTINEL); assert(jl_atomic_load_relaxed(&codeinst->inferred)); jl_atomic_store_relaxed(&codeinst->max_world, maxvalid); - if (jl_rettype_inferred(owner, caller, minworld, maxvalid) != jl_nothing) { + if (jl_rettype_inferred(caller, minworld, maxvalid) != jl_nothing) { // We already got a code instance for this world age range from somewhere else - we don't need // this one. } else { diff --git a/src/toplevel.c b/src/toplevel.c index 1899c9e18db30..3d70afcda9fe2 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -601,7 +601,7 @@ int jl_needs_lowering(jl_value_t *e) JL_NOTSAFEPOINT JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_for_uninferred(jl_method_instance_t *mi, jl_code_info_t *src) { // Do not compress this, we expect it to be shortlived. 
- jl_code_instance_t *ci = jl_new_codeinst(mi, (jl_value_t*)jl_uninferred_sym, + jl_code_instance_t *ci = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, jl_nothing, (jl_value_t*)src, 0, src->min_world, src->max_world, 0, 0, NULL, 1, NULL); @@ -613,11 +613,16 @@ JL_DLLEXPORT jl_method_instance_t *jl_method_instance_for_thunk(jl_code_info_t * jl_method_instance_t *mi = jl_new_method_instance_uninit(); mi->specTypes = (jl_value_t*)jl_emptytuple_type; mi->def.module = module; - JL_GC_PUSH1(&mi); + jl_method_instance_t *uninferred_mi = NULL; + JL_GC_PUSH2(&mi, &uninferred_mi); + + uninferred_mi = (jl_method_instance_t *)jl_new_struct_uninit(jl_method_uninferred_spec_type); + jl_atomic_store_relaxed(&mi->next, uninferred_mi); + jl_gc_wb(mi, uninferred_mi); jl_code_instance_t *ci = jl_new_codeinst_for_uninferred(mi, src); - jl_atomic_store_relaxed(&mi->cache, ci); - jl_gc_wb(mi, ci); + jl_atomic_store_relaxed(&uninferred_mi->cache, ci); + jl_gc_wb(uninferred_mi, ci); JL_GC_POP(); return mi; From 5d856dcfe81df56334433d1b9f58330050df3dd9 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 7 May 2024 14:54:17 +0000 Subject: [PATCH 4/6] Fixes --- base/deprecated.jl | 15 +++++++++++++++ src/jltypes.c | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/base/deprecated.jl b/base/deprecated.jl index 4de675028f6cc..1003183f7f36d 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -505,3 +505,18 @@ end @deprecate invpermute!!(a, p::AbstractVector{<:Integer}) invpermute!(a, p) false # END 1.11 deprecations + +# BEGIN 1.12 deprecations + +# This interface is internal, but relied on in some packages, so +# this allows for smoother upgrades. To be removed when packages have +# migrated. +function Base.getproperty(mi::MethodInstance, s::Symbol) + if s === :sparam_vals + return getfield(getfield(mi, :data), s) + else + return getfield(mi, s) + end +end + +# END 1.12 deprecations diff --git a/src/jltypes.c b/src/jltypes.c index dd4ce5c35c9b8..fe74f363c3193 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3519,7 +3519,7 @@ void jl_init_types(void) JL_GC_DISABLED jl_svecset(jl_methtable_type->types, 10, jl_uint8_type); //jl_svecset(jl_debuginfo_type->types, 0, jl_method_instance_type); // union(jl_method_instance_type, jl_method_type, jl_symbol_type) jl_svecset(jl_meth_spec_type->types, 0, - jl_new_struct(jl_uniontype_type, jl_meth_spec_type, + jl_new_struct(jl_uniontype_type, jl_method_specialization_type, jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type))); jl_svecset(jl_meth_spec_type->types, 3, jl_code_instance_type); jl_svecset(jl_meth_spec_type->types, 4, jl_method_specialization_type); From 9ba6ef7162bd7082220e5460461c952baedc886d Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 7 May 2024 21:14:59 +0000 Subject: [PATCH 5/6] InternalCodeCache update --- base/boot.jl | 10 ++++++---- base/compiler/cicache.jl | 42 +++++++++++++++++++++++++++++++++------ base/compiler/compiler.jl | 2 ++ src/codegen.cpp | 2 +- src/jltypes.c | 2 +- src/julia.h | 1 + 6 files changed, 47 insertions(+), 12 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index 497e80c7b519e..235dc955bf9e1 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -501,9 +501,8 @@ struct LineInfoNode # legacy support for aiding Serializer.deserialize of old IR LineInfoNode(mod::Module, @nospecialize(method), file::Symbol, line::Int32, inlined_at::Int32) = new(mod, method, file, line, inlined_at) end - function CodeInstance( - mi::MethodInstance, @nospecialize(rettype), 
@nospecialize(exctype), @nospecialize(inferred_const), + mi::MethodSpecialization, @nospecialize(rettype), @nospecialize(exctype), @nospecialize(inferred_const), @nospecialize(inferred), const_flags::Int32, min_world::UInt, max_world::UInt, ipo_effects::UInt32, effects::UInt32, @nospecialize(analysis_results), relocatability::UInt8, edges::DebugInfo) @@ -649,12 +648,12 @@ Symbol(s::Symbol) = s # module providing the IR object model module IR -export CodeInfo, MethodInstance, CodeInstance, GotoNode, GotoIfNot, ReturnNode, +export CodeInfo, MethodSpecialization, MethodInstance, CodeInstance, GotoNode, GotoIfNot, ReturnNode, NewvarNode, SSAValue, SlotNumber, Argument, PiNode, PhiNode, PhiCNode, UpsilonNode, DebugInfo, Const, PartialStruct, InterConditional, EnterNode -using Core: CodeInfo, MethodInstance, CodeInstance, GotoNode, GotoIfNot, ReturnNode, +using Core: CodeInfo, MethodSpecialization, MethodInstance, CodeInstance, GotoNode, GotoIfNot, ReturnNode, NewvarNode, SSAValue, SlotNumber, Argument, PiNode, PhiNode, PhiCNode, UpsilonNode, DebugInfo, Const, PartialStruct, InterConditional, EnterNode @@ -1006,6 +1005,9 @@ const check_top_bit = check_sign_bit EnterNode(old::EnterNode, new_dest::Int) = isdefined(old, :scope) ? EnterNode(new_dest, old.scope) : EnterNode(new_dest) +eval(Core, :((MS::Type{<:MethodSpecialization})(def::Union{Method, Module, MethodSpecialization}, abi::Type{<:Tuple}) = + $(Expr(:new, :MS, :def, :abi)))) + include(Core, "optimized_generics.jl") ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Core, true) diff --git a/base/compiler/cicache.jl b/base/compiler/cicache.jl index 777ad819fb612..d2442e9a8cf5d 100644 --- a/base/compiler/cicache.jl +++ b/base/compiler/cicache.jl @@ -3,15 +3,35 @@ """ struct InternalCodeCache -Internally, each `MethodInstance` keep a unique global cache of code instances -that have been created for the given method instance, stratified by world age -ranges. This struct abstracts over access to this cache. +The internal code cache is keyed on type specializations, represented by +MethodSpecialization{DefaultSpec} aka MethodInstance. External abstract +interpreters may use this same structure by using a different `Spec` for their +`MethodSpecialization{Spec}`. `InternalCodeCache` will match such specializations +by type. Additionally, it is possible to specialize methods on properties other +than types, but this requires custom caching logic. `InternalCodeCache` currently +only supports type-based specialization. """ struct InternalCodeCache + mitype::DataType # <: MethodSpecialization, but stored as DataType for efficient === + InternalCodeCache(T::Type{<:MethodSpecialization}) = + new(T) end +InternalCodeCache() = InternalCodeCache(MethodInstance) function setindex!(cache::InternalCodeCache, ci::CodeInstance, mi::MethodInstance) - ccall(:jl_mi_cache_insert, Cvoid, (Any, Any), mi, ci) + ms::MethodSpecialization = mi + while typeof(ms) !== cache.mitype + if !isdefined(ms, :next) + # No specialization for this spec. Try to allocate it now. + newms = cache.mitype(mi.def, mi.specTypes) + if @atomiconce :sequentially_consistent (ms.next = newms) + ms = newms + break + end + end + ms = @atomic :acquire ms.next + end + ccall(:jl_mi_cache_insert, Cvoid, (Any, Any), ms, ci) return cache end @@ -48,11 +68,21 @@ WorldView(wvc::WorldView, wr::WorldRange) = WorldView(wvc.cache, wr) WorldView(wvc::WorldView, args...) = WorldView(wvc.cache, args...) 
function haskey(wvc::WorldView{InternalCodeCache}, mi::MethodInstance) - return ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), mi, first(wvc.worlds), last(wvc.worlds)) !== nothing + ms::MethodSpecialization = mi + while typeof(ms) !== wvc.cache.mitype + isdefined(ms, :next) || return false + ms = ms.next + end + return ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), ms, first(wvc.worlds), last(wvc.worlds)) !== nothing end function get(wvc::WorldView{InternalCodeCache}, mi::MethodInstance, default) - r = ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), mi, first(wvc.worlds), last(wvc.worlds)) + ms::MethodSpecialization = mi + while typeof(ms) !== wvc.cache.mitype + isdefined(ms, :next) || return default + ms = ms.next + end + r = ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), ms, first(wvc.worlds), last(wvc.worlds)) if r === nothing return default end diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl index 12d6d5eb38764..b30359f49a408 100644 --- a/base/compiler/compiler.jl +++ b/base/compiler/compiler.jl @@ -6,11 +6,13 @@ using Core.Intrinsics, Core.IR import Core: print, println, show, write, unsafe_write, stdout, stderr, _apply_iterate, svec, apply_type, Builtin, IntrinsicFunction, + MethodSpecialization, MethodInstance, CodeInstance, MethodTable, MethodMatch, PartialOpaque, TypeofVararg const getproperty = Core.getfield const setproperty! = Core.setfield! +const setpropertyonce! = Core.setfieldonce! const swapproperty! = Core.swapfield! const modifyproperty! = Core.modifyfield! const replaceproperty! = Core.replacefield! diff --git a/src/codegen.cpp b/src/codegen.cpp index c8efeb75d85ed..b2fe0b096e0ec 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5162,7 +5162,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR jl_cgval_t result; if (lival.constant) { jl_method_instance_t *mi = (jl_method_instance_t*)lival.constant; - assert(jl_is_method_instance(mi)); + assert(jl_is_method_specialization(mi)); if (mi == ctx.linfo) { // handle self-recursion specially jl_returninfo_t::CallingConv cc = jl_returninfo_t::CallingConv::Boxed; diff --git a/src/jltypes.c b/src/jltypes.c index fe74f363c3193..4a4e42e49d2ed 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3332,7 +3332,7 @@ void jl_init_types(void) JL_GC_DISABLED (jl_unionall_t*)jl_meth_spec_type->name->wrapper; // These fields should be constant, but Serialization wants to mutate them in initialization //const static uint32_t method_instance_constfields[1] = { 0x00000007 }; // (1<<0)|(1<<1); - const static uint32_t method_instance_atomicfields[1] = { 0x0000008 }; // (1<<3) + const static uint32_t method_instance_atomicfields[1] = { 0x0000018 }; // (1<<3)|(1<<4) //Fields 3 and 4 must be protected by method->write_lock, and thus all operations on jl_method_instance_t are threadsafe. 
TODO: except inInference //jl_method_instance_type->name->constfields = method_instance_constfields; jl_meth_spec_type->name->atomicfields = method_instance_atomicfields; diff --git a/src/julia.h b/src/julia.h index ac7d1ca801b5b..762de864cf678 100644 --- a/src/julia.h +++ b/src/julia.h @@ -1528,6 +1528,7 @@ static inline int jl_field_isconst(jl_datatype_t *st, int i) JL_NOTSAFEPOINT #define jl_is_newvarnode(v) jl_typetagis(v,jl_newvarnode_type) #define jl_is_linenode(v) jl_typetagis(v,jl_linenumbernode_type) #define jl_is_method_instance(v) jl_typetagis(v,jl_method_instance_type) +#define jl_is_method_specialization(v) jl_isa((jl_value_t*)v,(jl_value_t*)jl_method_specialization_type) #define jl_is_code_instance(v) jl_typetagis(v,jl_code_instance_type) #define jl_is_code_info(v) jl_typetagis(v,jl_code_info_type) #define jl_is_method(v) jl_typetagis(v,jl_method_type) From cb6630053acb707da969b9b495e649348385e23c Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 14 May 2024 04:54:53 +0000 Subject: [PATCH 6/6] fixup --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 70bf44bb584ae..3463a0cf7a16b 100644 --- a/src/Makefile +++ b/src/Makefile @@ -547,7 +547,7 @@ $(addprefix clang-tidy-,$(CODEGEN_SRCS)): DEBUGFLAGS_CLANG += -DJL_LIBRARY_EXPOR # Add C files as a target of `analyzesrc` and `analyzegc` and `tidysrc` tidysrc: $(addprefix clang-tidy-,$(CODEGEN_SRCS) $(SRCS)) analyzesrc: $(addprefix clang-sa-,$(CODEGEN_SRCS) $(SRCS)) -analyzegc: $(addprefix clang-sagc-,$(filter-out $(basename $(SKIP_GC_CHECK)),$(SRCS))) +analyzegc: $(addprefix clang-sagc-,$(filter-out $(basename $(SKIP_GC_CHECK)),$(CODEGEN_SRCS) $(SRCS))) analyze: analyzesrc analyzegc tidysrc clean-analyzegc:
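
Below is a rough downstream-migration sketch (not part of the patch series) of what the `sparam_vals` move means for code that reached into `MethodInstance` internals. It assumes the series is applied on a 1.12-dev build; `g` and its call signature are invented placeholders, and `Base.method_instances` is used only as a convenient internal reflection helper.

    # Illustrative only; assumes this patch series is applied. `g` is a placeholder.
    g(x::T) where {T} = x
    g(1)  # make sure a specialization for g(::Int) exists

    mi = first(Base.method_instances(g, Tuple{Int}))  # a MethodInstance

    # New layout: the static parameters live in the DefaultSpecialization payload.
    sp_new = mi.data.sparam_vals

    # Old-style access keeps working on 1.12 via the getproperty shim added in
    # base/deprecated.jl, which forwards :sparam_vals to the DefaultSpecialization
    # stored in the :data field.
    sp_old = mi.sparam_vals

    @assert sp_old === sp_new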
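
A second, more speculative sketch (type and cache names invented, not taken from the patches) of how an external AbstractInterpreter might key its code cache on its own specialization payload, using the new `InternalCodeCache(::Type{<:MethodSpecialization})` constructor and the `next`-chain walk added in base/compiler/cicache.jl:

    # Hypothetical: `MyInterpSpec` and `MY_CACHE` are invented names. The point is
    # that cache lookup is now keyed by the concrete MethodSpecialization{D} type
    # instead of a per-CodeInstance `owner` token (which this series removes).
    struct MyInterpSpec
        # interpreter-specific per-specialization payload fields would go here
    end

    const MySpecialization = Core.MethodSpecialization{MyInterpSpec}

    # setindex!/get/haskey on this cache walk a MethodInstance's `next` chain
    # until they find (or, for setindex!, append) a node whose concrete type is
    # MySpecialization, and store CodeInstances on that node.
    const MY_CACHE = Core.Compiler.InternalCodeCache(MySpecialization)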