diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index fe3983dfa1ef2..8c7f38e8d4342 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2345,7 +2345,7 @@ function abstract_finalizer(interp::AbstractInterpreter, argtypes::Vector{Any}, finalizer_argvec = Any[argtypes[2], argtypes[3]] call = abstract_call(interp, ArgInfo(nothing, finalizer_argvec), StmtInfo(false, false), sv, #=max_methods=#1)::Future return Future{CallMeta}(call, interp, sv) do call, interp, sv - return CallMeta(Nothing, Any, Effects(), FinalizerInfo(call.info, call.effects)) + return CallMeta(Nothing, Any, Effects(), IndirectCallInfo(call.info, call.effects, false)) end end return Future(CallMeta(Nothing, Any, Effects(), NoCallInfo())) @@ -2679,6 +2679,8 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), return Future(abstract_eval_isdefinedglobal(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.get_binding_type return Future(abstract_eval_get_binding_type(interp, sv, argtypes)) + elseif f === Core._task + return abstract_eval_task_builtin(interp, arginfo, si, sv) end rt = abstract_call_builtin(interp, f, arginfo, sv) ft = popfirst!(argtypes) @@ -3208,6 +3210,58 @@ function abstract_eval_splatnew(interp::AbstractInterpreter, e::Expr, sstate::St return RTEffects(rt, Any, effects) end +function abstract_eval_task_builtin(interp::AbstractInterpreter, arginfo::ArgInfo, si::StmtInfo, sv::AbsIntState) + (; fargs, argtypes) = arginfo + la = length(argtypes) + ๐•ƒแตข = typeinf_lattice(interp) + if !isempty(argtypes) && !isvarargtype(argtypes[end]) + if !(3 <= la <= 4) + return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) + end + elseif isempty(argtypes) || la > 5 + return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) + end + size_arg = argtypes[3] + if !hasintersect(widenconst(size_arg), Int) + return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, 
NoCallInfo())) + end + func_arg = argtypes[2] + + # Handle optional Method/CodeInstance/Type argument (4th parameter) as invoke + if la == 4 + invoke_args = Any[Const(Core.invoke), func_arg, argtypes[4]] + invoke_arginfo = ArgInfo(nothing, invoke_args) + invoke_future = abstract_invoke(interp, invoke_arginfo, si, sv) + return Future{CallMeta}(invoke_future, interp, sv) do invoke_result, interp, sv + fetch_type = widenconst(invoke_result.rt) + fetch_error = widenconst(invoke_result.exct) + task_effects = invoke_result.effects + if fetch_type === Any && fetch_error === Any + rt_result = Task + else + rt_result = PartialTask(fetch_type, fetch_error) + end + info_result = IndirectCallInfo(invoke_result.info, task_effects, true) + return CallMeta(rt_result, Any, Effects(), info_result) + end + end + + # Otherwise use abstract_call for function analysis + callinfo_future = abstract_call(interp, ArgInfo(nothing, Any[func_arg]), StmtInfo(true, si.saw_latestworld), sv, #=max_methods=#1) + return Future{CallMeta}(callinfo_future, interp, sv) do callinfo, interp, sv + fetch_type = widenconst(callinfo.rt) + fetch_error = widenconst(callinfo.exct) + task_effects = callinfo.effects + if fetch_type === Any && fetch_error === Any + rt_result = Task + else + rt_result = PartialTask(fetch_type, fetch_error) + end + info_result = IndirectCallInfo(callinfo.info, task_effects, true) + return CallMeta(rt_result, Any, Effects(), info_result) + end +end + function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) ๐•ƒแตข = typeinf_lattice(interp) @@ -4030,6 +4084,8 @@ end fields[i] = a end anyrefine && return PartialStruct(๐•ƒแตข, rt.typ, _getundefs(rt), fields) + elseif isa(rt, PartialTask) + return rt # already widened, by construction end if isa(rt, PartialOpaque) return rt # XXX: this case was missed in #39512 diff --git a/Compiler/src/abstractlattice.jl b/Compiler/src/abstractlattice.jl index 4d0accedfc765..c8485174580ea 
100644 --- a/Compiler/src/abstractlattice.jl +++ b/Compiler/src/abstractlattice.jl @@ -24,13 +24,13 @@ is_valid_lattice_norec(::ConstsLattice, @nospecialize(elem)) = isa(elem, Const) """ struct PartialsLattice{๐•ƒ<:AbstractLattice} <: AbstractLattice -A lattice extending a base lattice `๐•ƒ` and adjoining `PartialStruct` and `PartialOpaque`. +A lattice extending a base lattice `๐•ƒ` and adjoining `PartialStruct`, `PartialOpaque`, and `PartialTask`. """ struct PartialsLattice{๐•ƒ<:AbstractLattice} <: AbstractLattice parent::๐•ƒ end widenlattice(๐•ƒ::PartialsLattice) = ๐•ƒ.parent -is_valid_lattice_norec(::PartialsLattice, @nospecialize(elem)) = isa(elem, PartialStruct) || isa(elem, PartialOpaque) +is_valid_lattice_norec(::PartialsLattice, @nospecialize(elem)) = isa(elem, PartialStruct) || isa(elem, PartialOpaque) || isa(elem, PartialTask) """ struct ConditionalsLattice{๐•ƒ<:AbstractLattice} <: AbstractLattice @@ -191,6 +191,7 @@ information that would not be available from the type itself. @nospecializeinfer function has_nontrivial_extended_info(๐•ƒ::PartialsLattice, @nospecialize t) isa(t, PartialStruct) && return true isa(t, PartialOpaque) && return true + isa(t, PartialTask) && return true return has_nontrivial_extended_info(widenlattice(๐•ƒ), t) end @nospecializeinfer function has_nontrivial_extended_info(๐•ƒ::ConstsLattice, @nospecialize t) @@ -223,6 +224,7 @@ that should be forwarded along with constant propagation. 
# return false end isa(t, PartialOpaque) && return true + isa(t, PartialTask) && return true return is_const_prop_profitable_arg(widenlattice(๐•ƒ), t) end @nospecializeinfer function is_const_prop_profitable_arg(๐•ƒ::ConstsLattice, @nospecialize t) @@ -246,6 +248,7 @@ end @nospecializeinfer function is_forwardable_argtype(๐•ƒ::PartialsLattice, @nospecialize x) isa(x, PartialStruct) && return true isa(x, PartialOpaque) && return true + isa(x, PartialTask) && return true return is_forwardable_argtype(widenlattice(๐•ƒ), x) end @nospecializeinfer function is_forwardable_argtype(๐•ƒ::ConstsLattice, @nospecialize x) diff --git a/Compiler/src/ssair/inlining.jl b/Compiler/src/ssair/inlining.jl index 324b43db3e797..ba4c03ca5d04a 100644 --- a/Compiler/src/ssair/inlining.jl +++ b/Compiler/src/ssair/inlining.jl @@ -1219,6 +1219,37 @@ function narrow_opaque_closure!(ir::IRCode, stmt::Expr, @nospecialize(info::Call stmt.args[3] = newT end end + return nothing +end + +function handle_task_call!(ir::IRCode, idx::Int, stmt::Expr, info::IndirectCallInfo, state::InliningState) + length(stmt.args) == 3 || return + # Extract the CodeInstance from the inference result if available + info_edge = extract_indirect_invoke(info; check_fully_covers=false) + info_edge === nothing && return nothing + info, edge = info_edge + case = compileable_specialization(edge, Effects(), InliningEdgeTracker(state), info, state) + case === nothing && return nothing + # Append the CodeInstance as a third argument to the _task call + # Core._task(func, size) becomes Core._task(func, size, ci) + push!(stmt.args, case.invoke) + ir[SSAValue(idx)][:stmt] = stmt + return nothing +end + +function extract_indirect_invoke(info::IndirectCallInfo; check_fully_covers::Bool) + info = info.info + info isa MethodResultPure && (info = info.info) + info isa ConstCallInfo && (info = info.call) + info isa MethodMatchInfo || return nothing + length(info.edges) == length(info.results) == 1 || return nothing + match = 
info.results[1]::MethodMatch + if check_fully_covers + match.fully_covers || return nothing + end + edge = info.edges[1] + edge === nothing && return nothing + return info, edge end # As a matter of convenience, this pass also computes effect-freenes. @@ -1288,7 +1319,8 @@ function process_simple!(todo::Vector{Pair{Int,Any}}, ir::IRCode, idx::Int, flag f !== modifyfield! && f !== Core.modifyglobal! && f !== Core.memoryrefmodify! && - f !== atomic_pointermodify) + f !== atomic_pointermodify && + f !== Core._task) # No inlining defined for most builtins (just invoke/apply/typeassert/finalizer), so attempt an early exit for them return nothing end @@ -1538,16 +1570,10 @@ function handle_opaque_closure_call!(todo::Vector{Pair{Int,Any}}, return nothing end -function handle_modifyop!_call!(ir::IRCode, idx::Int, stmt::Expr, info::ModifyOpInfo, state::InliningState) - info = info.info - info isa MethodResultPure && (info = info.info) - info isa ConstCallInfo && (info = info.call) - info isa MethodMatchInfo || return nothing - length(info.edges) == length(info.results) == 1 || return nothing - match = info.results[1]::MethodMatch - match.fully_covers || return nothing - edge = info.edges[1] - edge === nothing && return nothing +function handle_modifyop!_call!(ir::IRCode, idx::Int, stmt::Expr, info::IndirectCallInfo, state::InliningState) + info_edge = extract_indirect_invoke(info; check_fully_covers=true) + info_edge === nothing && return nothing + info, edge = info_edge case = compileable_specialization(edge, Effects(), InliningEdgeTracker(state), info, state) case === nothing && return nothing stmt.head = :invoke_modify @@ -1556,7 +1582,7 @@ function handle_modifyop!_call!(ir::IRCode, idx::Int, stmt::Expr, info::ModifyOp return nothing end -function handle_finalizer_call!(ir::IRCode, idx::Int, stmt::Expr, info::FinalizerInfo, +function handle_finalizer_call!(ir::IRCode, idx::Int, stmt::Expr, info::IndirectCallInfo, state::InliningState) # Finalizers don't return values, 
so if their execution is not observable, # we can just not register them @@ -1649,14 +1675,22 @@ function assemble_inline_todo!(ir::IRCode, state::InliningState) end # handle special cased builtins + f = sig.f if isa(info, OpaqueClosureCallInfo) handle_opaque_closure_call!(todo, ir, idx, stmt, info, flag, sig, state) - elseif isa(info, ModifyOpInfo) - handle_modifyop!_call!(ir, idx, stmt, info, state) - elseif sig.f === Core.invoke + elseif isa(info, IndirectCallInfo) + if f === Core.finalizer + handle_finalizer_call!(ir, idx, stmt, info, state) + elseif f === modifyfield! || + f === Core.modifyglobal! || + f === Core.memoryrefmodify! || + f === atomic_pointermodify + handle_modifyop!_call!(ir, idx, stmt, info, state) + elseif f === Core._task + handle_task_call!(ir, idx, stmt, info, state) + end + elseif f === Core.invoke handle_invoke_call!(todo, ir, idx, stmt, info, flag, sig, state) - elseif isa(info, FinalizerInfo) - handle_finalizer_call!(ir, idx, stmt, info, state) else # cascade to the generic (and extendable) handler handle_call!(todo, ir, idx, stmt, info, flag, sig, state) @@ -1718,6 +1752,8 @@ function early_inline_special_case(ir::IRCode, stmt::Expr, flag::UInt32, elseif โŠ‘(optimizer_lattice(state.interp), cond, Bool) && stmt.args[3] === stmt.args[4] return SomeCase(stmt.args[3]) end + elseif f === Core.task_result_type + return SomeCase(quoted(instanceof_tfunc(type)[1])) end return nothing end diff --git a/Compiler/src/ssair/passes.jl b/Compiler/src/ssair/passes.jl index b8b4a6dd4d655..0ef17ecdd8eee 100644 --- a/Compiler/src/ssair/passes.jl +++ b/Compiler/src/ssair/passes.jl @@ -1298,7 +1298,7 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing) elseif is_known_call(stmt, Core.finalizer, compact) 3 <= length(stmt.args) <= 5 || continue info = compact[SSAValue(idx)][:info] - if isa(info, FinalizerInfo) + if isa(info, IndirectCallInfo) is_finalizer_inlineable(info.effects) || continue else # Inlining performs legality 
checks on the finalizer to determine @@ -1673,7 +1673,7 @@ function try_resolve_finalizer!(ir::IRCode, alloc_idx::Int, finalizer_idx::Int, finalizer_stmt = ir[SSAValue(finalizer_idx)][:stmt] argexprs = Any[finalizer_stmt.args[2], finalizer_stmt.args[3]] - flag = info isa FinalizerInfo ? flags_for_effects(info.effects) : IR_FLAG_NULL + flag = isa(info, IndirectCallInfo) ? flags_for_effects(info.effects) : IR_FLAG_NULL if length(finalizer_stmt.args) >= 4 inline = finalizer_stmt.args[4] if inline === nothing diff --git a/Compiler/src/stmtinfo.jl b/Compiler/src/stmtinfo.jl index 525fe0cc222b7..5cfa54ca74332 100644 --- a/Compiler/src/stmtinfo.jl +++ b/Compiler/src/stmtinfo.jl @@ -454,33 +454,31 @@ end add_edges_impl(edges::Vector{Any}, info::ReturnTypeCallInfo) = add_edges!(edges, info.info) """ - info::FinalizerInfo <: CallInfo - -Represents the information of a potential (later) call to the finalizer on the given -object type. -""" -struct FinalizerInfo <: CallInfo - info::CallInfo # the callinfo for the finalizer call - effects::Effects # the effects for the finalizer call -end -# merely allocating a finalizer does not imply edges (unless it gets inlined later) -add_edges_impl(::Vector{Any}, ::FinalizerInfo) = nothing - -""" - info::ModifyOpInfo <: CallInfo - -Represents a resolved call of one of: - - `modifyfield!(obj, name, op, x, [order])` - - `modifyglobal!(mod, var, op, x, order)` - - `memoryrefmodify!(memref, op, x, order, boundscheck)` - - `Intrinsics.atomic_pointermodify(ptr, op, x, order)` - -`info.info` wraps the call information of `op(getval(), x)`. -""" -struct ModifyOpInfo <: CallInfo - info::CallInfo # the callinfo for the `op(getval(), x)` call + info::IndirectCallInfo <: CallInfo + +Represents information about a call that involves an indirect/nested function call. 
+Used for: + - `modifyfield!(obj, name, op, x, [order])` where `op(getval(), x)` is called + - `modifyglobal!(mod, var, op, x, order)` where `op(getval(), x)` is called + - `memoryrefmodify!(memref, op, x, order, boundscheck)` where `op(getval(), x)` is called + - `Intrinsics.atomic_pointermodify(ptr, op, x, order)` where `op(getval(), x)` is called + - `Core._task(f, size)` where `f()` will be called when the task runs + - `Core.finalizer(f, obj)` where `f(obj)` will be called during garbage collection + +Contains the `CallInfo` for the indirect function call, its effects, and whether +the indirect call should contribute edges for invalidation tracking. +""" +struct IndirectCallInfo <: CallInfo + info::CallInfo # the callinfo for the indirect function call + effects::Effects # the effects for the indirect function call + add_edges::Bool # whether to add edges for invalidation tracking +end +function add_edges_impl(edges::Vector{Any}, info::IndirectCallInfo) + if info.add_edges + add_edges!(edges, info.info) + end + # otherwise add no edges (e.g., for finalizers that don't imply edges unless inlined) end -add_edges_impl(edges::Vector{Any}, info::ModifyOpInfo) = add_edges!(edges, info.info) struct VirtualMethodMatchInfo <: CallInfo info::Union{MethodMatchInfo,UnionSplitInfo,InvokeCallInfo} diff --git a/Compiler/src/tfuncs.jl b/Compiler/src/tfuncs.jl index 71719d75144b3..9659641577174 100644 --- a/Compiler/src/tfuncs.jl +++ b/Compiler/src/tfuncs.jl @@ -579,6 +579,7 @@ add_tfunc(Core.sizeof, 1, 1, sizeof_tfunc, 1) end add_tfunc(nfields, 1, 1, nfields_tfunc, 1) add_tfunc(Core._expr, 1, INT_INF, @nospecs((๐•ƒ::AbstractLattice, args...)->Expr), 100) + add_tfunc(svec, 0, INT_INF, @nospecs((๐•ƒ::AbstractLattice, args...)->SimpleVector), 20) @nospecs function _svec_ref_tfunc(๐•ƒ::AbstractLattice, s, i) if isa(s, Const) && isa(i, Const) @@ -1143,6 +1144,8 @@ end end end s00 = s + elseif isa(s00, PartialTask) + s00 = Task end return _getfield_tfunc(widenlattice(๐•ƒ), s00, 
name, setfield) end @@ -1436,7 +1439,7 @@ end elseif isconcretetype(RT) && has_nontrivial_extended_info(๐•ƒแตข, TF2) # isconcrete condition required to form a PartialStruct RT = PartialStruct(fallback_lattice, RT, Union{Nothing,Bool}[false,false], Any[TF, TF2]) end - info = ModifyOpInfo(callinfo.info) + info = IndirectCallInfo(callinfo.info, callinfo.effects, true) return CallMeta(RT, Any, Effects(), info) end end @@ -2346,6 +2349,7 @@ const _PURE_BUILTINS = Any[ tuple, svec, ===, + Core.task_result_type, typeof, nfields, ] @@ -2407,6 +2411,7 @@ const _INACCESSIBLEMEM_BUILTINS = Any[ fieldtype, isa, nfields, + Core.task_result_type, throw, Core.throw_methoderror, tuple, @@ -2615,6 +2620,7 @@ const _EFFECTS_KNOWN_BUILTINS = Any[ # setglobalonce!, swapfield!, # swapglobal!, + Core.task_result_type, throw, tuple, typeassert, @@ -3050,7 +3056,7 @@ function intrinsic_effects(f::IntrinsicFunction, argtypes::Vector{Any}) # llvmcall can do arbitrary things return Effects() elseif f === atomic_pointermodify - # atomic_pointermodify has memory effects, plus any effects from the ModifyOpInfo + # atomic_pointermodify has memory effects, plus any effects from the IndirectCallInfo return Effects() end is_effect_free = _is_effect_free_infer(f) @@ -3262,6 +3268,15 @@ add_tfunc(replaceglobal!, 4, 6, @nospecs((๐•ƒ::AbstractLattice, args...)->Any), add_tfunc(setglobalonce!, 3, 5, @nospecs((๐•ƒ::AbstractLattice, args...)->Bool), 3) add_tfunc(Core.get_binding_type, 2, 2, @nospecs((๐•ƒ::AbstractLattice, args...)->Type), 0) +@nospecs function task_result_type_tfunc(๐•ƒ::AbstractLattice, T) + hasintersect(widenconst(T), Task) || return Union{} + if T isa PartialTask + return Type{widenconst(T.fetch_type)} + end + return Type +end +add_tfunc(Core.task_result_type, 1, 1, task_result_type_tfunc, 0) + # foreigncall # =========== diff --git a/Compiler/src/typeinfer.jl b/Compiler/src/typeinfer.jl index b729ca0bd34ab..a331527820d1e 100644 --- a/Compiler/src/typeinfer.jl +++ 
b/Compiler/src/typeinfer.jl @@ -643,6 +643,9 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter, cycleid:: if isa(result_type, Const) rettype_const = result_type.val const_flags = is_result_constabi_eligible(result) ? 0x3 : 0x2 + elseif isa(result_type, PartialTask) + rettype_const = result_type + const_flags = 0x2 elseif isa(result_type, PartialOpaque) rettype_const = result_type const_flags = 0x2 @@ -1154,6 +1157,8 @@ function cached_return_type(code::CodeInstance) return PartialStruct(fallback_lattice, rettype, undefs, fields) elseif isa(rettype_const, PartialOpaque) && rettype <: Core.OpaqueClosure return rettype_const + elseif isa(rettype_const, PartialTask) && rettype !== PartialTask + return rettype_const elseif isa(rettype_const, InterConditional) && rettype !== InterConditional return rettype_const elseif isa(rettype_const, InterMustAlias) && rettype !== InterMustAlias diff --git a/Compiler/src/typelattice.jl b/Compiler/src/typelattice.jl index f4c3b051d3e3f..edd31bc54cc5a 100644 --- a/Compiler/src/typelattice.jl +++ b/Compiler/src/typelattice.jl @@ -4,9 +4,9 @@ # structs/constants # ##################### -# N.B.: Const/PartialStruct/InterConditional are defined in Core, to allow them to be used +# N.B.: Const/PartialStruct/InterConditional/PartialTask are defined in Core, to allow them to be used # inside the global code cache. 
-import Core: Const, InterConditional, PartialStruct +import Core: Const, InterConditional, PartialStruct, PartialTask function may_form_limited_typ(@nospecialize(aty), @nospecialize(bty), @nospecialize(xty)) if aty isa LimitedAccuracy @@ -514,6 +514,14 @@ end elseif isa(b, PartialOpaque) return false end + if isa(a, PartialTask) + if isa(b, PartialTask) + return โŠ‘(lattice, a.fetch_type, b.fetch_type) && โŠ‘(lattice, a.fetch_error, b.fetch_error) + end + return โŠ‘(widenlattice(lattice), Task, b) + elseif isa(b, PartialTask) + return false + end return โŠ‘(widenlattice(lattice), a, b) end @@ -581,6 +589,11 @@ end return is_lattice_equal(lattice, a.env, b.env) end isa(b, PartialOpaque) && return false + if isa(a, PartialTask) + isa(b, PartialTask) || return false + return is_lattice_equal(lattice, a.fetch_type, b.fetch_type) && is_lattice_equal(lattice, a.fetch_error, b.fetch_error) + end + isa(b, PartialTask) && return false return is_lattice_equal(widenlattice(lattice), a, b) end @@ -643,6 +656,18 @@ end ti = typeintersect(widev, t) valid_as_lattice(ti, true) || return Bottom return PartialOpaque(ti, v.env, v.parent, v.source) + elseif isa(v, PartialTask) + has_free_typevars(t) && return v + if Task <: t + return v + end + ti = typeintersect(Task, t) + valid_as_lattice(ti, true) || return Bottom + if ti === Task + return v + else + return Bottom # PartialTask can only be a Task + end end return tmeet(widenlattice(lattice), v, t) end @@ -704,6 +729,7 @@ widenconst(c::Const) = (v = c.val; isa(v, Type) ? 
Type{v} : typeof(v)) widenconst(::PartialTypeVar) = TypeVar widenconst(t::Core.PartialStruct) = t.typ widenconst(t::PartialOpaque) = t.typ +widenconst(t::PartialTask) = Task @nospecializeinfer widenconst(@nospecialize t::Type) = t widenconst(::TypeVar) = error("unhandled TypeVar") widenconst(::TypeofVararg) = error("unhandled Vararg") diff --git a/Compiler/src/typelimits.jl b/Compiler/src/typelimits.jl index af5e7964be0a6..d90b382f41101 100644 --- a/Compiler/src/typelimits.jl +++ b/Compiler/src/typelimits.jl @@ -396,6 +396,9 @@ end return false end return false + elseif typea isa PartialTask + typeb isa PartialTask || return false + return issimplertype(๐•ƒ, typea.fetch_type, typeb.fetch_type) end return true end @@ -718,6 +721,24 @@ end typeb = widenlattice(wl, typeb) end + # type-lattice for PartialTask wrapper + apt = isa(typea, PartialTask) + bpt = isa(typeb, PartialTask) + if apt && bpt + # Both are PartialTask - merge their fetch types and errors + merged_fetch_type = tmerge(lattice, typea.fetch_type, typeb.fetch_type) + merged_fetch_error = tmerge(lattice, typea.fetch_error, typeb.fetch_error) + # If both are Any, no additional type information - return Task + if merged_fetch_type === Any && merged_fetch_error === Any + return Task + end + return PartialTask(merged_fetch_type, merged_fetch_error) + elseif apt + typea = Task + elseif bpt + typeb = Task + end + return tmerge(wl, typea, typeb) end diff --git a/Compiler/test/effects.jl b/Compiler/test/effects.jl index 18cfc0e8d5388..192d30f1df958 100644 --- a/Compiler/test/effects.jl +++ b/Compiler/test/effects.jl @@ -1479,3 +1479,11 @@ let effects = Base.infer_effects((Core.SimpleVector,Int); optimize=false) do sve @test !Compiler.is_nothrow(effects) @test Compiler.is_terminates(effects) end + +# task_result_type effects modeling (should have !consistent effect) +let effects = Base.infer_effects(Core.task_result_type, (Task,)) + @test !Compiler.is_consistent(effects) # !consistent bit should be set + @test 
Compiler.is_effect_free(effects) + @test Compiler.is_nothrow(effects) + @test Compiler.is_terminates(effects) +end diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index c8df59a60c05e..6aba6c0a6e852 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -6522,4 +6522,16 @@ function haskey_inference_test() end @inferred haskey_inference_test() +@test Base.infer_return_type(Core.task_result_type, (Task,)) === Type +task_returner() = Task(() -> "hello") +@test Base.infer_return_type((typeof(task_returner),)) do f + Core.task_result_type(f()) +end === Type{String} +@test Base.infer_return_type((typeof(task_returner),)) do f + fetch(f()) +end === String +@test Base.infer_return_type((Int,)) do i + fetch(Threads.@spawn sin(i)) +end === Float64 + end # module inference diff --git a/Compiler/test/inline.jl b/Compiler/test/inline.jl index bde710a3ef9c0..269217cae3f7e 100644 --- a/Compiler/test/inline.jl +++ b/Compiler/test/inline.jl @@ -1846,6 +1846,32 @@ let src = code_typed1((AtomicMemoryRef{Int},)) do a @test count(isinvokemodify(:+), src.code) == 1 end +# Core._task handling +# =================== +# Test that _task inlines properly with const prop +f_task_invoke() = 42 +let src = code_typed1(()) do + return Task(f_task_invoke) + end + m = which(f_task_invoke, ()) + @test count(e -> begin + if iscall((src, Core._task), e) && e isa Expr && e.head === :call && length(e.args) == 4 + ci = e.args[4] + if ci isa CodeInstance && ci.def.def === m + return true + end + end + return false + end, src.code) == 1 +end + +# Test that task_result_type gets inlined to its constant value +let src = code_typed1((Task,)) do t; Core.task_result_type(t); end + # Should be inlined to Type{Any} constant, no call to task_result_type + @test count(iscall((src, Core.task_result_type)), src.code) == 0 + @test src.code[end] == ReturnNode(Any) +end + # apply `ssa_inlining_pass` multiple times func_mul_int(a::Int, b::Int) = Core.Intrinsics.mul_int(a, b) 
multi_inlining1(a::Int, b::Int) = @noinline func_mul_int(a, b) diff --git a/base/boot.jl b/base/boot.jl index bbf3fc0faa227..071cb7955cfcf 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -546,6 +546,7 @@ eval(Core, quote _PartialStruct(@nospecialize(typ), undef, fields::Array{Any, 1}) = $(Expr(:new, :PartialStruct, :typ, :undef, :fields)) PartialOpaque(@nospecialize(typ), @nospecialize(env), parent::MethodInstance, source) = $(Expr(:new, :PartialOpaque, :typ, :env, :parent, :source)) InterConditional(slot::Int, @nospecialize(thentype), @nospecialize(elsetype)) = $(Expr(:new, :InterConditional, :slot, :thentype, :elsetype)) + PartialTask(@nospecialize(fetch_type), @nospecialize(fetch_error)) = $(Expr(:new, :PartialTask, :fetch_type, :fetch_error)) MethodMatch(@nospecialize(spec_types), sparams::SimpleVector, method::Method, fully_covers::Bool) = $(Expr(:new, :MethodMatch, :spec_types, :sparams, :method, :fully_covers)) end) @@ -574,9 +575,6 @@ end GlobalRef(m::Module, s::Symbol) = ccall(:jl_module_globalref, Ref{GlobalRef}, (Any, Any), m, s) Module(name::Symbol=:anonymous, std_imports::Bool=true, default_names::Bool=true) = ccall(:jl_f_new_module, Ref{Module}, (Any, Bool, Bool), name, std_imports, default_names) -function _Task(@nospecialize(f), reserved_stack::Int, completion_future) - return ccall(:jl_new_task, Ref{Task}, (Any, Any, Int), f, completion_future, reserved_stack) -end const NTuple{N,T} = Tuple{Vararg{T,N}} diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index a6abf3e384cfb..571c1f6694382 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -3960,6 +3960,31 @@ The current differences are: """ Core.finalizer +""" + Core._task(f, size) -> Task + Core._task(f, size, invoked) -> Task + +This builtin is an implementation detail used by the `Task` constructor and should +not be called directly by end-users. Use `Task(f)` instead. + +Creates a new `Task` that will execute function `f` with the specified stack size. 
+The optional third argument `invoked` can be a `Method`, `MethodInstance`, `CodeInstance`, +or `Type` that will be used for optimized task invocation via `Core.invoke`. + +This is a low-level interface that bypasses safety checks and initialization +performed by the public `Task` constructor. +""" +Core._task + +""" + Core.task_result_type(task) -> Type + +This builtin function returns a conservative upper bound for the return type of the closure +provided when the `Task` was created. +At runtime it always returns the type `Any`; however, inference may replace it with any other `Type`. +""" +Core.task_result_type + """ ConcurrencyViolationError(msg) <: Exception diff --git a/base/task.jl b/base/task.jl index 1d234e8784ad9..122a5246c8f45 100644 --- a/base/task.jl +++ b/base/task.jl @@ -2,7 +2,11 @@ ## basic task functions and TLS -Core.Task(@nospecialize(f), reserved_stack::Int=0) = Core._Task(f, reserved_stack, ThreadSynchronizer()) +function Core.Task(@nospecialize(f), reserved_stack::Int=0) + task = Core._task(f, reserved_stack) + task.donenotify = ThreadSynchronizer() + return task +end # Container for a captured exception and its backtrace. Can be serialized. struct CapturedException <: Exception @@ -357,7 +361,7 @@ in an error, thrown as a [`TaskFailedException`](@ref) which wraps the failed ta Throws a `ConcurrencyViolationError` if `t` is the currently running task, to prevent deadlocks. """ -function wait(t::Task; throw=true) +@noinline function wait(t::Task; throw=true) _wait(t) if throw && istaskfailed(t) Core.throw(TaskFailedException(t)) @@ -521,9 +525,9 @@ Wait for a [`Task`](@ref) to finish, then return its result value. If the task fails with an exception, a [`TaskFailedException`](@ref) (which wraps the failed task) is thrown. 
""" -function fetch(t::Task) +@inline function fetch(t::Task) wait(t) - return task_result(t) + return task_result(t)::Core.task_result_type(t) end @@ -1064,7 +1068,7 @@ function yield(t::Task, @nospecialize(x=nothing)) record_running_time!(ct) # [task] created -scheduled-> wait_time maybe_record_enqueued!(t) - t.result = x + setfield!(t, :result, x) enq_work(ct) set_next_task(t) return try_yieldto(ensure_rescheduled) @@ -1091,7 +1095,7 @@ function yieldto(t::Task, @nospecialize(x=nothing)) record_running_time!(ct) # [task] created -scheduled-unfairly-> wait_time maybe_record_enqueued!(t) - t.result = x + setfield!(t, :result, x) set_next_task(t) return try_yieldto(identity) end @@ -1110,12 +1114,12 @@ function try_yieldto(undo) end if ct._isexception exc = ct.result - ct.result = nothing + setfield!(ct, :result, nothing) ct._isexception = false throw(exc) end result = ct.result - ct.result = nothing + setfield!(ct, :result, nothing) return result end @@ -1126,7 +1130,7 @@ function throwto(t::Task, @nospecialize exc) record_running_time!(ct) # [task] created -scheduled-unfairly-> wait_time maybe_record_enqueued!(t) - t.result = exc + setfield!(t, :result, exc) t._isexception = true set_next_task(t) return try_yieldto(identity) diff --git a/doc/src/devdocs/builtins.md b/doc/src/devdocs/builtins.md index ce56a7f9a6b91..e2a643c69ab27 100644 --- a/doc/src/devdocs/builtins.md +++ b/doc/src/devdocs/builtins.md @@ -37,12 +37,151 @@ Core.memoryrefsetonce! Core.get_binding_type ``` +## Various helper functions +``` +Base.quoted +Base.isa_ast_node +``` + ## Other ```@docs Core.IntrinsicFunction Core.Intrinsics Core.IR -Base.quoted -Base.isa_ast_node +Core._task +Core.task_result_type +``` + +## Adding New Builtin Functions + +This section documents the process for adding a new builtin function to Julia, (using `_task` as an example). + +### Overview + +Adding a new builtin function requires changes across multiple subsystems in Julia: +1. C runtime implementation +2. 
Julia inference integration +3. Complex inference integration (if required) +4. Optimization passes (if applicable) +5. Codegen changes (if applicable) + +### Step-by-Step Process + +#### 1. Add to Builtin Function Registry + +**File: `src/builtin_proto.h`** +```c +// Add to JL_BUILTIN_FUNCTIONS macro (alphabetically) +#define JL_BUILTIN_FUNCTIONS(XX) \ + // ... other functions + XX(_using, "_using") \ + XX(_your_builtin,"_your_builtin") \ // <-- Add your function here + XX(applicable,"applicable") \ + // ... other functions +``` + +**File: `src/builtins.c`** +```c +// Implement the C function +JL_CALLABLE(jl__your_builtin) +{ + JL_NARGS(_your_builtin, 2, 3); + JL_TYPECHK(_your_builtin, long, args[1]); + return _your_builtin_impl(args, nargs); +} +``` + +**Argument validation**: Always validate argument count and types in the C implementation. + +#### 2. Julia Inference Integration + +**File: `Compiler/src/tfuncs.jl`** +```julia +# Add simple tfunc if no special state is required +add_tfunc(Core._your_builtin, 2, 3, (@nospecialize(arg1), @nospecialize(arg2), @nospecialize(optional...)) -> Typ, 20) + +# OR for complex cases requiring AbstractInterpreter state: +# Leave out add_tfunc and implement in abstractinterpretation.jl instead +``` + +#### 3. Complex Inference Integration (if `add_tfunc` was insufficient) + +**File: `Compiler/src/stmtinfo.jl`** +```julia +# For builtins that perform indirect calls, use IndirectCallInfo. 
+# Set add_edges=true, unless the info is only for inlining and not used by inference +# +info_result = IndirectCallInfo(callinfo.info, callinfo.effects, true) +``` + +**File: `Compiler/src/abstractinterpretation.jl`** +```julia +# Add to builtin handling in abstract_call_builtin +elseif f === Core._your_builtin + return Future(abstract_eval_your_builtin(interp, arginfo, si, sv)) + +# Implement custom abstract evaluation if needed +function abstract_eval_your_builtin(interp::AbstractInterpreter, arginfo::ArgInfo, si::StmtInfo, sv::AbsIntState) + # Validation and inference logic + return CallMeta(return_type, exception_type, effects, call_info) +end +``` + +#### 4. Optimization Passes (if applicable) + +For example, if the operator has a special inlining: + +**File: `Compiler/src/ssair/inlining.jl`** +```julia +# Add to special builtin handling +if (f !== Core.invoke && + f !== Core.finalizer && + # ... other functions + f !== Core._your_builtin) # <-- Add here + +# Add optimization logic in `assemble_inline_todo!` +elseif f === Core._your_builtin + handle_your_builtin_call!(ir, idx, stmt, info, state) +end +``` + +#### 5. Documentation + +**File: `base/docs/basedocs.jl`** +```julia +""" + Core._your_builtin(arg1, arg2) -> ReturnType + Core._your_builtin(arg1, arg2, optional_arg) -> ReturnType + +This builtin is an implementation detail used by [higher-level function] and should +not be called directly by end-users. Use `HigherLevelFunction(args...)` instead. + +Brief description of what the builtin does and its parameters. +The optional third argument `optional_arg` can be a [description of types/purpose]. +""" +Core._your_builtin +``` + +**File: `doc/src/devdocs/builtins.md`** +```julia +# Add to the @docs block in the appropriate section +Core._your_builtin +``` + +#### 6. 
Testing + +**File: `Compiler/test/effects.jl`** +```julia +# Add test for effects modeling (if your builtin has specific effects) +let effects = Base.infer_effects(Core._your_builtin, (ArgType,)) + @test !Compiler.is_consistent(effects) + @test Compiler.is_nothrow(effects) +end +``` + +**File: `Compiler/test/inference.jl`** +```julia +# Add test for return type inference +@test Base.infer_return_type(Core._your_builtin, (ArgType,)) === ExpectedReturnType ``` diff --git a/src/builtin_proto.h b/src/builtin_proto.h index 607106f35bac0..e0eb8581e16bb 100644 --- a/src/builtin_proto.h +++ b/src/builtin_proto.h @@ -21,6 +21,7 @@ extern "C" { XX(_setsuper,"_setsuper!") \ XX(_structtype,"_structtype") \ XX(_svec_ref,"_svec_ref") \ + XX(_task,"_task") \ XX(_typebody,"_typebody!") \ XX(_typevar,"_typevar") \ XX(_using, "_using") \ @@ -68,6 +69,7 @@ extern "C" { XX(svec,"svec") \ XX(swapfield,"swapfield!") \ XX(swapglobal,"swapglobal!") \ + XX(task_result_type,"task_result_type") \ XX(throw,"throw") \ XX(throw_methoderror,"throw_methoderror") \ XX(tuple,"tuple") \ diff --git a/src/builtins.c b/src/builtins.c index 149253e073fff..98090350a4575 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -2166,6 +2166,26 @@ JL_CALLABLE(jl_f__svec_ref) return jl_svecref(s, idx-1); } +JL_CALLABLE(jl_f__task) +{ + JL_NARGS(_task, 2, 3); + jl_value_t *start = args[0]; + JL_TYPECHK(_task, long, args[1]); + size_t ssize = jl_unbox_long(args[1]); + jl_value_t *invoke_arg = NULL; + if (nargs >= 3) + invoke_arg = args[2]; + return (jl_value_t*)jl_new_task(start, jl_nothing, ssize, invoke_arg); +} + +JL_CALLABLE(jl_f_task_result_type) +{ + JL_NARGS(task_result_type, 1, 1); + JL_TYPECHK(task_result_type, task, args[0]); + // Without inference, this returns Any, but inference can inject other Types here + return (jl_value_t*)jl_any_type; +} + static int equiv_field_types(jl_value_t *old, jl_value_t *ft) { size_t nf = jl_svec_len(ft); @@ -2564,6 +2584,7 @@ void jl_init_primitives(void) JL_GC_DISABLED 
add_builtin("Const", (jl_value_t*)jl_const_type); add_builtin("PartialStruct", (jl_value_t*)jl_partial_struct_type); add_builtin("PartialOpaque", (jl_value_t*)jl_partial_opaque_type); + add_builtin("PartialTask", (jl_value_t*)jl_partial_task_type); add_builtin("InterConditional", (jl_value_t*)jl_interconditional_type); add_builtin("MethodMatch", (jl_value_t*)jl_method_match_type); add_builtin("Function", (jl_value_t*)jl_function_type); diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc index dc3073f42ddeb..c1963748b2173 100644 --- a/src/jl_exported_data.inc +++ b/src/jl_exported_data.inc @@ -101,6 +101,7 @@ XX(opaque_closure_typename, jl_typename_t*) \ XX(pair_type, jl_value_t*) \ XX(partial_opaque_type, jl_datatype_t*) \ + XX(partial_task_type, jl_datatype_t*) \ XX(partial_struct_type, jl_datatype_t*) \ XX(phicnode_type, jl_datatype_t*) \ XX(phinode_type, jl_datatype_t*) \ diff --git a/src/jltypes.c b/src/jltypes.c index b6d11d9c03990..73e2ae5067ec1 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -2971,6 +2971,12 @@ void export_jl_sysimg_globals(void) jl_##name##_type->smalltag = jl_##name##_tag; void jl_init_types(void) JL_GC_DISABLED { + // n.b. When adding fields to existing types, update const/atomic field bitvectors carefully: + // Bits represent field positions (0-indexed). + // Prefer `0b` notation (converting if needed from `0x`). + // Only set bits for new atomic/const fields, shift existing bits as needed. + // Only 32 bits in one field, overflow goes into the next field. 
+ jl_module_t *core = NULL; // will need to be assigned later // create base objects @@ -3781,7 +3787,7 @@ void jl_init_types(void) JL_GC_DISABLED NULL, jl_any_type, jl_emptysvec, - jl_perm_symsvec(27, + jl_perm_symsvec(28, "next", "queue", "storage", @@ -3789,6 +3795,7 @@ void jl_init_types(void) JL_GC_DISABLED "result", "scope", "code", + "invoked", "_state", "sticky", "priority", @@ -3809,7 +3816,8 @@ void jl_init_types(void) JL_GC_DISABLED "last_started_running_at", "running_time_ns", "finished_at"), - jl_svec(27, + jl_svec(28, + jl_any_type, jl_any_type, jl_any_type, jl_any_type, @@ -3842,10 +3850,10 @@ void jl_init_types(void) JL_GC_DISABLED XX(task); jl_value_t *listt = jl_new_struct(jl_uniontype_type, jl_task_type, jl_nothing_type); jl_svecset(jl_task_type->types, 0, listt); - // Set field 20 (metrics_enabled) as const - // Set fields 8 (_state) and 24-27 (metric counters) as atomic - const static uint32_t task_constfields[1] = { 0b00000000000010000000000000000000 }; - const static uint32_t task_atomicfields[1] = { 0b00000111100000000000000010000000 }; + // Set field 21 (metrics_enabled) as const + // Set fields 9 (_state) and 25-28 (metric counters) as atomic + const static uint32_t task_constfields[] = { 0b0000000100000000000000000000 }; + const static uint32_t task_atomicfields[] = { 0b1111000000000000000100000000 }; jl_task_type->name->constfields = task_constfields; jl_task_type->name->atomicfields = task_atomicfields; @@ -3863,6 +3871,10 @@ void jl_init_types(void) JL_GC_DISABLED jl_perm_symsvec(4, "typ", "env", "parent", "source"), jl_svec(4, jl_type_type, jl_any_type, jl_method_instance_type, jl_any_type), jl_emptysvec, 0, 0, 4); + jl_partial_task_type = jl_new_datatype(jl_symbol("PartialTask"), core, jl_any_type, jl_emptysvec, + jl_perm_symsvec(2, "fetch_type", "fetch_error"), + jl_svec(2, jl_any_type, jl_any_type), + jl_emptysvec, 0, 0, 2); // complete builtin type metadata jl_uint8pointer_type = 
(jl_datatype_t*)jl_apply_type1((jl_value_t*)jl_pointer_type, (jl_value_t*)jl_uint8_type); diff --git a/src/julia.h b/src/julia.h index 34067f4c1e204..05d96aab82222 100644 --- a/src/julia.h +++ b/src/julia.h @@ -2297,7 +2297,7 @@ struct _jl_handler_t { #define JL_TASK_STATE_DONE 1 #define JL_TASK_STATE_FAILED 2 -JL_DLLEXPORT jl_task_t *jl_new_task(jl_value_t*, jl_value_t*, size_t); +JL_DLLEXPORT jl_task_t *jl_new_task(jl_value_t*, jl_value_t*, size_t, jl_value_t*); JL_DLLEXPORT void jl_switchto(jl_task_t **pt); JL_DLLEXPORT int jl_set_task_tid(jl_task_t *task, int16_t tid) JL_NOTSAFEPOINT; JL_DLLEXPORT int jl_set_task_threadpoolid(jl_task_t *task, int8_t tpid) JL_NOTSAFEPOINT; diff --git a/src/julia_internal.h b/src/julia_internal.h index 6bb166352a5b0..da69fbe56c8a1 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -808,6 +808,7 @@ JL_DLLEXPORT void jl_typeassert(jl_value_t *x, jl_value_t *t); JL_DLLEXPORT jl_value_t *name(jl_value_t *F, jl_value_t **args, uint32_t nargs) JL_CALLABLE(jl_f_tuple); +JL_CALLABLE(jl_f_invoke); void jl_install_default_signal_handlers(void); void restore_signals(void); void jl_install_thread_signal_handler(jl_ptls_t ptls); diff --git a/src/julia_threads.h b/src/julia_threads.h index a8e796cd879cb..a98a89027df95 100644 --- a/src/julia_threads.h +++ b/src/julia_threads.h @@ -240,6 +240,7 @@ typedef struct _jl_task_t { jl_value_t *result; jl_value_t *scope; jl_value_t *start; + jl_value_t *invoked; // Method/CodeInstance/Type for optimized task invocation _Atomic(uint8_t) _state; uint8_t sticky; // record whether this Task can be migrated to a new thread uint16_t priority; diff --git a/src/task.c b/src/task.c index 944310b1ab5ff..1c9d5cb925a52 100644 --- a/src/task.c +++ b/src/task.c @@ -1073,7 +1073,7 @@ void jl_rng_split(uint64_t dst[JL_RNG_SIZE], uint64_t src[JL_RNG_SIZE]) JL_NOTSA } } -JL_DLLEXPORT jl_task_t *jl_new_task(jl_value_t *start, jl_value_t *completion_future, size_t ssize) +JL_DLLEXPORT jl_task_t 
*jl_new_task(jl_value_t *start, jl_value_t *completion_future, size_t ssize, jl_value_t *invoke_arg) { jl_task_t *ct = jl_current_task; jl_task_t *t = (jl_task_t*)jl_gc_alloc(ct->ptls, sizeof(jl_task_t), jl_task_type); @@ -1107,6 +1107,7 @@ JL_DLLEXPORT jl_task_t *jl_new_task(jl_value_t *start, jl_value_t *completion_fu t->tls = jl_nothing; jl_atomic_store_relaxed(&t->_state, JL_TASK_STATE_RUNNABLE); t->start = start; + t->invoked = invoke_arg; t->result = jl_nothing; t->donenotify = completion_future; jl_atomic_store_relaxed(&t->_isexception, 0); @@ -1250,7 +1251,14 @@ CFI_NORETURN jl_sigint_safepoint(ptls); } JL_TIMING(ROOT, ROOT); - res = jl_apply(&ct->start, 1); + // Check if we can use optimized invocation + if (ct->invoked != NULL) { + jl_value_t *invoke_args[2] = {ct->start, ct->invoked}; + res = jl_f_invoke(NULL, invoke_args, 2); + } + else { + res = jl_apply(&ct->start, 1); + } } JL_CATCH { res = jl_current_exception(ct); @@ -1571,6 +1579,7 @@ jl_task_t *jl_init_root_task(jl_ptls_t ptls, void *stack_lo, void *stack_hi) ct->tls = jl_nothing; jl_atomic_store_relaxed(&ct->_state, JL_TASK_STATE_RUNNABLE); ct->start = NULL; + ct->invoked = NULL; ct->result = jl_nothing; ct->donenotify = jl_nothing; jl_atomic_store_relaxed(&ct->_isexception, 0); diff --git a/stdlib/Serialization/src/Serialization.jl b/stdlib/Serialization/src/Serialization.jl index ee40ebdd4abad..e4bda6b25deb8 100644 --- a/stdlib/Serialization/src/Serialization.jl +++ b/stdlib/Serialization/src/Serialization.jl @@ -498,6 +498,10 @@ function serialize(s::AbstractSerializer, t::Task) if istaskstarted(t) && !istaskdone(t) error("cannot serialize a running Task") end + if isdefined(t, :invoked) + error("cannot serialize a Task constructed with invoke info") + # we could serialize it though, as long as the info isn't for a CodeInstance + end writetag(s.io, TASK_TAG) serialize(s, t.code) serialize(s, t.storage) diff --git a/test/channels.jl b/test/channels.jl index 721eb478bd13a..c90ae0962dbd0 100644 
--- a/test/channels.jl +++ b/test/channels.jl @@ -700,3 +700,34 @@ end @test_throws ErrorException(message) t.scope @test t.state == :runnable end + +function _task(@nospecialize(f), stack::Int, @nospecialize(invoke)) + t = Core._task(f, stack, invoke) + t.donenotify = Base.ThreadSynchronizer() + return t +end +@testset "Core._task with invoke arguments" begin + # Test _task with invoke Type argument + f1() = 43 + f1(x...) = x + t1 = _task(f1, 0, Tuple{}) + @test fetch(schedule(t1)) === 43 + t1 = _task(f1, 0, Tuple{Vararg}) + @test fetch(schedule(t1)) === () + t1 = _task(f1, 0, Tuple{typeof(f1)}) + schedule(t1) + @test_throws TaskFailedException fetch(t1) + + # Test _task with Method argument + m = which(f1, (Vararg,)) + t1 = _task(f1, 0, m) + @test fetch(schedule(t1)) === () + + # Test that _task validates argument types + t1 = _task(f1, 0, "invalid") + schedule(t1) + @test_throws TaskFailedException fetch(t1) + @test_throws TypeError Core._task(f1, "invalid_size") + @test_throws TypeError Core._task(f1, "invalid_size", m) + @test_throws ArgumentError Core._task(f1, 0, m, 1) +end