diff --git a/prospect/fitting/fitting.py b/prospect/fitting/fitting.py
index 1853a58d..ef88ec4b 100755
--- a/prospect/fitting/fitting.py
+++ b/prospect/fitting/fitting.py
@@ -15,12 +15,12 @@
 from .minimizer import minimize_wrapper, minimizer_ball
 from .ensemble import run_emcee_sampler
-from .nested import run_dynesty_sampler
+from .nested import run_nested_sampler, parse_nested_kwargs
 from ..likelihood.likelihood import compute_chi, compute_lnlike

 __all__ = ["lnprobfn", "fit_model",
-           "run_minimize", "run_emcee", "run_dynesty"
+           "run_minimize", "run_ensemble", "run_nested"
            ]


@@ -123,7 +123,8 @@ def wrap_lnp(lnpfn, observations, model, sps, **lnp_kwargs):

 def fit_model(observations, model, sps, lnprobfn=lnprobfn,
-              optimize=False, emcee=False, dynesty=True, **kwargs):
+              optimize=False, emcee=False, nested_sampler="",
+              **kwargs):
     """Fit a model to observations using a number of different methods

     Parameters
@@ -167,7 +168,7 @@ def fit_model(observations, model, sps, lnprobfn=lnprobfn,
         + ``hfile``: an open h5py.File file handle for writing result incrementally

         Many additional emcee parameters can be provided here, see
-        :py:func:`run_emcee` for details.
+        :py:func:`run_ensemble` for details.

-    dynesty : bool (optional, default: True)
-        If ``True``, sample from the posterior using dynesty.  Additonal
+    nested_sampler : str (optional, default: "")
+        If set, the name of the nested sampler to use ("dynesty",
+        "ultranest", or "nautilus").  Additional

@@ -184,11 +185,7 @@
     # Make sure obs has required keys
     [obs.rectify() for obs in observations]

-    if emcee & dynesty:
-        msg = ("Cannot run both emcee and dynesty fits "
-               "in a single call to fit_model")
-        raise(ValueError, msg)
-    if (not emcee) & (not dynesty) & (not optimize):
+    if (not emcee) and (not nested_sampler) and (not optimize):
         msg = ("No sampling or optimization routine "
                "specified by user; returning empty results")
         warnings.warn(msg)
@@ -204,14 +201,16 @@
         output["optimization"] = (optres, topt)

     if emcee:
-        run_sampler = run_emcee
-    elif dynesty:
-        run_sampler = run_dynesty
+        run_sampler = run_ensemble
+    elif nested_sampler:
+        run_sampler = run_nested
+        kwargs["fitter"] = nested_sampler
     else:
         return output

     output["sampling"] = run_sampler(observations, model, sps,
-                                     lnprobfn=lnprobfn, **kwargs)
+                                     lnprobfn=lnprobfn,
+                                     **kwargs)

     return output


@@ -305,8 +304,8 @@ def run_minimize(observations=None, model=None, sps=None, lnprobfn=lnprobfn,
     return results, tm, best


-def run_emcee(observations, model, sps, lnprobfn=lnprobfn,
-              hfile=None, initial_positions=None, **kwargs):
+def run_ensemble(observations, model, sps, lnprobfn=lnprobfn,
+                 hfile=None, initial_positions=None, **kwargs):
     """Run emcee, optionally including burn-in and convergence checking.  Thin
     wrapper on :py:class:`prospect.fitting.ensemble.run_emcee_sampler`
@@ -387,6 +386,7 @@
     q = model.theta.copy()

     postkwargs = {}
+    # Hack for MPI pools to access the global namespace
     for item in ['observations', 'model', 'sps']:
         val = eval(item)
         if val is not None:
@@ -396,26 +396,32 @@
     # Could try to make signatures for these two methods the same....
     if initial_positions is not None:
         raise NotImplementedError
-        meth = restart_emcee_sampler
-        t = time.time()
-        out = meth(lnprobfn, initial_positions, hdf5=hfile,
-                   postkwargs=postkwargs, **kwargs)
+        go = time.time()
+        out = restart_emcee_sampler(lnprobfn, initial_positions,
+                                    hdf5=hfile,
+                                    postkwargs=postkwargs,
+                                    **kwargs)
         sampler = out
-        ts = time.time() - t
+        ts = time.time() - go
     else:
-        meth = run_emcee_sampler
-        t = time.time()
-        out = meth(lnprobfn, q, model, hdf5=hfile,
-                   postkwargs=postkwargs, **kwargs)
-        sampler, burn_p0, burn_prob0 = out
-        ts = time.time() - t
+        go = time.time()
+        out = run_emcee_sampler(lnprobfn, q, model,
+                                hdf5=hfile,
+                                postkwargs=postkwargs,
+                                **kwargs)
+        sampler, burn_loc0, burn_prob0 = out
+        ts = time.time() - go

     return sampler, ts


-def run_dynesty(observations, model, sps, lnprobfn=lnprobfn,
-                pool=None, nested_target_n_effective=10000, **kwargs):
-    """Thin wrapper on :py:class:`prospect.fitting.nested.run_dynesty_sampler`
+def run_nested(observations, model, sps,
+               lnprobfn=lnprobfn,
+               fitter="dynesty",
+               nested_nlive=1000,
+               nested_neff=1000,
+               **kwargs):
+    """Thin wrapper on :py:func:`prospect.fitting.nested.run_nested_sampler`

     Parameters
     ----------
@@ -436,43 +442,33 @@
         ``model``, and ``sps`` as keywords. By default use the
         :py:func:`lnprobfn` defined above.

-    Extra Parameters
-    --------
-    nested_bound: (optional, default: 'multi')
-
-    nested_sample: (optional, default: 'unif')
-
-    nested_nlive_init: (optional, default: 100)
-
-    nested_nlive_batch: (optional, default: 100)
-
-    nested_dlogz_init: (optional, default: 0.02)
-
-    nested_maxcall: (optional, default: None)
-
-    nested_walks: (optional, default: 25)
-
     Returns
     --------
-    result:
-        An instance of :py:class:`dynesty.results.Results`.
+    result : dict
+        A dictionary with keys:
+        * ``points``: parameter locations of the samples
+        * ``log_weight``: ln of the weight of each sample
+        * ``log_like``: ln of the likelihood of each sample

     t_wall : float
         Duration of sampling in seconds of wall time.
     """
-    from dynesty.dynamicsampler import stopping_function, weight_function
-    nested_stop_kwargs = {"target_n_effective": nested_target_n_effective}
-
-    lnp = wrap_lnp(lnprobfn, observations, model, sps, nested=True)
-
-    # Need to deal with postkwargs...
-
-    t = time.time()
-    dynestyout = run_dynesty_sampler(lnp, model.prior_transform, model.ndim,
-                                     stop_function=stopping_function,
-                                     wt_function=weight_function,
-                                     nested_stop_kwargs=nested_stop_kwargs,
-                                     pool=pool, **kwargs)
-    ts = time.time() - t
-
-    return dynestyout, ts
+    # wrap the probability function, making sure it's a likelihood
+    likelihood = wrap_lnp(lnprobfn, observations, model, sps, nested=True)
+
+    # sort the generic `nested_*` keywords into keywords for this fitter
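+    # (e.g., for dynesty, nested_bound / nested_sample / nested_walks are
+    # routed to the sampler constructor and nested_dlogz to its run method;
+    # see parse_nested_kwargs in prospect/fitting/nested.py)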
+    ns_kwargs, nr_kwargs = parse_nested_kwargs(fitter=fitter,
+                                               **kwargs)
+
+    go = time.time()
+    output = run_nested_sampler(model,
+                                likelihood,
+                                fitter=fitter,
+                                verbose=False,
+                                nested_nlive=nested_nlive,
+                                nested_neff=nested_neff,
+                                nested_sampler_kwargs=ns_kwargs,
+                                nested_run_kwargs=nr_kwargs)
+    ts = time.time() - go
+
+    return output, ts
diff --git a/prospect/fitting/nested.py b/prospect/fitting/nested.py
index dece9126..d5713677 100644
--- a/prospect/fitting/nested.py
+++ b/prospect/fitting/nested.py
@@ -4,66 +4,96 @@
 from .fitting import lnprobfn

-__all__ = ["run_nested", "run_dynesty_sampler"]
+__all__ = ["run_nested_sampler", "parse_nested_kwargs"]


-def run_nested(model,
-               lnprobfn=lnprobfn,
-               fitter="dynesty",
-               **kwargs):
+def parse_nested_kwargs(fitter=None, **kwargs):
+    # TODO: also map generic options like 'enlarge' and 'bootstrap'
+    # (or n_networks for nautilus) onto each fitter's keywords
+
+    sampler_kwargs = {}
+    run_kwargs = {}
+
+    if fitter == "dynesty":
+        sampler_kwargs["bound"] = kwargs.get("nested_bound", "multi")
+        sampler_kwargs["sample"] = kwargs.get("nested_sample", "auto")
+        sampler_kwargs["walks"] = kwargs.get("nested_walks", 48)
+        run_kwargs["dlogz_init"] = kwargs.get("nested_dlogz", 0.05)
+
+    elif fitter == "ultranest":
+        #run_kwargs["dlogz"] = kwargs["nested_dlogz"]
+        pass
+
+    elif fitter == "nautilus":
+        pass
+
+    else:
+        raise ValueError(f"'{fitter}' is not a valid nested sampler")
+
+    return sampler_kwargs, run_kwargs
+
+
+def run_nested_sampler(model,
+                       likelihood_function,
+                       fitter="dynesty",
+                       nested_nlive=1000,
+                       nested_neff=1000,
+                       verbose=False,
+                       nested_run_kwargs={},
+                       nested_sampler_kwargs={}):
     """We give a model -- parameter description and prior transform -- and a
     likelihood function.  We get back samples, weights, and likelihood values.
     """
     go = time.time()

+    # --- Nautilus ---
+    if fitter == "nautilus":
+        from nautilus import Sampler
+
+        sampler = Sampler(model.prior_transform,
+                          likelihood_function,
+                          n_dim=model.ndim,  # required when the prior is a transform function
+                          pass_dict=False,   # likelihood expects array, not dict
+                          n_live=nested_nlive,
+                          **nested_sampler_kwargs)
+        sampler.run(n_eff=nested_neff,
+                    verbose=verbose,
+                    **nested_run_kwargs)
+        points, log_w, log_like = sampler.posterior()
+
     # --- Ultranest ---
     if fitter == "ultranest":
-        # TODO: what about vector parameters
         from ultranest import ReactiveNestedSampler
-        sampler = ReactiveNestedSampler(model.free_params,
-                                        lnprobfn,
-                                        model.prior_transform)
-        result = sampler.run(**kwargs)
+        sampler = ReactiveNestedSampler(model.theta_labels(),
+                                        likelihood_function,
+                                        model.prior_transform,
+                                        **nested_sampler_kwargs)
+        result = sampler.run(min_ess=nested_neff,
+                             min_num_live_points=nested_nlive,
+                             show_status=verbose,
+                             **nested_run_kwargs)
         points = np.array(result['weighted_samples']['points'])
         log_w = np.log(np.array(result['weighted_samples']['weights']))
         log_like = np.array(result['weighted_samples']['logl'])

-    # --- Nautilus ---
-    if fitter == "nautilus":
-        from nautilus import Prior, Sampler
-
-        # we have to use the nautilus prior objects
-        # TODO: no we don't!
-        prior = Prior()
-        for k in params.param_names:
-            pr = params.priors[k]
-            if pr.kind == "Normal":
-                prior.add_parameter(k, dist=norm(pr.params['mean'], pr.params['sigma']))
-            else:
-                prior.add_parameter(k, dist=(pr.params['mini'], pr.params['maxi']))
-        sampler = Sampler(prior, lnprobfn, n_live=1000)
-        sampler.run(verbose=verbose)
-
-        points, log_w, log_like = sampler.posterior()
-
     # --- Dynesty ---
     if fitter == "dynesty":
         from dynesty import DynamicNestedSampler

-        sampler_kwargs=dict(nlive=1000,
-                            bound='multi',
-                            sample="unif",
-                            walks=48)
-
-        sampler = DynamicNestedSampler(lnprobfn,
+        sampler = DynamicNestedSampler(likelihood_function,
                                        model.prior_transform,
                                        model.ndim,
-                                       **sampler_kwargs)
-        sampler.run_nested(n_effective=1000, dlogz_init=0.05)
+                                       nlive=nested_nlive,
+                                       **nested_sampler_kwargs)
+        sampler.run_nested(n_effective=nested_neff,
+                           print_progress=verbose,
+                           **nested_run_kwargs)

         points = sampler.results["samples"]
         log_w = sampler.results["logwt"]
@@ -72,17 +102,10 @@
     # --- Nestle ---
     if fitter == "nestle":
         import nestle
-
-        sampler_kwargs=dict(method=nestle_method,
-                            npoints=nestle_npoints,
-                            callback=callback,
-                            maxcall=nestle_maxcall,
-                            update_interval=nestle_update_interval)
-        result = nestle.sample(lnprobfn, model.prior_transform, model.ndim,
-                               **sampler_kwargs)
+        result = nestle.sample(likelihood_function,
+                               model.prior_transform,
+                               model.ndim,
+                               **nested_sampler_kwargs)

         points = result["samples"]
         log_w = result["logwt"]
@@ -90,170 +113,134 @@
     dur = time.time() - go

-    return (points, log_w, log_like)
-
-
-from dynesty.dynamicsampler import stopping_function, weight_function
-def run_dynesty_sampler(lnprobfn, prior_transform, ndim,
-                        verbose=True,
-                        # sampler kwargs
-                        nested_bound='multi',
-                        nested_sample='unif',
-                        nested_walks=25,
-                        nested_update_interval=0.6,
-                        nested_bootstrap=0,
-                        pool=None,
-                        use_pool={},
-                        queue_size=1,
-                        # init sampling kwargs
-                        nested_nlive_init=100,
-                        nested_dlogz_init=0.02,
-                        nested_maxiter_init=None,
-                        nested_maxcall_init=None,
-                        nested_live_points=None,
-                        # batch sampling kwargs
-                        nested_maxbatch=None,
-                        nested_nlive_batch=100,
-                        nested_maxiter_batch=None,
-                        nested_maxcall_batch=None,
-                        nested_use_stop=True,
-                        # overall kwargs
-                        nested_maxcall=None,
-                        nested_maxiter=None,
-                        stop_function=stopping_function,
-                        wt_function=weight_function,
-                        nested_weight_kwargs={'pfrac': 1.0},
-                        nested_stop_kwargs={},
-                        nested_save_bounds=False,
-                        print_progress=True,
-                        **extras):
-
-    from dynesty import DynamicNestedSampler
-
-    # instantiate sampler
-    dsampler = DynamicNestedSampler(lnprobfn, prior_transform, ndim,
-                                    bound=nested_bound,
-                                    sample=nested_sample,
-                                    walks=nested_walks,
-                                    bootstrap=nested_bootstrap,
-                                    update_interval=nested_update_interval,
-                                    pool=pool, queue_size=queue_size, use_pool=use_pool
-                                    )
-
-    # generator for initial nested sampling
-    ncall = dsampler.ncall
-    niter = dsampler.it - 1
-    tstart = time.time()
-
-    for results in dsampler.sample_initial(nlive=nested_nlive_init,
-                                           dlogz=nested_dlogz_init,
-                                           maxcall=nested_maxcall_init,
-                                           maxiter=nested_maxiter_init,
-                                           live_points=nested_live_points):
-
-        try:
-            # dynesty >= 2.0
-            (worst, ustar, vstar, loglstar, logvol,
-             logwt, logz, logzvar, h, nc, worst_it,
-             propidx, propiter, eff, delta_logz, blob) = results
-        except(ValueError):
-            # dynsety < 2.0
-            (worst, ustar, vstar, loglstar, logvol,
-             logwt, logz, logzvar, h, nc, worst_it,
-             propidx, propiter, eff, delta_logz) = results
-
-        if delta_logz > 1e6:
-            delta_logz = np.inf
-        ncall += nc
-        niter += 1
-
-        if print_progress:
-            with np.errstate(invalid='ignore'):
-                logzerr = np.sqrt(logzvar)
sys.stderr.write("\riter: {:d} | batch: {:d} | nc: {:d} | " - "ncall: {:d} | eff(%): {:6.3f} | " - "logz: {:6.3f} +/- {:6.3f} | " - "dlogz: {:6.3f} > {:6.3f} " - .format(niter, 0, nc, ncall, eff, logz, - logzerr, delta_logz, nested_dlogz_init)) - sys.stderr.flush() - - ndur = time.time() - tstart - if verbose: - print('\ndone dynesty (initial) in {0}s'.format(ndur)) - - if nested_maxcall is None: - nested_maxcall = sys.maxsize - if nested_maxbatch is None: - nested_maxbatch = sys.maxsize - if nested_maxcall_batch is None: - nested_maxcall_batch = sys.maxsize - if nested_maxiter is None: - nested_maxiter = sys.maxsize - if nested_maxiter_batch is None: - nested_maxiter_batch = sys.maxsize - - # generator for dynamic sampling - tstart = time.time() - for n in range(dsampler.batch, nested_maxbatch): - # Update stopping criteria. - dsampler.sampler.save_bounds = False - res = dsampler.results - mcall = min(nested_maxcall - ncall, nested_maxcall_batch) - miter = min(nested_maxiter - niter, nested_maxiter_batch) - if nested_use_stop: - if dsampler.use_pool_stopfn: - M = dsampler.M - else: - M = map - stop, stop_vals = stop_function(res, nested_stop_kwargs, - rstate=dsampler.rstate, M=M, - return_vals=True) - stop_val = stop_vals[2] - else: - stop = False - stop_val = np.NaN - - # If we have either likelihood calls or iterations remaining, - # run our batch. - if mcall > 0 and miter > 0 and not stop: - # Compute our sampling bounds using the provided - # weight function. - logl_bounds = wt_function(res, nested_weight_kwargs) - lnz, lnzerr = res.logz[-1], res.logzerr[-1] - for results in dsampler.sample_batch(nlive_new=nested_nlive_batch, - logl_bounds=logl_bounds, - maxiter=miter, - maxcall=mcall, - save_bounds=nested_save_bounds): - - try: - # dynesty >= 2.0 - (worst, ustar, vstar, loglstar, nc, - worst_it, propidx, propiter, eff, blob) = results - except(ValueError): - # dynesty < 2.0 - (worst, ustar, vstar, loglstar, nc, - worst_it, propidx, propiter, eff) = results - ncall += nc - niter += 1 - if print_progress: - sys.stderr.write("\riter: {:d} | batch: {:d} | " - "nc: {:d} | ncall: {:d} | " - "eff(%): {:6.3f} | " - "loglstar: {:6.3f} < {:6.3f} " - "< {:6.3f} | " - "logz: {:6.3f} +/- {:6.3f} | " - "stop: {:6.3f} " - .format(niter, n+1, nc, ncall, - eff, logl_bounds[0], loglstar, - logl_bounds[1], lnz, lnzerr, - stop_val)) - sys.stderr.flush() - dsampler.combine_runs() - else: - # We're done! 
-            break
-
-
-    return dsampler.results
+    return dict(points=points, log_weight=log_w, log_like=log_like)
+
+
+# Reference: the full keyword-argument signatures of each supported sampler's
+# constructor and run method; commented-out entries are not currently exposed.
+SAMPLER_KWARGS = {
+
+"dynesty_sampler_kwargs" : dict(nlive=None,
+                                bound='multi',
+                                sample='auto',
+                                #periodic=None,
+                                #reflective=None,
+                                update_interval=None,
+                                first_update=None,
+                                npdim=None,
+                                #rstate=None,
+                                queue_size=None,
+                                pool=None,
+                                use_pool=None,
+                                #logl_args=None,
+                                #logl_kwargs=None,
+                                #ptform_args=None,
+                                #ptform_kwargs=None,
+                                #gradient=None,
+                                #grad_args=None,
+                                #grad_kwargs=None,
+                                #compute_jac=False,
+                                enlarge=None,
+                                bootstrap=None,
+                                walks=None,
+                                facc=0.5,
+                                slices=None,
+                                fmove=0.9,
+                                max_move=100,
+                                #update_func=None,
+                                ncdim=None,
+                                blob=False,
+                                #save_history=False,
+                                #history_filename=None)
+                                ),
+"dynesty_run_kwargs" : dict(nlive_init=None,  # nlive0
+                            maxiter_init=None,
+                            maxcall_init=None,
+                            dlogz_init=0.01,
+                            logl_max_init=np.inf,
+                            n_effective_init=np.inf,  # deprecated
+                            nlive_batch=None,  # nlive0
+                            wt_function=None,
+                            wt_kwargs=None,
+                            maxiter_batch=None,
+                            maxcall_batch=None,
+                            maxiter=None,
+                            maxcall=None,
+                            maxbatch=None,
+                            n_effective=None,
+                            stop_function=None,
+                            stop_kwargs=None,
+                            #use_stop=True,
+                            #save_bounds=True,  # doesn't hurt...?
+                            print_progress=True,
+                            #print_func=None,
+                            live_points=None,
+                            #resume=False,
+                            #checkpoint_file=None,
+                            #checkpoint_every=60)
+                            ),
+"ultranest_sampler_kwargs" : dict(transform=None,
+                                  #derived_param_names=[],
+                                  #wrapped_params=None,
+                                  #resume='subfolder',
+                                  #run_num=None,
+                                  #log_dir=None,
+                                  #num_test_samples=2,
+                                  draw_multiple=True,
+                                  num_bootstraps=30,
+                                  #vectorized=False,
+                                  ndraw_min=128,
+                                  ndraw_max=65536,
+                                  storage_backend='hdf5',
+                                  warmstart_max_tau=-1,
+                                  ),
+"ultranest_run_kwargs" : dict(update_interval_volume_fraction=0.8,
+                              update_interval_ncall=None,
+                              #log_interval=None,
+                              show_status=True,
+                              #viz_callback='auto',
+                              dlogz=0.5,
+                              dKL=0.5,
+                              frac_remain=0.01,
+                              Lepsilon=0.001,
+                              min_ess=400,
+                              max_iters=None,
+                              max_ncalls=None,
+                              max_num_improvement_loops=-1,
+                              min_num_live_points=400,
+                              cluster_num_live_points=40,
+                              insertion_test_window=10,
+                              insertion_test_zscore_threshold=4,
+                              region_class="MLFriends",
+                              #widen_before_initial_plateau_num_warn=10000,
+                              #widen_before_initial_plateau_num_max=50000,
+                              ),
+"nautilus_sampler_kwargs" : dict(n_live=2000,
+                                 n_update=None,
+                                 enlarge_per_dim=1.1,
+                                 n_points_min=None,
+                                 split_threshold=100,
+                                 n_networks=4,
+                                 neural_network_kwargs={},
+                                 #prior_args=[],
+                                 #prior_kwargs={},
+                                 #likelihood_args=[],
+                                 #likelihood_kwargs={},
+                                 n_batch=None,
+                                 n_like_new_bound=None,
+                                 #vectorized=False,
+                                 #pass_dict=None,
+                                 pool=None,
+                                 seed=None,
+                                 blobs_dtype=None,
+                                 #filepath=None,
+                                 #resume=True
+                                 ),
+"nautilus_run_kwargs" : dict(f_live=0.01,
+                             n_shell=1,
+                             n_eff=10000,
+                             n_like_max=np.inf,
+                             discard_exploration=False,
+                             timeout=np.inf,
+                             verbose=False
+                             ),
+}
diff --git a/prospect/io/write_results.py b/prospect/io/write_results.py
index 2d27c67a..79743d10 100644
--- a/prospect/io/write_results.py
+++ b/prospect/io/write_results.py
@@ -116,8 +116,8 @@ def write_hdf5(hfile, run_params, model, obs,
     # Sampling info
     if run_params.get("emcee", False):
         chain, extras = emcee_to_struct(sampler, model)
-    elif run_params.get("dynesty", False):
-        chain, extras = dynesty_to_struct(sampler, model)
+    elif run_params.get("nested_sampler", ""):
+        chain, extras = nested_to_struct(sampler, model)
     else:
         chain, extras = None, None
     write_sampling_h5(hf, chain, extras)
@@ -200,20 +200,16 @@ def emcee_to_struct(sampler, model):
     return chaincat, extras


-def dynesty_to_struct(dyout, model):
+def nested_to_struct(nested_out, model):
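+    # convert the generic nested-sampler output dictionary
+    # (points, log_weight, log_like) into a structured chain
+    # and an extras dictionary for the HDF5 output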
     # preamble
-    lnprior = model.prior_product(dyout['samples'])
+    lnprior = model.prior_product(nested_out['points'])

     # chaincat & extras
-    chaincat = chain_to_struct(dyout["samples"], model=model)
-    extras = dict(weights=np.exp(dyout['logwt']-dyout['logz'][-1]),
-                  lnprobability=dyout['logl'] + lnprior,
-                  lnlike=dyout['logl'],
-                  efficiency=np.atleast_1d(dyout['eff']),
-                  logz=np.atleast_1d(dyout['logz']),
-                  ncall=np.atleast_1d(dyout['ncall'])
-                  #ncall=json.dumps(dyout['ncall'].tolist())
-                  )
+    chaincat = chain_to_struct(nested_out['points'], model=model)
+    # shift by the maximum before exponentiating to avoid under/overflow;
+    # downstream uses of the weights are insensitive to their normalization
+    log_wt = nested_out['log_weight']
+    extras = dict(weights=np.exp(log_wt - np.max(log_wt)),
+                  lnprobability=nested_out['log_like'] + lnprior,
+                  lnlike=nested_out['log_like']
+                  )

     return chaincat, extras
diff --git a/prospect/utils/prospect_args.py b/prospect/utils/prospect_args.py
index 15c3d31a..7e257a5e 100644
--- a/prospect/utils/prospect_args.py
+++ b/prospect/utils/prospect_args.py
@@ -14,7 +14,7 @@ def show_default_args():
     parser.print_help()


-def get_parser(fitters=["optimize", "emcee", "dynesty"]):
+def get_parser(fitters=["optimize", "emcee", "nested"]):
     """Get a default prospector argument parser
     """
@@ -46,8 +46,8 @@
     if "emcee" in fitters:
         parser = add_emcee_args(parser)

-    if "dynesty" in fitters:
-        parser = add_dynesty_args(parser)
+    if "nested" in fitters:
+        parser = add_nested_args(parser)

     return parser
@@ -108,10 +108,22 @@ def add_emcee_args(parser):
     return parser


-def add_dynesty_args(parser):
-    # --- dynesty parameters ---
-    parser.add_argument("--dynesty", action="store_true",
-                        help="If set, do nested sampling with dynesty.")
+def add_nested_args(parser):
+    # --- nested sampling parameters ---
+    parser.add_argument("--nested_sampler", type=str, default="",
+                        choices=["", "dynesty", "nautilus", "ultranest"],
+                        help="Name of the nested sampler to use, if any.")
+
+    parser.add_argument("--nested_nlive", type=int, default=1000,
+                        help="Number of live points for the nested sampling run.")
+
+    parser.add_argument("--nested_neff", "--nested_target_n_effective",
+                        dest="nested_neff", type=int, default=1000,
+                        help=("Stop when the estimated number of *effective* "
+                              "posterior samples reaches this target."))
+
+    parser.add_argument("--nested_dlogz", type=float, default=0.05,
+                        help=("Stop the initial run when the remaining evidence is estimated "
+                              "to be less than this."))

     parser.add_argument("--nested_bound", type=str, default="multi",
                         choices=["single", "multi", "balls", "cubes"],
@@ -119,42 +131,17 @@
                         help=("Method for bounding the prior volume when proposing new points. "
                               "One of single | multi | balls | cubes"))

     parser.add_argument("--nested_sample", "--nested_method", type=str, dest="nested_sample",
-                        default="slice", choices=["unif", "rwalk", "slice"],
+                        default="auto", choices=["auto", "unif", "rwalk", "slice", "hslice"],
                         help=("Method for drawing new points during sampling. "
-                              "One of unif | rwalk | slice"))
+                              "One of auto | unif | rwalk | slice | hslice"))

     parser.add_argument("--nested_walks", type=int, default=48,
                         help=("Number of Metropolis steps to take when "
                               "`nested_sample` is 'rwalk'"))

-    parser.add_argument("--nlive_init", dest="nested_nlive_init", type=int, default=100,
-                        help="Number of live points for the intial nested sampling run.")
-
-    parser.add_argument("--nlive_batch", dest="nested_nlive_batch", type=int, default=100,
-                        help="Number of live points for the dynamic nested sampling batches")
-
-    parser.add_argument("--nested_dlogz_init", type=float, default=0.05,
-                        help=("Stop the initial run when the remaining evidence is estimated "
-                              "to be less than this."))
-
-    parser.add_argument("--nested_maxcall", type=int, default=int(5e7),
-                        help=("Maximum number of likelihood calls during nested sampling. "
-                              "This will only be enforced after the initial pass"))
-
-    parser.add_argument("--nested_maxiter", type=int, default=int(1e6),
-                        help=("Maximum number of iterations during nested sampling. "
-                              "This will only be enforced after the initial pass"))
-
-    parser.add_argument("--nested_maxbatch", type=int, default=10,
-                        help="Maximum number of dynamic batches.")
-
     parser.add_argument("--nested_bootstrap", type=int, default=0,
-                        help=("Number of bootstrap resamplings to use when estimating "
-                              "ellipsoid expansion factor."))
-
-    parser.add_argument("--nested_target_n_effective", type=int, default=10000,
-                        help=("Stop when the number of *effective* posterior samples as estimated "
-                              "by dynesty reaches the target number."))
+                        help=("Number of bootstrap resamplings to use when estimating "
+                              "ellipsoid expansion factor."))

     return parser