diff --git a/algomodel/algo_center.py b/algomodel/algo_center.py
index 439b5c2..bdd7fba 100644
--- a/algomodel/algo_center.py
+++ b/algomodel/algo_center.py
@@ -55,8 +55,7 @@ def stable_baselines3_algo(self,algo_model,alog_parameters,env):
         self.vf_coef = float(alog_parameters['vf_coef']) if 'vf_coef' in alog_parameters else 0.5
         self.max_grad_norm = float(
             alog_parameters['max_grad_norm']) if 'max_grad_norm' in alog_parameters else 0.5
-        alog_parameters['use_sde'] = alog_parameters['use_sde'] == "True"
-        self.use_sde = alog_parameters['use_sde'] if 'use_sde' in alog_parameters else False
+        self.use_sde = alog_parameters.get('use_sde', False) == 'True'
         self.sde_sample_freq = int(
             alog_parameters['sde_sample_freq']) if 'sde_sample_freq' in alog_parameters else -1
         self.target_kl = alog_parameters['target_kl'] if 'target_kl' in alog_parameters else None
diff --git a/config/test_mainlab.json b/config/test_mainlab.json
index d934e82..3e777d6 100644
--- a/config/test_mainlab.json
+++ b/config/test_mainlab.json
@@ -348,14 +348,14 @@
         "algorithm": {
             "system": "stable-baselines3",
             "algorithmModel": "DQN",
-            "totalTimeSteps": "2e4",
+            "totalTimeSteps": "2e5",
             "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
             "algorithmParameters": {
                 "learning_rate": "5e-5",
                 "gamma": "0.1",
                 "exploration_initial_eps": "1",
-                "exploration_final_eps": "0.3",
-                "exploration_fraction": "0.2"
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
             }
         }
     },
@@ -430,6 +430,432 @@
                 "exploration_fraction": "0.3"
             }
         }
+    },
+    "h000905DQNsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["h000905"],
+                "marketCountry": "zh",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "DQN",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "h000905PPOsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["h000905"],
+                "marketCountry": "zh",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "PPO",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "h000300DQNsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["h000300"],
+                "marketCountry": "zh",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "DQN",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "h000300PPOsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["h000300"],
+                "marketCountry": "zh",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "PPO",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "h000016DQNsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["h000016"],
+                "marketCountry": "zh",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "DQN",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "h000016PPOsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["h000016"],
+                "marketCountry": "zh",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "PPO",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "hDJIADQNsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["hDJI"],
+                "marketCountry": "us",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "DQN",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "hDJIAPPOsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["hDJI"],
+                "marketCountry": "us",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "PPO",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "hIXICDQNsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["hIXIC"],
+                "marketCountry": "us",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "DQN",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "hIXICPPOsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["hIXIC"],
+                "marketCountry": "us",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "PPO",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "hGSPCDQNsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["hGSPC"],
+                "marketCountry": "us",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "DQN",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    },
+    "hGSPCPPOsingle": {
+        "rlEnvInit": {
+            "envName": "env_strategy",
+            "envParameters": {
+                "codeList": ["hGSPC"],
+                "marketCountry": "us",
+                "balance": "10000000",
+                "strategyNum": "2",
+                "strategyInitDay": "35",
+                "actionStrategyId": "buy_sell",
+                "obsDayNum": "20",
+                "obsFactorNum": "5",
+                "obsPcaNum": "5",
+                "obsFactorNameList": ["mytt"],
+                "maxStrategyStepLimit": "2",
+                "maxStrategySellLimit": "1",
+                "normalizeType": "minmax",
+                "isDiscrete": "True",
+                "rewardId": "rank_reward"
+            }
+        },
+        "algorithm": {
+            "system": "stable-baselines3",
+            "algorithmModel": "PPO",
+            "totalTimeSteps": "1e4",
+            "//": "learningRate: learning rate; entCoef: entropy coefficient, the weight on action exploration; nSteps: number of training observations (actions), best set from the length of the training data, ideally a multiple of 8; nEpochs: n passes over the observations make one full training run; batchSize: buffer size, ideally a multiple of nSteps",
+            "algorithmParameters": {
+                "learning_rate": "5e-5",
+                "gamma": "0.1",
+                "exploration_initial_eps": "1",
+                "exploration_final_eps": "0.06",
+                "exploration_fraction": "0.3"
+            }
+        }
+    }
 }
\ No newline at end of file
diff --git a/env/action/action.py b/env/action/action.py
index 2838e0c..d9bf4cf 100644
--- a/env/action/action.py
+++ b/env/action/action.py
@@ -21,8 +21,95 @@ def action_strategy(self,strategy_num_choose,choose_action,balance,last_tradedat
         elif self.action_strategy_id == "two_bulin_rsi":
             return self.two_bulin_rsi(strategy_num_choose, choose_action, balance, last_tradedate,
                                       train_end_time, code_list, all_result_df)
+        elif self.action_strategy_id == "hold_buy_sell":
+            return self.hold_buy_sell(strategy_num_choose, choose_action, balance, last_tradedate,
+                                      train_end_time, code_list, all_result_df)
+        elif self.action_strategy_id == "buy_sell":
+            return self.buy_sell(strategy_num_choose, choose_action, balance, last_tradedate, train_end_time, code_list, all_result_df)
         else:
             raise ValueError("action_strategy_id is not exist")
+    def buy_sell(self, strategy_num_choose, choose_action, balance, last_tradedate, train_end_time, code_list, all_result_df):
+        one_strategy = strategy_num_choose[0]
+        two_strategy = strategy_num_choose[1]
+        buy_strategy_copy = copy.deepcopy(self.out_strategy)
+        sell_strategy_copy = copy.deepcopy(self.out_strategy)
+
+        # Compute the results of the buy and sell strategies
+        buy_result = buy_strategy_copy.buy_strategy(cash=balance, start_date=last_tradedate, end_date=train_end_time, code_list=code_list)
+        sell_result = sell_strategy_copy.sell_strategy(cash=balance, start_date=last_tradedate, end_date=train_end_time, code_list=code_list)
+
+        # Pick the strategy according to choose_action
+        if choose_action == 0:
+            one_strategy += 1
+            result_df = buy_result
+            self.out_strategy = buy_strategy_copy
+        else:
+            two_strategy += 1
+            result_df = sell_result
+            self.out_strategy = sell_strategy_copy
+
+        self.cap.append(choose_action)  # append the current choice to the cap queue
+        strategy_num_choose = [one_strategy, two_strategy]
+        # Score each strategy
+        scores = [self.cal_reward.calculate_score(result) for result in [buy_result, sell_result]]
+
+        # Assign the reward for the chosen strategy
+        reward = self.cal_reward.calculate_reward({"scores": scores, "action": choose_action, "cap": self.cap})
+
+        result_df = result_df[['value', 'cash', 'date', 'strategy_name', 'position']]
+        all_result_df = pd.concat([all_result_df, result_df])
+
+        return all_result_df, strategy_num_choose, reward, len(result_df)
+
+    def hold_buy_sell(self, strategy_num_choose, choose_action, balance, last_tradedate, train_end_time, code_list, all_result_df):
+        one_strategy = strategy_num_choose[0]
+        two_strategy = strategy_num_choose[1]
+        three_strategy = strategy_num_choose[2]
+        hold_strategy_copy = copy.deepcopy(self.out_strategy)
+        buy_strategy_copy = copy.deepcopy(self.out_strategy)
+        sell_strategy_copy = copy.deepcopy(self.out_strategy)
+
+        # Compute the results of the three strategies
+        hold_result = hold_strategy_copy.hold_strategy(cash=balance, start_date=last_tradedate, end_date=train_end_time, code_list=code_list)
+        buy_result = buy_strategy_copy.buy_strategy(cash=balance, start_date=last_tradedate, end_date=train_end_time, code_list=code_list)
+        sell_result = sell_strategy_copy.sell_strategy(cash=balance, start_date=last_tradedate, end_date=train_end_time, code_list=code_list)
+
+        # print(f"hold_result:{hold_result}")
+        # print(f"buy_result:{buy_result}")
+        # print(f"sell_result:{sell_result}")
+
+        # Pick the strategy according to choose_action
+        if choose_action == 0:
+            one_strategy += 1
+            result_df = hold_result
+            self.out_strategy = hold_strategy_copy
+        elif choose_action == 1:
+            two_strategy += 1
+            result_df = buy_result
+            self.out_strategy = buy_strategy_copy
+        else:
+            three_strategy += 1
+            result_df = sell_result
+            self.out_strategy = sell_strategy_copy
+
+        self.cap.append(choose_action)  # append the current choice to the cap queue
+        strategy_num_choose = [one_strategy, two_strategy, three_strategy]
+        # Score each strategy
+        scores = []
+        for result in [hold_result, buy_result, sell_result]:
+            print(f"result:{result}")
+            score = self.cal_reward.calculate_score(result)
+            print(f"score:{score}")
+            scores.append(score)
+        print(f'scores:{scores}')
+
+        # Assign the reward for the chosen strategy
+        reward = self.cal_reward.calculate_reward({"scores": scores, "action": choose_action, "cap": self.cap})
+
+        result_df = result_df[['value', 'cash', 'date', 'strategy_name', 'position']]
+        all_result_df = pd.concat([all_result_df, result_df])
+
+        return all_result_df, strategy_num_choose, reward, len(result_df)
     def two_bulin_rsi(self, strategy_num_choose, choose_action, balance, last_tradedate, train_end_time, code_list,all_result_df):
         one_strategy = strategy_num_choose[0]
diff --git a/env/env_strategy_train.py b/env/env_strategy_train.py
index 0676d06..913f6ae 100644
--- a/env/env_strategy_train.py
+++ b/env/env_strategy_train.py
@@ -73,8 +73,7 @@ def __init__(self,trade_env_parameters):
         self.obs_day_num = int(obs_day_num)
         self.obs_pca_num = int(obs_pca_num)
         self.is_train_random = is_train_random
-
-        if is_discrete:
+        if is_discrete == "True":
             self.action_space = spaces.Discrete(self.strategy_num)
         else:
             self.action_space = spaces.Box(
diff --git a/env/observation/observation.py b/env/observation/observation.py
index bb522d9..d9838d4 100644
--- a/env/observation/observation.py
+++ b/env/observation/observation.py
@@ -106,7 +106,7 @@ def get_obs(self, current_step, df=pd.DataFrame()):
             code_df = code_df.reset_index(drop=True)
             # extract the required data
             data = code_df.iloc[current_step - self.obs_day_num: current_step][column_name_list].values
-            if data.shape[0] > 1:  # make sure there are enough rows for PCA
+            if data.shape[0] > self.obs_pca_num:  # make sure there are enough rows for PCA
                 data = pca.fit_transform(data)  # PCA dimensionality reduction
                 data = data.T
             min_max_scaler = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))
diff --git a/env/reward/calculate_reward.py b/env/reward/calculate_reward.py
index 2e0d644..3a87907 100644
--- a/env/reward/calculate_reward.py
+++ b/env/reward/calculate_reward.py
@@ -76,8 +76,20 @@ def composite_strategy_score(self, df, risk_free_rate=0.02, weights=None):
         total_return = (df['value'].iloc[-1] / df['value'].iloc[0]) - 1

         indicators = [sharpe_ratio, max_drawdown, total_return, annualized_return, annualized_volatility]
+        print(f'indicators:{indicators}')
         # compute the composite score
         score = sum(w * i for w, i in zip(weights, indicators))
-
-        return score
\ No newline at end of file
+        print(f'indicators score:{score}')
+
+        return score
+    def calculate_score(self,df):
+        """
+        Compute the score: the last entry of df['value'] minus the first.
+        :param df: DataFrame containing a 'value' column
+        :return: the computed score
+        """
+        if df.empty:
+            return 0
+        score = df['value'].iloc[-1] - df['value'].iloc[0]
+        return score
diff --git a/mainlab/mainlab.py b/mainlab/mainlab.py
index 3523d9c..1bbfa44 100644
--- a/mainlab/mainlab.py
+++ b/mainlab/mainlab.py
@@ -57,7 +57,10 @@ def train_load_model(self):
             model.learn(total_timesteps=int(self.load_time_steps), callback=reward_callback)
         else:
             total_timesteps = self.mainlab_config.total_timesteps
-            model.learn(total_timesteps=total_timesteps, callback=reward_callback)
+            try:
+                model.learn(total_timesteps=total_timesteps, callback=reward_callback)
+            except Exception as e:
+                logger.error(f"Error occurred during model training: {e}")
         model_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__)), "resultmodel"),self.task_name)
         model.save(model_path)
         email_server(emailContext=self.task_name+" model is save",mail_host=self.mainlab_config.mail_host,mail_user=self.mainlab_config.mail_user,mail_pass=self.mainlab_config.mail_pass,receivers=self.mainlab_config.receivers)
diff --git a/strategy/strategy.py b/strategy/strategy.py
index f93e368..1e8879e 100644
--- a/strategy/strategy.py
+++ b/strategy/strategy.py
@@ -373,7 +373,7 @@ def achievement_strategy(end_flag=False):
                 if self.bao.cash >= max_buy_cash_limit:
                     self.bao.order_value(code, max_buy_cash_limit)
                 else:
-                    end_flag = True
+                    continue

             if self.strategy_step == self.max_strategy_step_limit:
                 end_flag = True
@@ -403,8 +403,7 @@ def achievement_strategy(end_flag=False):
             for code in self.bao.code_list:
                 if code in self.bao.positions:
                     self.bao.order_target_amount(code, 0)  # sell all positions
-                else:
-                    end_flag = True
+
             if self.strategy_step == self.max_strategy_step_limit:
                 end_flag = True
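
Illustration (not part of the patch): a minimal sketch of how the new calculate_score and the buy_sell action fit together, using hypothetical backtest DataFrames with the same columns action.py selects; the rank_reward shaping done by calculate_reward is not reproduced here.

    # Minimal sketch only -- hypothetical data, assumes the 'value' column
    # semantics shown in the calculate_score hunk above.
    import pandas as pd

    def calculate_score(df: pd.DataFrame) -> float:
        """Score a backtest result: final 'value' minus initial 'value'."""
        if df.empty:
            return 0
        return df['value'].iloc[-1] - df['value'].iloc[0]

    # Hypothetical per-step backtest outputs (columns as used in buy_sell)
    buy_result = pd.DataFrame({'value': [1_000_000, 1_020_000], 'cash': [0, 0],
                               'date': ['2020-01-02', '2020-01-03'],
                               'strategy_name': ['buy', 'buy'], 'position': [1, 1]})
    sell_result = pd.DataFrame({'value': [1_000_000, 995_000], 'cash': [995_000, 995_000],
                                'date': ['2020-01-02', '2020-01-03'],
                                'strategy_name': ['sell', 'sell'], 'position': [0, 0]})

    choose_action = 0  # 0 -> buy branch, anything else -> sell branch
    scores = [calculate_score(buy_result), calculate_score(sell_result)]
    result_df = buy_result if choose_action == 0 else sell_result
    print(scores)          # [20000, -5000]
    print(len(result_df))  # the step length that buy_sell returns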