
Commit ef51ffe

1.16.25
1 parent 5ada6d2 commit ef51ffe

4 files changed

+11 -8 lines changed

pyabsa/__init__.py

+1 -1

@@ -7,7 +7,7 @@
 # Copyright (C) 2021. All Rights Reserved.


-__version__ = '1.16.24'
+__version__ = '1.16.25'

 __name__ = 'pyabsa'

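Note on the pyabsa/__init__.py hunk: only the version string changes. A quick way to confirm which release is actually installed (assuming pyabsa is importable in the current environment):

import pyabsa

# Prints the version string bumped in this commit; '1.16.25' on this release.
print(pyabsa.__version__)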

pyabsa/functional/dataset/dataset_manager.py

+4 -4

@@ -249,17 +249,17 @@ def detect_dataset(dataset_path, task='apc', load_aug=False):
                 dataset_file['valid'] += find_files(search_path, [d, 'dev', task], exclude_key=['.inference', 'train.', 'test.'] + filter_key_words + ['.ignore'])

         else:
-            print('Try to load {} dataset from local'.format(dataset_path))
+            print('Try to load {} dataset from local disk'.format(dataset_path))
             if load_aug:
                 dataset_file['train'] += find_files(d, ['train', task], exclude_key=['.inference', 'test.', 'valid.'] + filter_key_words)
                 dataset_file['test'] += find_files(d, ['test', task], exclude_key=['.inference', 'train.', 'valid.'] + filter_key_words)
                 dataset_file['valid'] += find_files(d, ['valid', task], exclude_key=['.inference', 'train.'] + filter_key_words)
                 dataset_file['valid'] += find_files(d, ['dev', task], exclude_key=['.inference', 'train.'] + filter_key_words)
             else:
                 dataset_file['train'] += find_cwd_files([d, 'train', task], exclude_key=['.inference', 'test.', 'valid.'] + filter_key_words + ['.ignore'])
-                dataset_file['test'] += find_cwd_files([d, 'train', task], exclude_key=['.inference', 'train.', 'valid.'] + filter_key_words + ['.ignore'])
-                dataset_file['valid'] += find_cwd_files([d, 'train', task], exclude_key=['.inference', 'train.', 'test.'] + filter_key_words + ['.ignore'])
-                dataset_file['valid'] += find_cwd_files([d, 'train', task], exclude_key=['.inference', 'train.', 'test.'] + filter_key_words + ['.ignore'])
+                dataset_file['test'] += find_cwd_files([d, 'test', task], exclude_key=['.inference', 'train.', 'valid.'] + filter_key_words + ['.ignore'])
+                dataset_file['valid'] += find_cwd_files([d, 'valid', task], exclude_key=['.inference', 'train.', 'test.'] + filter_key_words + ['.ignore'])
+                dataset_file['valid'] += find_cwd_files([d, 'valid', task], exclude_key=['.inference', 'train.', 'test.'] + filter_key_words + ['.ignore'])

     # # if we need train a checkpoint using as much data as possible, we can merge train, valid and test set as training sets
     # dataset_file['train'] = dataset_file['train'] + dataset_file['test'] + dataset_file['valid']
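Note on the dataset_manager.py hunk: the old working-directory fallback passed 'train' as the search key for all three splits, a copy-and-paste slip that filled the test and valid lists from training files. A table-driven sketch of the same lookup (hypothetical names, not code from this commit; find_cwd_files and filter_key_words are assumed to behave as in pyabsa) shows one way to keep each split's key paired with its exclusions:

# Each split maps to its own search key and the sibling keys to exclude,
# so no split can silently reuse another split's pattern.
SPLITS = {
    'train': ('train', ['test.', 'valid.']),
    'test':  ('test',  ['train.', 'valid.']),
    'valid': ('valid', ['train.', 'test.']),
}

def collect_splits(d, task, find_cwd_files, filter_key_words):
    dataset_file = {'train': [], 'test': [], 'valid': []}
    for split, (key, excludes) in SPLITS.items():
        dataset_file[split] += find_cwd_files(
            [d, key, task],
            exclude_key=['.inference'] + excludes + filter_key_words + ['.ignore'],
        )
    return dataset_file

With the key and exclusion list defined once per split, a future edit changes a single table row instead of three near-identical call sites.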

pyabsa/utils/pyabsa_utils.py

+2 -2

@@ -215,8 +215,8 @@ def _load_word_vec(path, word2idx=None, embed_dim=300):


 def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
-    if not os.path.exists('run'):
-        os.makedirs('run')
+    if not os.path.exists('run/{}'.format(opt.dataset_name)):
+        os.makedirs('run/{}'.format(opt.dataset_name))
     embed_matrix_path = 'run/{}'.format(os.path.join(opt.dataset_name, dat_fname))
     if os.path.exists(embed_matrix_path):
         print(colored('Loading cached embedding_matrix from {} (Please remove all cached files if there is any problem!)'.format(embed_matrix_path), 'green'))
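Note on the pyabsa_utils.py hunk: build_embedding_matrix previously created only the top-level run directory, so writing the cache to run/<dataset_name>/ could fail on first use. As an aside, the exists-then-makedirs pattern can still race with a concurrent run; the standard library's os.makedirs(..., exist_ok=True) (available since Python 3.2) folds the check into the creation, as in this sketch (a suggestion, not part of the commit):

import os

# Hypothetical equivalent of the fixed lines: create the per-dataset cache
# directory in one call, with no error if it already exists.
def ensure_cache_dir(dataset_name):
    cache_dir = os.path.join('run', dataset_name)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir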

release-note.json

+4 -1

@@ -1,7 +1,10 @@
 {
-  "1.16.24": {
+  "1.16.25": {
     "1": "Improves stability of dataset search, this is the final version of the 1.16.x release"
   },
+  "1.16.24": {
+    "1": "Improves stability of dataset search"
+  },
   "1.16.23": {
     "1": "Fix the positions output of ATEPC",
     "2": "Fix a demo bug",
