|
| 1 | +import os |
| 2 | +import sys |
| 3 | + |
| 4 | +from tqdm import tqdm |
| 5 | +import numpy as np |
| 6 | + |
| 7 | +import torch |
| 8 | +from sentence_transformers import SentenceTransformer |
| 9 | + |
| 10 | +from utils.args import create_embeddings_args |
| 11 | +from utils import embed |
| 12 | + |
| 13 | + |
# Parse CLI arguments for the embedding job (model name, data/output paths,
# precision, compression settings, corpus group).
parser = create_embeddings_args()
args = parser.parse_args()


# Half precision halves embedding storage and GPU memory when requested.
TORCH_DTYPE = torch.float16 if args.use_amp_fp16 else torch.float32
NUMPY_DTYPE = np.float16 if args.use_amp_fp16 else np.float32
# Highest zero-based sub-file index per corpus (file count = idx + 1).
MAX_C4_SUBFILE_IDX = 1023
MAX_WIKIPEDIA_SUBFILE_IDX = 12
MAX_STACKEXCHANGE_SUBFILE_IDX = 13

device = args.device
model = SentenceTransformer(args.model_name).to(device)
# NOTE: plain concatenation — args.saving_path is expected to end with a
# path separator, matching how args.data_path is used below.
db_path = args.saving_path + args.saving_file

if not os.path.exists(args.saving_path):
    # exist_ok=True closes the TOCTOU race: another process may create the
    # directory between the exists() check and makedirs().
    os.makedirs(args.saving_path, exist_ok=True)
    print(f"Created directory path {args.saving_path}")
| 31 | + |
if args.group_name == "c4":
    # C4 ships as 1024 gzipped JSON shards with zero-padded indices.
    c4_subfiles = [
        f"{args.data_path}c4-train.{idx:05d}-of-01024.json.gz"
        for idx in range(MAX_C4_SUBFILE_IDX + 1)
    ]
    last_uid = embed.retrieve_last_saved_uid(db_path, args.group_name)

    # Resume right after the last sub-file already stored in the database.
    if last_uid is None:
        next_subfile_idx = 0
    else:
        _, last_subfile_idx_text = last_uid.split(".")
        last_subfile_idx = int(last_subfile_idx_text)
        if last_subfile_idx == MAX_C4_SUBFILE_IDX:
            print("All C4 subfiles have been processed.")
            sys.exit(0)
        next_subfile_idx = last_subfile_idx + 1

    # Embed each remaining sub-file and append it to the HDF5 database.
    pending = c4_subfiles[next_subfile_idx:]
    for c4_subfile in tqdm(pending, file=sys.stdout):
        uid = c4_subfile.split("/")[-1].split("-")[1]  # uid = "train.xxxxx"
        print(f"Embedding file {uid}.")
        samples = embed.get_c4_subfile_texts(c4_subfile)
        if samples is None:
            continue
        embeddings = np.array(
            embed.embed_samples(
                samples=samples,
                model=model,
                batch_size=args.batch_size,
                dtype=TORCH_DTYPE,
                to_cpu=True,
            ),
            dtype=NUMPY_DTYPE,
        )
        embed.write_to_hdf5(
            path=db_path,
            group_name=args.group_name,
            uid=uid,
            embeddings=embeddings,
            compression=args.compression,
            compression_opts=args.compression_opts,
        )
| 74 | +elif args.group_name == "wikipedia": |
| 75 | + wikipedia_subfiles = [ |
| 76 | + f"{args.data_path}wiki_{i:02d}.jsonl" |
| 77 | + for i in range(MAX_WIKIPEDIA_SUBFILE_IDX + 1) |
| 78 | + ] |
| 79 | + last_uid = embed.retrieve_last_saved_uid(db_path, args.group_name) |
| 80 | + |
| 81 | + # define next starting point from already processed files in the database |
| 82 | + if last_uid is not None: |
| 83 | + _, last_subfile_idx_text = last_uid.split("_") |
| 84 | + last_subfile_idx = int(last_subfile_idx_text) |
| 85 | + |
| 86 | + if last_subfile_idx == MAX_WIKIPEDIA_SUBFILE_IDX: |
| 87 | + print("All Wikipedia subfiles have been processed.") |
| 88 | + sys.exit(0) |
| 89 | + else: |
| 90 | + next_subfile_idx = last_subfile_idx + 1 |
| 91 | + else: |
| 92 | + next_subfile_idx = 0 |
| 93 | + |
| 94 | + # start processing |
| 95 | + for wikipedia_subfile in tqdm( |
| 96 | + wikipedia_subfiles[next_subfile_idx:], file=sys.stdout |
| 97 | + ): |
| 98 | + uid = wikipedia_subfile.split("/")[-1].split(".")[0] # uid = "wiki_xx" |
| 99 | + print(f"Embedding file {uid}.") |
| 100 | + samples = embed.get_jsonl_subfile_texts(wikipedia_subfile) |
| 101 | + if samples is not None: |
| 102 | + embeddings = embed.embed_samples( |
| 103 | + samples=samples, |
| 104 | + model=model, |
| 105 | + batch_size=args.batch_size, |
| 106 | + dtype=TORCH_DTYPE, |
| 107 | + to_cpu=True, |
| 108 | + ) |
| 109 | + embeddings = np.array(embeddings, dtype=NUMPY_DTYPE) |
| 110 | + embed.write_to_hdf5( |
| 111 | + path=db_path, |
| 112 | + group_name=args.group_name, |
| 113 | + uid=uid, |
| 114 | + embeddings=embeddings, |
| 115 | + compression=args.compression, |
| 116 | + compression_opts=args.compression_opts, |
| 117 | + ) |
| 118 | +elif args.group_name == "wikihow": |
| 119 | + # there is only one file for wikihow |
| 120 | + last_uid = embed.retrieve_last_saved_uid(db_path, args.group_name) |
| 121 | + |
| 122 | + if last_uid is not None: |
| 123 | + print("Wikihow has been processed.") |
| 124 | + sys.exit(0) |
| 125 | + else: |
| 126 | + next_subfile_idx = 0 |
| 127 | + |
| 128 | + # start processing |
| 129 | + uid = args.data_path.split("/")[-1].split(".")[0] # uid = "train" |
| 130 | + print(f"Embedding file {uid}.") |
| 131 | + samples = embed.get_jsonl_subfile_texts(args.data_path) |
| 132 | + if samples is not None: |
| 133 | + embeddings = embed.embed_samples( |
| 134 | + samples=samples, |
| 135 | + model=model, |
| 136 | + batch_size=args.batch_size, |
| 137 | + dtype=TORCH_DTYPE, |
| 138 | + to_cpu=True, |
| 139 | + ) |
| 140 | + embeddings = np.array(embeddings, dtype=NUMPY_DTYPE) |
| 141 | + embed.write_to_hdf5( |
| 142 | + path=db_path, |
| 143 | + group_name=args.group_name, |
| 144 | + uid=uid, |
| 145 | + embeddings=embeddings, |
| 146 | + compression=args.compression, |
| 147 | + compression_opts=args.compression_opts, |
| 148 | + ) |
| 149 | +elif args.group_name == "stackexchange": |
| 150 | + stackexchange_subfiles = [ |
| 151 | + f"{args.data_path}stack_{i:02d}.jsonl" |
| 152 | + for i in range(MAX_STACKEXCHANGE_SUBFILE_IDX + 1) |
| 153 | + ] |
| 154 | + last_uid = embed.retrieve_last_saved_uid(db_path, args.group_name) |
| 155 | + |
| 156 | + # define next starting point from already processed files in the database |
| 157 | + if last_uid is not None: |
| 158 | + _, last_subfile_idx_text = last_uid.split("_") |
| 159 | + last_subfile_idx = int(last_subfile_idx_text) |
| 160 | + |
| 161 | + if last_subfile_idx == MAX_STACKEXCHANGE_SUBFILE_IDX: |
| 162 | + print("All Stackexchange subfiles have been processed.") |
| 163 | + sys.exit(0) |
| 164 | + else: |
| 165 | + next_subfile_idx = last_subfile_idx + 1 |
| 166 | + else: |
| 167 | + next_subfile_idx = 0 |
| 168 | + |
| 169 | + # start processing |
| 170 | + for stackexchange_subfile in tqdm( |
| 171 | + stackexchange_subfiles[next_subfile_idx:], file=sys.stdout |
| 172 | + ): |
| 173 | + uid = stackexchange_subfile.split("/")[-1].split(".")[0] # uid = "stack_xx" |
| 174 | + print(f"Embedding file {uid}.") |
| 175 | + samples = embed.get_jsonl_subfile_texts(stackexchange_subfile) |
| 176 | + if samples is not None: |
| 177 | + embeddings = embed.embed_samples( |
| 178 | + samples=samples, |
| 179 | + model=model, |
| 180 | + batch_size=args.batch_size, |
| 181 | + dtype=TORCH_DTYPE, |
| 182 | + to_cpu=True, |
| 183 | + ) |
| 184 | + embeddings = np.array(embeddings, dtype=NUMPY_DTYPE) |
| 185 | + embed.write_to_hdf5( |
| 186 | + path=db_path, |
| 187 | + group_name=args.group_name, |
| 188 | + uid=uid, |
| 189 | + embeddings=embeddings, |
| 190 | + compression=args.compression, |
| 191 | + compression_opts=args.compression_opts, |
| 192 | + ) |
0 commit comments