#!/bin/bash
# Script to automate Splatfacto training on real data
# Prerequisites:
# 1. Install Nerfstudio
# 2. Install SAM & hloc into the nerfstudio conda env (example install commands below)
# 3. Download the helper scripts referenced below
# 4. Move the scripts to the corresponding locations (or other user-specified dirs)
# 5. Take images and organize them in the following folder structure:
#    - rgb
#      - frame_00000.png
#      - frame_00001.png
#      - ...
#    - rgb_new
#      - frame_00000.png
#      - frame_00001.png
#      - ...
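# Example install commands for step 2 (assumed; run inside the nerfstudio conda env):
#   pip install git+https://github.com/facebookresearch/segment-anything.git
#   git clone --recursive https://github.com/cvg/Hierarchical-Localization.git && pip install -e Hierarchical-Localization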
# Set the default values for the arguments
DATA_FOLDER=${1:-"/home/ziqi/Desktop/test/WaterCube0"}
CUDA_VISIBLE_DEVICES=${2:-0}
TRAIN_IDX=${3:-"0 2 4 6"} # Indices of sparse images used for training
OUTPUT_FOLDER=${4:-"/home/ziqi/Packages/nerfstudio/outputs"}
KUBRIC_FOLDER=${5:-"/home/ziqi/Packages/kubric"}
NERFSTUDIO_FOLDER=${6:-"/home/ziqi/Packages/nerfstudio"}
MERGE_SCRIPT=${7:-"/home/ziqi/Packages/nerfstudio/scripts/merge_colmap_data.py"}
EDIT_SCRIPT=${8:-"/home/ziqi/Packages/nerfstudio/scripts/edit_nerf_data.py"}
UNDISTORT_SCRIPT=${9:-"/home/ziqi/Packages/nerfstudio/scripts/undistort_transforms.py"}
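# Path to the change-detection script invoked at the end of this pipeline
# (assumed location; point this at wherever your change-detection script lives)
CHANGE_DET_SCRIPT=${10:-"/home/ziqi/Packages/nerfstudio/scripts/change_detection.py"}
# Example invocation (paths are placeholders):
#   bash real_gsplat_train.sh /path/to/WaterCube0 0 "0 2 4 6"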
# ------------------ Setup Environment ------------------
# May need to build a conda env with dependencies required by the .py scripts
source ~/miniconda3/etc/profile.d/conda.sh
conda activate nerfgs
cd "$NERFSTUDIO_FOLDER" || exit 1
export CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}
current_time=$(date +%Y-%m-%d_%H%M%S)
# ------------------ Structure from Motion ------------------
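# ns-process-data runs hloc-based SfM on the images in rgb/ and writes a colmap/
# model plus transforms.json (camera intrinsics and per-frame poses) into DATA_FOLDER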
ns-process-data images --data ${DATA_FOLDER}/rgb --output-dir ${DATA_FOLDER} \
--sfm-tool hloc --feature-type sift --matcher-type adalam --num_downscales 0
# check if the previous command was successful
if [ $? -eq 0 ]
then
echo "SFM completed successfully."
else
echo "SFM failed." >&2
exit 1
fi
# Rename transforms.json files
mv ${DATA_FOLDER}/transforms.json ${DATA_FOLDER}/transforms_colmap.json
# ------------------ Camera Localization ------------------
# TODO: read camera params from colmap
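# reloc_utils.py localizes the new images in rgb_new/ against the existing COLMAP
# model and writes their poses to rgb_new/transforms.json (renamed below)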
python $NERFSTUDIO_FOLDER/nerfstudio/process_data/reloc_utils.py \
--image_dir ${DATA_FOLDER}/rgb_new \
--colmap_path ${DATA_FOLDER}/colmap \
--transforms_json transforms_colmap.json
# check if the previous command was successful
if [ $? -eq 0 ]
then
echo "Camera localization completed successfully."
else
echo "Camera localization failed." >&2
exit 1
fi
# Rename transforms.json files
mv ${DATA_FOLDER}/rgb_new/transforms.json ${DATA_FOLDER}/transforms_reloc.json
# ------------------ Process training data ------------------
# Convert the train_idx string to an array
read -r -a train_idx_arr <<< "$TRAIN_IDX"
# Convert the SfM data to nerf data
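# (Flag meanings as used here: -jo/-jn take the original and relocalized transforms,
#  -oo/-on write the pre-train and full outputs, and -n lists the sparse training indices)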
python $MERGE_SCRIPT -jo ${DATA_FOLDER}/transforms_colmap.json \
-jn ${DATA_FOLDER}/transforms_reloc.json \
-oo ${DATA_FOLDER}/transforms_pretrain.json \
-on ${DATA_FOLDER}/transforms.json \
-n "${train_idx_arr[@]}"
# check if the previous command was successful
if [ $? -eq 0 ]
then
echo "Colmap data conversion successful."
else
echo "Colmap data conversion failed." >&2
exit 1
fi
# Duplicate the rgb and rgb_new to rgb_distorted and rgb_new_distorted
cp -r ${DATA_FOLDER}/rgb ${DATA_FOLDER}/rgb_distorted
cp -r ${DATA_FOLDER}/rgb_new ${DATA_FOLDER}/rgb_new_distorted
# Undistort images
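# The undistortion script presumably rewrites rgb/ and rgb_new/ in place, which is
# why the *_distorted copies above preserve the original images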
python $UNDISTORT_SCRIPT -s ${DATA_FOLDER}
# check if the previous command was successful
if [ $? -eq 0 ]
then
echo "Undistort images successful"
else
echo "Undistort images failed." >&2
exit 1
fi
# ------------------ Process data for training our method ------------------
# Add mask paths to the transforms.json file
python $EDIT_SCRIPT -i ${DATA_FOLDER}/transforms.json \
-o ${DATA_FOLDER}/transforms.json -am
# check if the previous command was successful
if [ $? -eq 0 ]
then
echo "Add mask paths completed successfully."
else
echo "Add mask paths failed." >&2
exit 1
fi
# Make the finetuning data by removing pre-train views in transforms.json file
python $EDIT_SCRIPT -i ${DATA_FOLDER}/transforms.json \
-o ${DATA_FOLDER}/transforms_finetune.json --remove_pretrain
# check if the previous command was successful
if [ $? -eq 0 ]
then
echo "Finetune data creation completed successfully."
else
echo "Finetune data creation failed." >&2
exit 1
fi
# ------------------ Pre-Training ------------------
while true; do
# Pre-training: train Splatfacto on the object-centric scene
ns-train splatfacto --vis viewer+tensorboard \
--experiment-name $(basename $DATA_FOLDER) \
--output-dir ${OUTPUT_FOLDER} \
--timestamp $current_time \
--steps_per_eval_all_images 1000 \
--pipeline.model.cull_alpha_thresh 0.005 \
--pipeline.model.continue_cull_post_densification=False \
--max-num-iterations 30000 \
--machine.num-devices 1 \
--viewer.quit-on-train-completion True \
nerfstudio-data --data ${DATA_FOLDER}/transforms_pretrain.json \
--auto-scale-poses=False --center-method none --orientation-method none \
--load-3D-points True \
--train_split_fraction 0.9
# check if the previous command was successful
if [ $? -ne 0 ]; then
echo "Training $(basename ${DATA_FOLDER}) failed." >&2
continue
fi
# If we reach here, pre-training was successful
break
done
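# The trained model can later be inspected with the Nerfstudio viewer, e.g.:
#   ns-viewer --load-config ${OUTPUT_FOLDER}/$(basename ${DATA_FOLDER})/splatfacto/${current_time}/config.yml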
# Detect 3D change in the scene
python $CHANGE_DET_SCRIPT \
-c ${OUTPUT_FOLDER}/$(basename ${DATA_FOLDER})/splatfacto/${current_time}/config.yml \
-t ${DATA_FOLDER}/transforms.json \
-o ${DATA_FOLDER}