Commit 6572ce19 authored by Maki94

scripts to run blender and dtu

parent 603cb081
@@ -114,7 +114,6 @@ class ModelHiddenParams(ParamGroup):
         self.color_model = 'linear' # mlp, hexplane, 'sh'
         self.opacity_model = 'nerf' # nerf or volsdf
         self.opacity_ones = False
-        self.use_deform_net = False
         self.opt_pts = False
         self.opt_pts_per_frame = False
         self.encoder_query_scale = 1.0
# Example script to run 3DGS, 3DGS with the Moran loss, and SplatFields3D on the Blender dataset (Table 2 in the main paper and Tables C1 and C2 in the supplementary material)
set -x
SCENE=lego # any of the Blender scenes
N_VIEWS=10 # one of (4 6 8 10 12)
DATASET_ROOT=/media/STORAGE_4TB/NeRF_datasets/nerf_synthetic
# to reproduce 3DGS
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/$SCENE/${N_VIEWS}views/3DGS --is_static --n_views $N_VIEWS --iterations 40000 --pts_samples hull --max_num_pts 300000 --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/$SCENE/${N_VIEWS}views/3DGS --is_static --n_views $N_VIEWS --iterations 40000 --pts_samples hull --max_num_pts 300000 --load_time_step 0 --composition_rank 0
# to reproduce SplatFields3D (requires the 3DGS run above, since --pc_path loads its point cloud)
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/SplatFields --encoder_type VarTriPlaneEncoder --D 4 --lambda_norm 0.01 --test_iterations -1 --W 128 --n_views ${N_VIEWS} --iterations 40000 --pts_samples load --max_num_pts 100000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/SplatFields --encoder_type VarTriPlaneEncoder --D 4 --lambda_norm 0.01 --test_iterations -1 --W 128 --n_views ${N_VIEWS} --iterations 40000 --pts_samples load --max_num_pts 100000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
# to reproduce 3DGS with L_moran
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/$SCENE/${N_VIEWS}views/3DGS_Lmoran --test_iterations -1 --is_static --n_views ${N_VIEWS} --iterations 40000 --pts_samples hull --max_num_pts 300000 --lambda_corr 0.01 --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/$SCENE/${N_VIEWS}views/3DGS_Lmoran --test_iterations -1 --is_static --n_views ${N_VIEWS} --iterations 40000 --pts_samples hull --max_num_pts 300000 --lambda_corr 0.01 --load_time_step 0 --composition_rank 0
# to reproduce the ablation study (Table 3 in the main paper)
# 1st row: basic (MLP-only) model:
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP --encoder_type none --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP --encoder_type none --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
# 2nd row: basic (MLP-only) + L_2 norm:
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP_norm0.01 --lambda_norm 0.01 --encoder_type none --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP_norm0.01 --lambda_norm 0.01 --encoder_type none --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
# 3rd row: basic (MLP-only) + tri-CNN:
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP_CNN --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP_CNN --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
# 4th row: full model (MLP+L_2 norm+tri-CNN):
python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP_norm0.01_CNN --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
python render.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/MLP_norm0.01_CNN --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --test_iterations -1 --W 128 --n_views $N_VIEWS --iterations 40000 --pts_samples load --max_num_pts 300000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
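# ------------------------------------------------------------------------------
# Optional sketch (not part of the original scripts): sweep every Blender scene and
# view count with the 3DGS -> SplatFields3D pairing from above. The scene list is the
# standard NeRF-synthetic set and is an assumption here; trim it to match your data.
# render.py accepts the same arguments as each train.py call (see the commands above).
for SCENE in chair drums ficus hotdog lego materials mic ship; do
    for N_VIEWS in 4 6 8 10 12; do
        # 3DGS first: the SplatFields3D run loads its point cloud via --pc_path
        python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS --is_static --n_views ${N_VIEWS} --iterations 40000 --pts_samples hull --max_num_pts 300000 --load_time_step 0 --composition_rank 0
        python train.py -s ${DATASET_ROOT}/${SCENE} --white_background --eval -m ./output_rep/Blender/${SCENE}/${N_VIEWS}views/SplatFields --encoder_type VarTriPlaneEncoder --D 4 --lambda_norm 0.01 --test_iterations -1 --W 128 --n_views ${N_VIEWS} --iterations 40000 --pts_samples load --max_num_pts 100000 --pc_path ./output_rep/Blender/${SCENE}/${N_VIEWS}views/3DGS/point_cloud/iteration_40000/point_cloud.ply --load_time_step 0 --composition_rank 0
    done
done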
# ------------------------------------------------------------------------------
# Example script to run 3DGS and SplatFields3D on the DTU dataset
# run vanilla 3DGS on the DTU dataset (hard-coded example for scan110)
python train.py -s /media/STORAGE_4TB/NeRF_datasets/DTU/2DGS_data/dtu/DTU/scan110 -m ./output_rep/dtu/scan110/3views/3DGS --white_background --lambda_mask 0.1 -r 2 --is_static --n_views 3 --iterations 30000
python render.py -s /media/STORAGE_4TB/NeRF_datasets/DTU/2DGS_data/dtu/DTU/scan110 -m ./output_rep/dtu/scan110/3views/3DGS --white_background --lambda_mask 0.1 -r 2 --is_static --n_views 3 --iterations 30000
# same run, parameterized: set the DTU dataset root, the scene, and the number of views
DATASET_ROOT=/media/STORAGE_4TB/NeRF_datasets/DTU/2DGS_data/dtu/DTU
SCENE=scan114 # scan105 scan106 scan110 scan114 scan118 scan122 scan24 scan37 scan40 scan55 scan63 scan65 scan69 scan83 scan97
N_VIEWS=3
python train.py -s $DATASET_ROOT/$SCENE -m ./output_rep/dtu/$SCENE/3views/3DGS --white_background --lambda_mask 0.1 -r 2 --is_static --n_views $N_VIEWS --iterations 30000
python render.py -s $DATASET_ROOT/$SCENE -m ./output_rep/dtu/$SCENE/3views/3DGS --white_background --lambda_mask 0.1 -r 2 --is_static --n_views $N_VIEWS --iterations 30000
# run SplatFields3D on the DTU dataset (hard-coded example for scan110; requires the 3DGS run above, since --pc_path loads its point cloud)
python train.py -s /media/STORAGE_4TB/NeRF_datasets/DTU/2DGS_data/dtu/DTU/scan110 -m ./output_rep/dtu/scan110/3views/SplatFields3D --pc_path ./output_rep/dtu/scan110/3views/3DGS/point_cloud/iteration_7000/point_cloud.ply --deform_weight 0 --white_background --lambda_mask 0.1 --n_views 3 --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --W 128 --iterations 30000 --max_num_pts 300000 -r 2 --load_time_step 0 --composition_rank 0
python render.py -s /media/STORAGE_4TB/NeRF_datasets/DTU/2DGS_data/dtu/DTU/scan110 -m ./output_rep/dtu/scan110/3views/SplatFields3D --pc_path ./output_rep/dtu/scan110/3views/3DGS/point_cloud/iteration_7000/point_cloud.ply --deform_weight 0 --white_background --lambda_mask 0.1 --n_views 3 --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --W 128 --iterations 30000 --max_num_pts 300000 -r 2 --load_time_step 0 --composition_rank 0
# ------------------------------------------------------------------------------
# the same SplatFields3D run, parameterized with DATASET_ROOT / SCENE / N_VIEWS set above
python train.py -s $DATASET_ROOT/$SCENE -m ./output_rep/dtu/$SCENE/3views/SplatFields3D --pc_path ./output_rep/dtu/$SCENE/3views/3DGS/point_cloud/iteration_1000/point_cloud.ply --deform_weight 0 --white_background --lambda_mask 0.1 --n_views $N_VIEWS --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --W 128 --iterations 30000 --max_num_pts 300000 -r 2 --load_time_step 0 --composition_rank 0
python render.py -s $DATASET_ROOT/$SCENE -m ./output_rep/dtu/$SCENE/3views/SplatFields3D --pc_path ./output_rep/dtu/$SCENE/3views/3DGS/point_cloud/iteration_1000/point_cloud.ply --deform_weight 0 --white_background --lambda_mask 0.1 --n_views $N_VIEWS --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --W 128 --iterations 30000 --max_num_pts 300000 -r 2 --load_time_step 0 --composition_rank 0
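# ------------------------------------------------------------------------------
# Optional sketch (not part of the original scripts): repeat the 3DGS -> SplatFields3D
# pipeline above for every DTU scan listed next to SCENE. Flags and output paths are
# copied verbatim from the commands above, and render.py takes the same arguments as
# the corresponding train.py call.
for SCENE in scan24 scan37 scan40 scan55 scan63 scan65 scan69 scan83 scan97 scan105 scan106 scan110 scan114 scan118 scan122; do
    python train.py -s $DATASET_ROOT/$SCENE -m ./output_rep/dtu/$SCENE/3views/3DGS --white_background --lambda_mask 0.1 -r 2 --is_static --n_views $N_VIEWS --iterations 30000
    python train.py -s $DATASET_ROOT/$SCENE -m ./output_rep/dtu/$SCENE/3views/SplatFields3D --pc_path ./output_rep/dtu/$SCENE/3views/3DGS/point_cloud/iteration_1000/point_cloud.ply --deform_weight 0 --white_background --lambda_mask 0.1 --n_views $N_VIEWS --lambda_norm 0.01 --encoder_type VarTriPlaneEncoder --W 128 --iterations 30000 --max_num_pts 300000 -r 2 --load_time_step 0 --composition_rank 0
done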