dgp.sh
#!/bin/bash
#SBATCH --job-name=DGP128
#SBATCH --partition=CLUSTER
#SBATCH -t 7-00:00
### e.g. to request 2 nodes with 1 GPU each, for a total of 2 GPUs (WORLD_SIZE==2)
### Note: --gres=gpu:x should equal --ntasks-per-node
#SBATCH --nodelist=compute-0-1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=20
#SBATCH -o ./output/%x-%j.out
#SBATCH -e ./output/%x-%j.err
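### SLURM does not create the directory for the -o/-e paths; make sure it
### exists before submitting, e.g.: mkdir -p ./output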
### export MASTER_PORT=12342
### export WORLD_SIZE=1
### echo "NODELIST="${SLURM_NODELIST}
### master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
### export MASTER_ADDR=$master_addr
### echo "MASTER_ADDR="$MASTER_ADDR
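### The commented-out exports above set up multi-node distributed training
### (master address/port, world size); they are unused here since this job
### requests a single node and a single task/GPU.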
module load python/anaconda3
module load cuda/cuda-11.4
source activate tf_tc
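### Optional sanity check that the activated env can see the GPU (this assumes
### a PyTorch-based environment; adjust for your framework):
### python -c "import torch; print(torch.cuda.is_available())"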
D='2048'
OBJ='wing'
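### D and OBJ are space-separated sweep lists; e.g. D='1024 2048' would run
### both sizes (these extra values are illustrative, not from this script).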
# Sweep over all (dims, object) combinations; as configured, a single run
# with d=2048 and obj='wing'.
for d in $D
do
  for obj in $OBJ
  do
    # Run DGP with a randomly initialized generator (--random_G), lowering the
    # G and z learning rates over five stages of 1000 iterations each.
    python DGP_untrain.py \
      --class -1 \
      --seed 1314 \
      --random_G \
      --update_embed \
      --lr_ratio 1.0 \
      --select_num 1 \
      --iterations 1000 1000 1000 1000 1000 \
      --G_lrs 1.4e-5 1.4e-5 1.4e-5 0.7e-5 0.35e-5 \
      --z_lrs 7e-4 7e-5 7e-5 7e-6 7e-7 \
      --use_in False False False False False \
      --resolution 256 \
      --weights_root pretrained \
      --load_weights 256 \
      --G_ch 96 \
      --G_shared \
      --hier --dim_z 120 --shared_dim 128 \
      --skip_init --use_ema \
      --dims $d \
      --object $obj
  done
done
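### Submit with: sbatch dgp.sh
### Monitor with: squeue -u $USER
### Logs land in ./output/DGP128-<jobid>.out and .err (from the %x-%j pattern).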