mihirma committed (verified) · Commit 79fc249 · 1 Parent(s): cbb3887

Add af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc

Files changed (16)
  1. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-11074bd2-09df-4856-bd07-fbc4dd6df28c1761329974242-2025_10_24-20.20.11.685/source.csv +0 -0
  2. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-2405e64f-1eba-4736-8f9f-c0fa4e22086f1760344445810-2025_10_13-10.34.47.298/source.csv +25 -0
  3. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-281ab054-0cef-4522-89fc-cb81d7f6b0c51760277578578-2025_10_12-16.01.21.980/source.csv +0 -0
  4. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-386afd79-6aa8-4e11-939b-d2205ae947e71760884674230-2025_10_19-16.38.53.787/source.csv +168 -0
  5. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-53807628-1d5f-454c-846d-8a22156439901761331069237-2025_10_24-20.39.31.264/source.csv +0 -0
  6. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-59804d2c-df27-4b41-9fe6-ffdfef8825021760703869419-2025_10_17-14.25.51.972/source.csv +3 -0
  7. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-5b002f72-29b2-49c4-8fb4-15e9a0f2c68a1760283746731-2025_10_12-17.43.07.478/source.csv +28 -0
  8. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-b510dc45-0390-4ba1-9cd3-8518acd9730d1761060552425-2025_10_21-17.29.37.898/source.csv +46 -0
  9. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-c0b85b13-745f-4849-b691-3865a0b92b6b1760870484089-2025_10_19-12.42.53.163/source.csv +0 -0
  10. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-e1782904-5e7c-4264-b342-b03f67dbe6421760704011771-2025_10_17-14.28.12.326/source.csv +24 -0
  11. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-ea3d4740-7761-4d5b-b6ed-89bdaaf7e91f1760906482985-2025_10_19-22.42.16.510/source.csv +69 -0
  12. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-eb7f35ad-ba70-4d81-93ba-d60c3e498ef11760100048914-2025_10_10-14.41.32.902/source.csv +0 -0
  13. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-ee716c9f-2894-4e4a-83fd-1afc9628a5fd1760867766844-2025_10_19-11.57.29.153/source.csv +203 -0
  14. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-efd7ba8c-a234-4233-92f3-7b2e61adffff1760041314363-2025_10_09-22.22.58.118/source.csv +0 -0
  15. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-fb287441-450c-488c-b0fb-c98a58fc5b261760876714373-2025_10_19-14.25.55.193/source.csv +0 -0
  16. af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-fc82b954-d473-479e-931a-c238d50a81b41761056077856-2025_10_21-16.15.07.951/source.csv +0 -0
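
For orientation, every source.csv added in this commit shares the header visible in the diffs below: `Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type`. A minimal sketch, assuming only that header (the `load_recording` name and the cast of `Sequence`/`Time` to integers are this note's assumptions, not anything documented by the extension), of reading one recording with Python's standard `csv` module:

```python
import csv
from pathlib import Path


def load_recording(path: Path) -> list[dict]:
    """Read one crowd-code source.csv into a list of row dicts."""
    with path.open(newline="") as f:
        rows = list(csv.DictReader(f))
    # Sequence and Time appear as plain integers in the diffs below; cast them.
    for row in rows:
        row["Sequence"] = int(row["Sequence"])
        row["Time"] = int(row["Time"])
    return rows


# Hypothetical usage with one of the paths listed above:
# rows = load_recording(Path("af40c129.../crowd-code-2405e64f-.../source.csv"))
# print(rows[0]["File"], rows[0]["Type"])
```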
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-11074bd2-09df-4856-bd07-fbc4dd6df28c1761329974242-2025_10_24-20.20.11.685/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-2405e64f-1eba-4736-8f9f-c0fa4e22086f1760344445810-2025_10_13-10.34.47.298/source.csv ADDED
@@ -0,0 +1,25 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,5,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_grain_ablation\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\nexport CUDA_VISIBLE_DEVICES=0\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/npy_test\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_default/3528955\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=2048 \\n --patch_size=16 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --num_steps=10_000 \\n --log_image_interval=100_000 \\n --log \\n --log_checkpoint_interval=100_000 \\n --name=coinrun-dynamics-maskgit-grain-ablation-bs2048-$slurm_job_id \\n --tags coinrun dynamics maskgit grain-ablation bs2048 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir_train &\n\nchild_pid=$!\n\nwait $child_pid",shellscript,tab
3
+ 2,1626,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:34:47 AM [info] Activating crowd-code\n10:34:47 AM [info] Recording started\n10:34:47 AM [info] Initializing git provider using file system watchers...\n10:34:47 AM [info] Git repository found\n10:34:47 AM [info] Git provider initialized successfully\n10:34:48 AM [info] Initial git state: [object Object]\n",Log,tab
4
+ 3,60415,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"",shellscript,tab
5
+ 4,64045,"TERMINAL",0,0,"git status",,terminal_command
6
+ 5,64094,"TERMINAL",0,0,"]633;C",,terminal_output
7
+ 6,64345,"TERMINAL",0,0,"On branch prepend-action-maskgit\r\nYour branch is up to date with 'origin/prepend-action-maskgit'.\r\n\r\nUntracked files:\r\n (use ""git add <file>..."" to include in what will be committed)\r\n\t checklist.md\r\n\tali-old-branch.diff\r\n\tappendix_c_nodes_embeddings_noise.md\r\n\tappendix_c_nodes_video_noise.md\r\n\tdata/_vizdoom.ini\r\n\tdata/jasmine_data/ViZDoomPPO/\r\n\tdata/jasmine_data/_vizdoom/\r\n\tdata/uv.lock\r\n\tdataset_duplicates.ipynb\r\n\tdiff.diff\r\n\tdiff2.diff\r\n\tdoom_job_starter.sh\r\n\tgifs/\r\n\tinput_pipeline/\r\n\tjasmine/train_dynamics_appendix-c.py\r\n\tjasmine/train_dynamics_appendix-c_main.py\r\n\tjasmine/train_dynamics_full_prec.py\r\n\tjasmine/train_tokenizer_full_precision.py\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tmessage.md\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\ttest.py\r\n\ttrain_dynamics_causal_3558251.log\r\n\tutils/\r\n\tuv.lock\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
8
+ 7,66883,"TERMINAL",0,0,"cd slurm/",,terminal_command
9
+ 8,67149,"TERMINAL",0,0,"ls",,terminal_command
10
+ 9,67159,"TERMINAL",0,0,"]633;Ccommon dev jobs README.md templates utils\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine/slurm",,terminal_output
11
+ 10,96722,"TERMINAL",0,0,"git status",,terminal_command
12
+ 11,96774,"TERMINAL",0,0,"]633;C",,terminal_output
13
+ 12,98177,"TERMINAL",0,0,"Refresh index: 89% (868/974)\r",,terminal_output
14
+ 13,98345,"TERMINAL",0,0,"Refresh index: 100% (974/974)\rRefresh index: 100% (974/974), done.\r\n",,terminal_output
15
+ 14,98511,"TERMINAL",0,0,"On branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add <file>..."" to update what will be committed)\r\n (use ""git restore <file>..."" to discard changes in working directory)\r\n\tmodified: jobs/mihir/horeka/coinrun/ablations/train_dyn_default-no-noise-main.sh\r\n\tmodified: jobs/mihir/horeka/coinrun/default_runs/train_dyn_default.sh\r\n\r\nUntracked files:\r\n (use ""git add <file>..."" to include in what will be committed)\r\n\tjobs/mihir/horeka/coinrun/ablations/appendix-c.sh\r\n\tjobs/mihir/horeka/coinrun/ablations/train_dyn_default-no-noise-ac-prepend.sh\r\n\tjobs/mihir/horeka/coinrun/causal_sweep/\r\n\tjobs/mihir/horeka/coinrun/default_runs/train_dyn_causal.sh\r\n\tjobs/mihir/horeka/coinrun/speed_ablation/\r\n\tjobs/mihir/horeka/doom/\r\n\tjobs/mihir/horeka/preprocessing/doom/\r\n\tjobs/mihir/horeka/preprocessing/train_doom_agent.sh\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine/slurm",,terminal_output
16
+ 15,106569,"TERMINAL",0,0,"git add jobs/mihir/",,terminal_command
17
+ 16,106616,"TERMINAL",0,0,"]633;C",,terminal_output
18
+ 17,107438,"TERMINAL",0,0,"]0;tum_cte0515@hkn1991:~/Projects/jasmine/slurm",,terminal_output
19
+ 18,127741,"TERMINAL",0,0,"git commit -m ""added more scripts""",,terminal_command
20
+ 19,127744,"TERMINAL",0,0,"]633;C",,terminal_output
21
+ 20,128322,"TERMINAL",0,0,"[main 702e5e1] added more scripts\r\n 48 files changed, 2471 insertions(+), 14 deletions(-)\r\n create mode 100644 jobs/mihir/horeka/coinrun/ablations/appendix-c.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/ablations/train_dyn_default-no-noise-ac-prepend.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/causal_sweep/sweeper.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/causal_sweep/train_dyn_causal.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/default_runs/train_dyn_causal.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_1024/train_dyn_default.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_1024/train_dyn_default_ffn_dim_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_1024/train_dyn_default_flash_attn_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_1024/train_dyn_default_mixed_precision_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_1024/train_dyn_grain_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_default.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_default_ffn_dim_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_default_flash_attn_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_default_mixed_precision_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution120x160/train_dyn_default.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution120x160/train_dyn_default_500k.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution120x160/train_lam_default_1node.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution120x160/train_tokenizer_default_1node.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution120x160/train_tokenizer_default_1node_patch_size_4.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution120x160/train_tokenizer_default_1node_requeue.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/sample.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_dyn_default.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_dyn_default_500k.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_dyn_default_bigger.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_dyn_default_patch_size_4.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_lam_default_1gpu.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_tokenizer_default_1gpu.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_tokenizer_default_1gpu_500k.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_tokenizer_default_1gpu_bigger.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_tokenizer_default_1gpu_higher_lr.sh\r\n create mode 100644 jobs/mihir/horeka/doom/resolution60x80/train_tokenizer_default_1gpu_patch_size_4.sh\r\n create mode 100644 jobs/mihir/horeka/doom/train_tokenizer_default_1gpu.sh\r\n create mode 100644 jobs/mihir/horeka/doom/train_tokenizer_default_1node.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked._sh\r\n create mode 100644 
jobs/mihir/horeka/preprocessing/doom/doom_chunked_10m.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_10m_120x160.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_10m_60x80.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_1m.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_1m_120x160.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_1m_60x80.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_50m.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_50m_120x160.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_50m_60x80.sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/doom/doom_chunked_test._sh\r\n create mode 100644 jobs/mihir/horeka/preprocessing/train_doom_agent.sh\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine/slurm",,terminal_output
22
+ 21,2694147,"TERMINAL",0,0,"cd ..",,terminal_command
23
+ 22,2694314,"TERMINAL",0,0,"ls",,terminal_command
24
+ 23,2694366,"TERMINAL",0,0,"]633;C",,terminal_output
25
+ 24,2694606,"TERMINAL",0,0," ali-old-branch.diff diff.diff killer_partition.sh __pycache__ tests\r\n appendix_c_nodes_embeddings_noise.md doom_job_starter.sh killer.sh pyproject.toml train_dynamics_causal_3558251.log\r\n appendix_c_nodes_video_noise.md frame-knoms.png LICENSE README.md utils\r\n' checklist.md' frame.png log.log requirements-franz.txt uv.lock\r\n data frames logs samples wandb\r\n dataset_duplicates.ipynb gifs message.md scripts_cremers\r\n debug input_pipeline models slurm\r\n diff2.diff jasmine overfit_dir.zip test.py\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
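
The recording above mixes editor events with terminal activity: rows whose `File` is `TERMINAL` and whose `Type` is `terminal_command` hold the commands that were typed (here the `git status` / `git add` / `git commit` sequence). A small sketch of recovering that command history, reusing the hypothetical `load_recording` helper from the earlier example; the filter conditions simply restate the column values seen above:

```python
def terminal_commands(rows: list[dict]) -> list[tuple[int, str]]:
    """Return (Time, command) pairs for rows logged as terminal commands.

    Assumes only the conventions visible in the diff above:
    Type == "terminal_command" and File == "TERMINAL".
    """
    return [
        (row["Time"], row["Text"])
        for row in rows
        if row["Type"] == "terminal_command" and row["File"] == "TERMINAL"
    ]


# e.g. terminal_commands(rows) ->
#   [(64045, "git status"), (106569, "git add jobs/mihir/"), ...]
```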
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-281ab054-0cef-4522-89fc-cb81d7f6b0c51760277578578-2025_10_12-16.01.21.980/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-386afd79-6aa8-4e11-939b-d2205ae947e71760884674230-2025_10_19-16.38.53.787/source.csv ADDED
@@ -0,0 +1,168 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,1877,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"4:38:53 PM [info] Activating crowd-code\n4:38:53 PM [info] Recording started\n4:38:53 PM [info] Initializing git provider using file system watchers...\n4:38:54 PM [info] Git repository found\n4:38:54 PM [info] Git provider initialized successfully\n4:38:54 PM [info] Initial git state: [object Object]\n",Log,tab
3
+ 3,90716,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dyn_single_gpu\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3583033\n\nenv | grep SLURM\n\nexport JAX_COMPILER_ENABLE_REMAT_PASS=False\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=64 \\n --image_width=64 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=8 \\n --dyna_dim=2048 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=32 \\n --dyna_ffn_dim=8192 \\n --num_steps=5000 \\n --wsd_decay_steps=0 \\n --warmup_steps=0 \\n --patch_size=2 \\n --log \\n --name=coinrun-dyn-big-model-single-gpu-bs-8-$slurm_job_id \\n --tags dyn coinrun big-model single-gpu \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir_train \\n --tokenizer_checkpoint $tokenizer_checkpoint\n",shellscript,tab
4
+ 4,92083,"TERMINAL",0,0,"watch",,terminal_focus
5
+ 5,93886,"TERMINAL",0,0,"slurm/dev/franz/berlin/atari/dynamics_from_continued_from_40k_tokenizer/dynamics.shslurm/dev/franz/berlin/atari/dynamics_from_continued_from_40k_tokenizer/spawn_dynamics.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6/slurm/dev/franz/berlin/atari/tokenizer_lr_3e-6/tokenizer.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_continue_from_40k_to_200k/slurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_continue_from_40k_to_200k/spawn_tokenizers.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_continue_from_40k_to_200k/tokenizer.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_fleuret_hparams/slurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_fleuret_hparams/spawn_tokenizers.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_fleuret_hparams/tokenizer.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_fleuret_hparams_patch_size_4/slurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_fleuret_hparams_patch_size_4/spawn_tokenizers.shslurm/dev/franz/berlin/atari/tokenizer_lr_3e-6_fleuret_hparams_patch_size_4/tokenizer.shslurm/jobs/alfred/berlin/workshop/slurm/jobs/alfred/berlin/workshop/case_study/case_study_vanilla_genie_10M/slurm/jobs/alfred/berlin/workshop/case_study/case_study_vanilla_genie_10M/dynamics_case_study_dataset_10M.sbatchslurm/jobs/alfred/berlin/workshop/case_study/case_study_vanilla_genie_10M/sample_10M.sbatchslurm/jobs/alfred/berlin/workshop/case_study/case_study_vanilla_genie_10M/sample_10M_action_prepend.sbatchslurm/jobs/alfred/berlin/workshop/case_study/case_study_vanilla_genie_10M/tokenizer.sbatchslurm/jobs/alfred/berlin/workshop/case_study/misc/case_study_vanilla_genie/debug/slurm/jobs/alfred/berlin/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_10M_genie_default.shslurm/jobs/alfred/berlin/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_10M_jasmine_default.shslurm/jobs/alfred/berlin/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_50M_genie_default.shslurm/jobs/alfred/berlin/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_50M_jasmine_default.shslurm/jobs/alfred/berlin/workshop/speed_ablation/slurm/jobs/alfred/berlin/workshop/speed_ablation/chunk_ablation_runs/slurm/jobs/alfred/berlin/workshop/speed_ablation/chunk_ablation_runs/dynamics_chunk_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/chunk_ablation_runs/dynamics_chunk_ablation_spawner_batch_2048.shslurm/jobs/alfred/berlin/workshop/speed_ablation/chunk_ablation_runs/dynamics_chunk_ablation_spawner_batch_36.shslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_ablation_dataset/slurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_ablation_dataset/generate_data_10M_chunksize.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_ablation_dataset/generate_data_10M_chunksize_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_size_ablation_dataset/slurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_1.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_10.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_100.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_1000.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_10000.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/slurm/jobs/al
fred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_base_speed_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_base_speed_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_ffn_dim_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_ffn_dim_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_flash_attn_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_flash_attn_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_grain_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_grain_ablation_spwaner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_no_flash_attn_full_prec.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/max_out/coinrun_dynamics_no_flash_attn_full_prec_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_base/slurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_base/coinrun_dynamics_base_speed_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_base/coinrun_dynamics_base_speed_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_ffn/slurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_flash_attention/slurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_full_prec/slurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec.sbatchslurm/jobs/alfred/berlin/workshop/speed_ablation/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec_spawner.shslurm/jobs/alfred/berlin/workshop/throughput/slurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_base/slurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_base/coinrun_dynamics_base_speed_ablation.sbatchslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_base/coinrun_dynamics_base_speed_ablation_batch_36.sbatchslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_base/coinrun_dynamics_base_speed_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_ffn/slurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation.sbatchslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_flash_attention/slurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation.sbatchslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation_spawner.shslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_full_prec/slurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec.sbatchslurm/jobs/alfred/berlin/workshop/throughput/speed_ablation_full_prec/coinrun_dynamics_no_fla
sh_attn_full_prec_spawner.shslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/slurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/lam_jafar_reproduction_dataset_50M_requeue.sbatchslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_no_print.sbatchslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue.sbatchslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue_debug.sbatchslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_requeue.sbatchslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/slurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue.sbatchslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_requeue.sbatchslurm/jobs/franz/berlin/doom/generate_dataset/slurm/jobs/franz/berlin/doom/generate_dataset/generate_doom_dataset_10m.shslurm/jobs/mihir/horeka/coinrun/slurm/jobs/mihir/horeka/coinrun/big-model/slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.shslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.shslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.shslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.shslurm/jobs/mihir/horeka/coinrun/big-model/train_tokenizer_path_size_1.shslurm/jobs/mihir/horeka/coinrun/big-model/train_tokenizer_path_size_2.shslurm/jobs/mihir/horeka/coinrun/default_runs/train_tokenizer_default.shsent 253,918 bytes received 1,881 bytes 24,361.81 bytes/sectotal size is 28,954,949 speedup is 113.19(jasmine) [tum_cte0515@hkn1993 jasmine]$ runner(jasmine) [tum_cte0515@hkn1993 jasmine_jobs]$ sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.shSubmitted batch job 3583200(jasmine) [tum_cte0515@hkn1993 jasmine_jobs]$ sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.shSubmitted batch job 3583201(jasmine) [tum_cte0515@hkn1993 jasmine_jobs]$ sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.shSubmitted batch job 3583203(jasmine) [tum_cte0515@hkn1993 jasmine_jobs]$ sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.shSubmitted batch job 3583204(jasmine) [tum_cte0515@hkn1993 jasmine_jobs]$ queue",,terminal_command
6
+ 6,95994,"TERMINAL",0,0,"scancel --me",,terminal_command
7
+ 7,96048,"TERMINAL",0,0,"]633;C",,terminal_output
8
+ 8,96061,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
9
+ 9,98861,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dyn_no_flash_attn_single_gpu\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3583033\n\nenv | grep SLURM\n\nexport JAX_COMPILER_ENABLE_REMAT_PASS=False\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=64 \\n --image_width=64 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=8 \\n --num_steps=500 \\n --wsd_decay_steps=0 \\n --warmup_steps=0 \\n --dyna_dim=2048 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=32 \\n --dyna_ffn_dim=8192 \\n --patch_size=2 \\n --no-use-flash-attention \\n --log \\n --name=coinrun-dyn-big-model-no-flash-attn-single-gpu-bs-8-$slurm_job_id \\n --tags dyn coinrun big-model no-flash-attn single-gpu \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir_train \\n --tokenizer_checkpoint $tokenizer_checkpoint\n",shellscript,tab
10
+ 10,102168,"TERMINAL",0,0,"bash",,terminal_focus
11
+ 11,104116,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dyn_no_flash_attn_single_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3583033\n\nenv | grep SLURM\n\nexport CUDA_VISIBLE_DEVICES=0\nexport JAX_COMPILER_ENABLE_REMAT_PASS=False\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=64 \\n --image_width=64 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --num_steps=5000 \\n --wsd_decay_steps=0 \\n --warmup_steps=0 \\n --dyna_dim=2048 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=32 \\n --dyna_ffn_dim=8192 \\n --patch_size=2 \\n --no-use-flash-attention \\n --log \\n --name=coinrun-dyn-big-model-no-flash-attn-single-node-$slurm_job_id \\n --tags dyn coinrun big-model no-flash-attn single-node \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir_train \\n --tokenizer_checkpoint $tokenizer_checkpoint\n",shellscript,tab
12
+ 12,105277,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1431,0,"",shellscript,selection_mouse
13
+ 13,106237,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1430,1,"",shellscript,content
14
+ 14,106574,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1430,0,"8",shellscript,content
15
+ 15,106578,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1431,0,"",shellscript,selection_keyboard
16
+ 16,108220,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dyn_single_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_500m_gt_actions_split/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3583033\n\nenv | grep SLURM\n\nexport CUDA_VISIBLE_DEVICES=0\nexport JAX_COMPILER_ENABLE_REMAT_PASS=False\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=64 \\n --image_width=64 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --dyna_dim=2048 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=32 \\n --dyna_ffn_dim=8192 \\n --num_steps=5000 \\n --wsd_decay_steps=0 \\n --warmup_steps=0 \\n --patch_size=2 \\n --log \\n --name=coinrun-dyn-big-model-single-node-$slurm_job_id \\n --tags dyn coinrun big-model single-node \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir_train \\n --tokenizer_checkpoint $tokenizer_checkpoint\n",shellscript,tab
17
+ 17,109184,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1417,0,"",shellscript,selection_mouse
18
+ 18,110121,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1416,1,"",shellscript,content
19
+ 19,110366,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1416,0,"8",shellscript,content
20
+ 20,110367,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1417,0,"",shellscript,selection_keyboard
21
+ 21,119207,"TERMINAL",0,0,"salloc --time=02:00:00 --partition=accelerated-h100 --nodes=1 --gres=gpu:1 --cpus-per-task=8",,terminal_command
22
+ 22,119269,"TERMINAL",0,0,"]633;Csalloc: Pending job allocation 3583319\r\nsalloc: job 3583319 queued and waiting for resources\r\n",,terminal_output
23
+ 23,120843,"TERMINAL",0,0,"bash",,terminal_focus
24
+ 24,121790,"TERMINAL",0,0,"idling",,terminal_command
25
+ 25,121898,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1993.localdomain: Sun Oct 19 16:40:55 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 191 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated: 55 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 8 nodes idle\rPartition accelerated-h200:\t 5 nodes idle",,terminal_output
26
+ 26,122973,"TERMINAL",0,0,"6",,terminal_output
27
+ 27,123970,"TERMINAL",0,0,"7",,terminal_output
28
+ 28,124388,"TERMINAL",0,0,"salloc",,terminal_focus
29
+ 29,125013,"TERMINAL",0,0,"8",,terminal_output
30
+ 30,126158,"TERMINAL",0,0,"9",,terminal_output
31
+ 31,126623,"TERMINAL",0,0,"^Csalloc: Job allocation 3583319 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
32
+ 32,127092,"TERMINAL",0,0,"1:00",,terminal_output
33
+ 33,128163,"TERMINAL",0,0,"1",,terminal_output
34
+ 34,129175,"TERMINAL",0,0,"2",,terminal_output
35
+ 35,130211,"TERMINAL",0,0,"3",,terminal_output
36
+ 36,131259,"TERMINAL",0,0,"4",,terminal_output
37
+ 37,132299,"TERMINAL",0,0,"5",,terminal_output
38
+ 38,133357,"TERMINAL",0,0,"7",,terminal_output
39
+ 39,134383,"TERMINAL",0,0,"8",,terminal_output
40
+ 40,135088,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated-h100 --nodes=1 --gres=gpu:1 --cpus-per-task=8",,terminal_command
41
+ 41,135158,"TERMINAL",0,0,"]633;Csalloc: Pending job allocation 3583320\r\nsalloc: job 3583320 queued and waiting for resources\r\n",,terminal_output
42
+ 42,135419,"TERMINAL",0,0,"9",,terminal_output
43
+ 43,136466,"TERMINAL",0,0,"10",,terminal_output
44
+ 44,136814,"TERMINAL",0,0,"watch",,terminal_focus
45
+ 45,137504,"TERMINAL",0,0,"1",,terminal_output
46
+ 46,137744,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
47
+ 47,141543,"TERMINAL",0,0,"sync-runner",,terminal_command
48
+ 48,141603,"TERMINAL",0,0,"]633;Csending incremental file list\r\n",,terminal_output
49
+ 49,143233,"TERMINAL",0,0,"slurm/dev/franz/berlin/atari/\r\nslurm/dev/franz/berlin/atari/sample/\r\nslurm/dev/franz/berlin/atari/sample/sample_atari.sh\r\nslurm/dev/franz/berlin/atari/sample/spawn_sampler.sh\r\nslurm/dev/franz/berlin/coinrun/sample/maskgit/\r\nslurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh\r\nslurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh\r\nslurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh\r\nslurm/jobs/alfred/berlin/workshop/\r\nslurm/jobs/alfred/berlin/workshop/sampling/\r\nslurm/jobs/alfred/berlin/workshop/sampling/sample_jasmine_action_add.sh\r\nslurm/jobs/alfred/berlin/workshop/sampling/sample_jasmine_action_prepend.sh\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/dynamics_jafar_reproduction_dataset_50M.sbatch\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/dynamics_jafar_reproduction_dataset_50M_no_print.sbatch\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue.sbatch\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue_new_env.sbatch\r\nslurm/jobs/alfred/berlin/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_requeue_new_env.sbatch\r\nslurm/jobs/alfred/horeka/\r\nslurm/jobs/alfred/horeka/workshop/\r\nslurm/jobs/alfred/horeka/workshop/case_study/\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/dynamics_case_study_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/dynamics_case_study_dataset_10M_action_prepend.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/sample_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/sample_10M_action_prepend.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/tokenizer.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/debug/\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/debug/lam.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_10M/debug/tokenizer.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_240M_w_chunking/\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_240M_w_chunking/tokenizer_chunking_100.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_240M_w_chunking/tokenizer_chunking_10k.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_50M/\r\nslurm/jobs/alfred/horeka/workshop/case_study/case_study_vanilla_genie_50M/tokenizer_action_prepend.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/batch_scaling/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/batch_scaling/dynamics_batch_scaling.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/batch_scaling/spawner.sh\r\n",,terminal_output
50
+ 50,144099,"TERMINAL",0,0,"slurm/jobs/alfred/horeka/workshop/case_study/misc/case_study/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study/tokenizer.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study/debug/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study/debug/tokenizer.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/tokenizer.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/dynamics_10M_patch_16.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/dynamics_10M_patch_16_prepend.sbatch\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_10M_genie_default.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_10M_jasmine_default.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_50M.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_50M_genie_default.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_50M_jasmine_default.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/lam_50M_wsd.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/case_study_vanilla_genie/debug/tokenizer.sh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/non_co_training/\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/non_co_training/lam_10Msh\r\nslurm/jobs/alfred/horeka/workshop/case_study/misc/non_co_training/lam_50M.sh\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_10M_arr_rec_no_seeding.sbatch\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_10M_npy_arr_rec.sbatch\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_240M_chunks_per_file_100.sbatch\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_240M_chunks_per_file_10k.sbatch\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_240M_npy_arr_rec.sbatch\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_250M_npy_arr_rec.sbatch\r\nslurm/jobs/alfred/horeka/workshop/generate_coinrun_data/generate_data_250M_npy_arr_rec_test_val.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunk_ablation_runs/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunk_ablation_runs/dynamics_chunk_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunk_ablation_runs/dynamics_chunk_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_ablation_dataset/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_ablation_dataset/generate_data_10M_chunksize.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_ablation_dataset/generate_data_10M_chunksize_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_size_ablation_dataset/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_1.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_10.sbatch\r\nslurm/jobs/alfred/horeka/worksh
op/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_100.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_1000.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/chunking_size_ablation_dataset/generate_data_10M_chunks_per_file_10000.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_base_speed_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_base_speed_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_ffn_dim_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_ffn_dim_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_flash_attn_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_flash_attn_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_grain_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_grain_ablation_spwaner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_no_flash_attn_full_prec.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/max_out/coinrun_dynamics_no_flash_attn_full_prec_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_base/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_base/coinrun_dynamics_base_speed_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_base/coinrun_dynamics_base_speed_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_ffn/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_flash_attention/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_full_prec/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec.sbatch\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_2048/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_2048/train_dyn_default.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_2048/train_dyn_default_ffn_dim_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_2048/train_dyn_default_flash_attn_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_2048/train_dyn_default_mixed_precision_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_2048/train_dyn_grain_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_36/\r\nslurm/jobs/alfred/horeka/workshop/speed_abla
tion_horeka/batch_size_36/train_dyn_default.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_36/train_dyn_default_ffn_dim_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_36/train_dyn_default_flash_attn_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_36/train_dyn_default_mixed_precision_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/batch_size_36/train_dyn_grain_ablation.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/chunk/\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/chunk/train_chunk_per_file_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/chunk/train_chunk_size_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/speed_ablation_horeka/chunk/train_dynamics.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/generate_data_10M_chunksize.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/generate_data_10M_chunksize_base.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/generate_data_10M_chunksize_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/chunking_size/\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/chunking_size/generate_data_10M_chunks_per_file.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/dataset_gen/chunking_size/generate_data_10M_chunks_per_file_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_base/\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_base/coinrun_dynamics_base_speed_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_base/coinrun_dynamics_base_speed_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_ffn/\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_ffn/coinrun_dynamics_ffn_dim_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_flash_attention/\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_flash_attention/coinrun_dynamics_flash_attn_ablation_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_full_prec/\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec.sbatch\r\nslurm/jobs/alfred/horeka/workshop/throughput/speed_ablation_full_prec/coinrun_dynamics_no_flash_attn_full_prec_spawner.sh\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/lam_jafar_reproduction_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/lam_jafar_reproduction_dataset_10M_auto_requeue_from_start.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/lam_jafar_reproduction_dataset_10M_auto_requeue_from_start_test.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/tokenizer_jafar_reproduction_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduct
ion_10M/tokenizer_jafar_reproduction_dataset_10M_auto_requeue.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/tokenizer_jafar_reproduction_dataset_10M_auto_requeue_from_start.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/misc/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/misc/tokenizer_jafar_reproduction_dataset_10M_patch_size_4.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/requeue/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/requeue/lam_jafar_reproduction_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/requeue/lam_jafar_reproduction_dataset_10M_requeue_auto.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/requeue/tokenizer_jafar_reproduction_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_10M/requeue/tokenizer_jafar_reproduction_dataset_10M_patch_size_4.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_250M/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_250M/lam_jafar_reproduction_dataset_250M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_250M/tokenizer_jafar_reproduction_dataset_250M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_250M/tokenizer_jafar_reproduction_dataset_250M_patch_size_4.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/lam_jafar_reproduction_dataset_50M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/lam_jafar_reproduction_dataset_50M_no_print.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/lam_jafar_reproduction_dataset_50M_no_print_requeue.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/lam_jafar_reproduction_dataset_50M_requeue.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_no_print.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue_debug.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/tokenizer_jafar_reproduction_dataset_50M_requeue.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/misc/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/misc/tokenizer_jafar_reproduction_dataset_10M_patch_size_4.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue/lam_jafar_reproduction_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue/lam_jafar_reproduction_dataset_10M_requeue_auto.sbatch\r\nslurm/jobs/alfred/horeka/
workshop/time_till_convergence/jafar_reproduction_50M/requeue/tokenizer_jafar_reproduction_dataset_10M.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue/tokenizer_jafar_reproduction_dataset_10M_patch_size_4.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_no_print_requeue.sbatch\r\nslurm/jobs/alfred/horeka/workshop/time_till_convergence/jafar_reproduction_50M/requeue_auto/tokenizer_jafar_reproduction_dataset_50M_requeue.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh\r\nslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh\r\nslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh\r\nslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh\r\n",,terminal_output
51
+ 51,144596,"TERMINAL",0,0,"\r\nsent 286,316 bytes received 3,068 bytes 82,681.14 bytes/sec\r\ntotal size is 29,164,292 speedup is 100.78\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
52
+ 52,146262,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",0,0,"",shellscript,tab
53
+ 53,147965,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",0,0,"",shellscript,tab
54
+ 54,149134,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1729,0,"",shellscript,selection_mouse
55
+ 55,150348,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1730,0,"bs-8-",shellscript,content
56
+ 56,150351,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1735,0,"",shellscript,selection_command
57
+ 57,151310,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1811,0,"",shellscript,selection_command
58
+ 58,151598,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1810,0,"",shellscript,selection_command
59
+ 59,152456,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",1810,0,"bs8 ",shellscript,content
60
+ 60,153686,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",0,0,"",shellscript,tab
61
+ 61,155527,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1703,0,"",shellscript,selection_mouse
62
+ 62,156169,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1704,0,"bs8 ",shellscript,content
63
+ 63,156171,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1708,0,"",shellscript,selection_command
64
+ 64,156981,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",0,0,"",shellscript,tab
65
+ 65,158715,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1671,0,"bs-8-",shellscript,content
66
+ 66,158717,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1676,0,"",shellscript,selection_command
67
+ 67,158976,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1737,0,"bs8 ",shellscript,content
68
+ 68,158977,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",1741,0,"",shellscript,selection_command
69
+ 69,161328,"TERMINAL",0,0,"salloc",,terminal_focus
70
+ 70,162247,"TERMINAL",0,0,"bash",,terminal_focus
71
+ 71,166759,"TERMINAL",0,0,"sync-runner",,terminal_command
72
+ 72,166837,"TERMINAL",0,0,"]633;Csending incremental file list\r\n",,terminal_output
73
+ 73,167005,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh\r\nslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh\r\nslurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh\r\n\r\nsent 66,436 bytes received 432 bytes 133,736.00 bytes/sec\r\ntotal size is 29,164,314 speedup is 436.15\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
74
+ 74,172214,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",0,0,"",shellscript,tab
75
+ 75,174071,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",0,0,"",shellscript,tab
76
+ 76,175194,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",0,0,"",shellscript,tab
77
+ 77,177851,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",0,0,"",shellscript,tab
78
+ 78,179683,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",1422,0,"",shellscript,selection_mouse
79
+ 79,181017,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",1422,0,"0",shellscript,content
80
+ 80,181019,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",1423,0,"",shellscript,selection_keyboard
81
+ 81,182276,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",0,0,"",shellscript,tab
82
+ 82,182910,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",0,0,"",shellscript,tab
83
+ 83,184987,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",0,0,"",shellscript,tab
84
+ 84,187194,"TERMINAL",0,0,"sync-runner",,terminal_command
85
+ 85,187247,"TERMINAL",0,0,"]633;Csending incremental file list\r\n",,terminal_output
86
+ 86,187409,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh\r\n\r\nsent 62,582 bytes received 394 bytes 41,984.00 bytes/sec\r\ntotal size is 29,164,315 speedup is 463.10\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
87
+ 87,191995,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh",,terminal_command
88
+ 88,192012,"TERMINAL",0,0,"]633;CSubmitted batch job 3583332\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
89
+ 89,198751,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh",,terminal_command
90
+ 90,198801,"TERMINAL",0,0,"]633;C",,terminal_output
91
+ 91,198813,"TERMINAL",0,0,"Submitted batch job 3583333\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
92
+ 92,202827,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh",,terminal_command
93
+ 93,202874,"TERMINAL",0,0,"]633;CSubmitted batch job 3583334\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
94
+ 94,209916,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",,terminal_command
95
+ 95,209957,"TERMINAL",0,0,"]633;CSubmitted batch job 3583335\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
96
+ 96,211074,"TERMINAL",0,0,"queue",,terminal_command
97
+ 97,211122,"TERMINAL",0,0,"]633;C",,terminal_output
98
+ 98,211181,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Sun Oct 19 16:42:24 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3583320 accelerat interact tum_cte0 PD\t0:00\t 1 (Priority)3583335 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583334 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583333 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583332 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output
99
+ 99,212208,"TERMINAL",0,0,"5",,terminal_output
100
+ 100,213356,"TERMINAL",0,0,"6",,terminal_output
101
+ 101,214401,"TERMINAL",0,0,"8",,terminal_output
102
+ 102,215453,"TERMINAL",0,0,"9",,terminal_output
103
+ 103,215550,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jasmine_jobs",,terminal_output
104
+ 104,251258,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",0,0,"",shellscript,tab
105
+ 105,255449,"TERMINAL",0,0,"dev",,terminal_command
106
+ 106,256839,"TERMINAL",0,0,"cd slurm/",,terminal_command
107
+ 107,259312,"TERMINAL",0,0,"git status",,terminal_command
108
+ 108,259357,"TERMINAL",0,0,"]633;C",,terminal_output
109
+ 109,259791,"TERMINAL",0,0,"On branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add <file>..."" to update what will be committed)\r\n (use ""git restore <file>..."" to discard changes in working directory)\r\n\tmodified: jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model.sh\r\n\tmodified: jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn.sh\r\n\tmodified: jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_no_flash_attn_single_gpu.sh\r\n\tmodified: jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/slurm",,terminal_output
110
+ 110,279284,"TERMINAL",0,0,"git add jobs/mihir/horeka/coinrun/big-model/",,terminal_command
111
+ 111,279342,"TERMINAL",0,0,"]633;C",,terminal_output
112
+ 112,279455,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jasmine/slurm",,terminal_output
113
+ 113,307795,"TERMINAL",0,0,"git commit -m 'changed big model flash attn ablation to bs 8'",,terminal_command
114
+ 114,307857,"TERMINAL",0,0,"]633;C",,terminal_output
115
+ 115,308001,"TERMINAL",0,0,"[main 10fe9f3] changed big model flash attn ablation to bs 8\r\n 4 files changed, 13 insertions(+), 13 deletions(-)\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/slurm",,terminal_output
116
+ 116,309154,"TERMINAL",0,0,"git push",,terminal_command
117
+ 117,309210,"TERMINAL",0,0,"]633;C",,terminal_output
118
+ 118,310514,"TERMINAL",0,0,"Enumerating objects: 21, done.\r\nCounting objects: 4% (1/21)\rCounting objects: 9% (2/21)\rCounting objects: 14% (3/21)\rCounting objects: 19% (4/21)\rCounting objects: 23% (5/21)\rCounting objects: 28% (6/21)\rCounting objects: 33% (7/21)\rCounting objects: 38% (8/21)\rCounting objects: 42% (9/21)\rCounting objects: 47% (10/21)\rCounting objects: 52% (11/21)\rCounting objects: 57% (12/21)\rCounting objects: 61% (13/21)\rCounting objects: 66% (14/21)\rCounting objects: 71% (15/21)\rCounting objects: 76% (16/21)\rCounting objects: 80% (17/21)\rCounting objects: 85% (18/21)\rCounting objects: 90% (19/21)\rCounting objects: 95% (20/21)\rCounting objects: 100% (21/21)\rCounting objects: 100% (21/21), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 9% (1/11)\rCompressing objects: 18% (2/11)\rCompressing objects: 27% (3/11)\rCompressing objects: 36% (4/11)\rCompressing objects: 45% (5/11)\rCompressing objects: 54% (6/11)\rCompressing objects: 63% (7/11)\rCompressing objects: 72% (8/11)\rCompressing objects: 81% (9/11)\rCompressing objects: 90% (10/11)\rCompressing objects: 100% (11/11)\rCompressing objects: 100% (11/11), done.\r\nWriting objects: 9% (1/11)\rWriting objects: 18% (2/11)\rWriting objects: 27% (3/11)\rWriting objects: 36% (4/11)\rWriting objects: 45% (5/11)\rWriting objects: 54% (6/11)\rWriting objects: 63% (7/11)\rWriting objects: 72% (8/11)\rWriting objects: 81% (9/11)\rWriting objects: 90% (10/11)\rWriting objects: 100% (11/11)\rWriting objects: 100% (11/11), 945 bytes | 945.00 KiB/s, done.\r\nTotal 11 (delta 9), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output
119
+ 119,310646,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/9)\rremote: Resolving deltas: 11% (1/9)\rremote: Resolving deltas: 22% (2/9)\rremote: Resolving deltas: 33% (3/9)\rremote: Resolving deltas: 44% (4/9)\rremote: Resolving deltas: 55% (5/9)\rremote: Resolving deltas: 66% (6/9)\rremote: Resolving deltas: 77% (7/9)\rremote: Resolving deltas: 88% (8/9)\rremote: Resolving deltas: 100% (9/9)\rremote: Resolving deltas: 100% (9/9), completed with 9 local objects.\r\n",,terminal_output
120
+ 120,310695,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n 067a1f1..10fe9f3 main -> main\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/slurm",,terminal_output
121
+ 121,370586,"TERMINAL",0,0,"queue",,terminal_command
122
+ 122,370660,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Sun Oct 19 16:45:04 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3583333 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583334 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583335 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583332 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3583320 accelerat interact tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output
123
+ 123,371716,"TERMINAL",0,0,"5",,terminal_output
124
+ 124,372751,"TERMINAL",0,0,"6",,terminal_output
125
+ 125,373798,"TERMINAL",0,0,"7",,terminal_output
126
+ 126,374848,"TERMINAL",0,0,"8",,terminal_output
127
+ 127,375883,"TERMINAL",0,0,"9",,terminal_output
128
+ 128,376937,"TERMINAL",0,0,"10",,terminal_output
129
+ 129,377972,"TERMINAL",0,0,"1",,terminal_output
130
+ 130,378819,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jasmine/slurm",,terminal_output
131
+ 131,383228,"TERMINAL",0,0,"cd $ws_dir",,terminal_command
132
+ 132,383955,"TERMINAL",0,0,"ls",,terminal_command
133
+ 133,384017,"TERMINAL",0,0,"]633;Ccheckpoints data data_breakout data_doom data_new logs scripts\r\ncount_items.sh data_atari data_coinrun data_minecraft huggingface possibly_corrupt_files_in_this_workspace.txt\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared",,terminal_output
134
+ 134,389408,"TERMINAL",0,0,"cd data_doom/",,terminal_command
135
+ 135,389748,"TERMINAL",0,0,"ls",,terminal_command
136
+ 136,389801,"TERMINAL",0,0,"]633;C",,terminal_output
137
+ 137,389853,"TERMINAL",0,0,"dev doom_episodes_10m_bak doom_episodes_1m_bak doom_episodes_50m_bak\r\ndev_bak doom_episodes_10m_low_res_bak doom_episodes_1m_low_res_bak doom_episodes_50m_low_res_bak\r\ndoom_episodes_10m doom_episodes_1m doom_episodes_50m\r\ndoom_episodes_10m_120x160_fixed doom_episodes_1m_120x160_fixed doom_episodes_50m_120x160_fixed\r\ndoom_episodes_10m_60x80_fixed doom_episodes_1m_60x80 doom_episodes_50m_60x80_fixed\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom",,terminal_output
138
+ 138,399058,"TERMINAL",0,0,"cd doom_episodes_50m_60x80_fixed",,terminal_command
139
+ 139,399453,"TERMINAL",0,0,"ls",,terminal_command
140
+ 140,399461,"TERMINAL",0,0,"]633;Cmetadata.json train\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom/doom_episodes_50m_60x80_fixed",,terminal_output
141
+ 141,429513,"TERMINAL",0,0,"rm -rf *_bak",,terminal_command
142
+ 142,429520,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom/doom_episodes_50m_60x80_fixed",,terminal_output
143
+ 143,431212,"TERMINAL",0,0,"ls",,terminal_command
144
+ 144,431216,"TERMINAL",0,0,"]633;Cmetadata.json train\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom/doom_episodes_50m_60x80_fixed",,terminal_output
145
+ 145,433494,"TERMINAL",0,0,"cd ..",,terminal_command
146
+ 146,436362,"TERMINAL",0,0,"rm -rf *_bak",,terminal_command
147
+ 147,436410,"TERMINAL",0,0,"]633;C",,terminal_output
148
+ 148,440124,"TERMINAL",0,0,"^C",,terminal_output
149
+ 149,440160,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom",,terminal_output
150
+ 150,440768,"TERMINAL",0,0,"ls",,terminal_command
151
+ 151,440784,"TERMINAL",0,0,"]633;Cdev doom_episodes_10m_bak doom_episodes_1m_60x80 doom_episodes_50m_120x160_fixed\r\ndoom_episodes_10m doom_episodes_10m_low_res_bak doom_episodes_1m_bak doom_episodes_50m_60x80_fixed\r\ndoom_episodes_10m_120x160_fixed doom_episodes_1m doom_episodes_1m_low_res_bak doom_episodes_50m_bak\r\ndoom_episodes_10m_60x80_fixed doom_episodes_1m_120x160_fixed doom_episodes_50m doom_episodes_50m_low_res_bak\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom",,terminal_output
152
+ 152,447892,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1709,0,"",shellscript,selection_mouse
153
+ 153,447914,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1708,0,"",shellscript,selection_command
154
+ 154,448422,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1799,0,"",shellscript,selection_mouse
155
+ 155,448425,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1798,0,"",shellscript,selection_command
156
+ 156,448903,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1848,0,"",shellscript,selection_mouse
157
+ 157,448906,"slurm/jobs/mihir/horeka/coinrun/big-model/train_dyn_big_model_single_gpu.sh",1847,0,"",shellscript,selection_command
158
+ 158,562225,"jasmine/utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\ndef _get_spatiotemporal_positional_encoding(d_model: int, max_len: int = 5000):\n """"""\n Creates a function that applies separate sinusoidal positional encodings to the temporal and spatial dimensions.\n """"""\n pe = jnp.zeros((max_len, d_model))\n position = jnp.arange(0, max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(jnp.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def _encode(x: jax.Array) -> jax.Array:\n """"""\n Args:\n x: The input tensor of shape (Batch, Time, Space, Dimension).\n\n Returns:\n The input tensor with positional encodings added.\n """"""\n assert x.ndim == 4, f""Input must be 4-dimensional, but got shape {x.shape}""\n\n num_timesteps = x.shape[1]\n num_spatial_patches = x.shape[2]\n\n # Temporal positional encoding: (1, T, 1, D)\n temporal_pe = pe[None, :num_timesteps, None, :]\n x = x + temporal_pe\n\n # Spatial positional encoding: (1, 1, S, D)\n spatial_pe = pe[None, None, :num_spatial_patches, :]\n x = x + spatial_pe\n\n return x\n\n return _encode\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n use_flash_attention=False, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM, sow_weights=self.sow_weights)\n x_BTNM = x_BTNM + 
z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM, sow_weights=self.sow_weights)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, ""activations"", x_BTNM)\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n """"""\n\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool = False,\n sow_activations: bool = False,\n sow_logits: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = _get_spatiotemporal_positional_encoding(\n self.model_dim, max_len=max_len\n )\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, ""logits"", x_BTNV)\n return x_BTNV\n\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = 
use_flash_attention\n self.decode = decode\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n use_flash_attention=False, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(\n self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None\n ) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, ""activations"", x_BTNM)\n\n return x_BTNM\n\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_logits: bool = False,\n sow_weights: bool = False,\n sow_activations: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = 
dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = _get_spatiotemporal_positional_encoding(\n self.model_dim, max_len=max_len\n )\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None\n ) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, ""logits"", x_BTNV)\n return x_BTNV\n\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n\n def __init__(\n self,\n latent_dim: int,\n num_latents: int,\n dropout: float,\n dtype: jnp.dtype,\n rngs: nnx.Rngs,\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n self.dtype = dtype\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.normal(stddev=1)(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = x_DL.astype(self.dtype)\n codebook = self.codebook.value.astype(self.dtype)\n\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(codebook)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape 
(batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(\n query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs\n ):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = (\n jnp.pad(\n _merge_batch_dims(bias),\n ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K)),\n )\n if bias is not None\n else None\n )\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
159
+ 159,575334,"jasmine/utils/preprocess.py",0,0,"import einops\nimport jax\nimport jax.numpy as jnp\n\n\ndef patchify(videos: jax.Array, size: int) -> jax.Array:\n B, T, H, W, C = videos.shape\n x = jnp.pad(videos, ((0, 0), (0, 0), (0, -H % size), (0, -W % size), (0, 0)))\n return einops.rearrange(\n x, ""b t (hn hp) (wn wp) c -> b t (hn wn) (hp wp c)"", hp=size, wp=size\n )\n\n\ndef unpatchify(patches: jax.Array, size: int, h_out: int, w_out: int) -> jax.Array:\n h_pad = -h_out % size\n hn = (h_out + h_pad) // size\n x = einops.rearrange(\n patches,\n ""b t (hn wn) (hp wp c) -> b t (hn hp) (wn wp) c"",\n hp=size,\n wp=size,\n hn=hn,\n )\n return x[:, :, :h_out, :w_out]\n",python,tab
160
+ 160,579235,"jasmine/utils/preprocess.py",85,0,"",python,selection_mouse
161
+ 161,579320,"jasmine/utils/preprocess.py",83,4,"size",python,selection_mouse
162
+ 162,580825,"jasmine/utils/preprocess.py",86,0,"",python,selection_mouse
163
+ 163,604240,"jasmine/utils/nn.py",0,0,"",python,tab
164
+ 164,611046,"TERMINAL",0,0,"salloc",,terminal_focus
165
+ 165,763704,"TERMINAL",0,0,"salloc: job 3583320 has been allocated resources\r\nsalloc: Granted job allocation 3583320\r\n",,terminal_output
166
+ 166,763784,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
167
+ 167,790873,"TERMINAL",0,0,"salloc: Nodes hkn0904 are ready for job\r\n",,terminal_output
168
+ 168,791597,"TERMINAL",0,0,"]0;tum_cte0515@hkn0904:~/Projects/jasmine[?2004h[tum_cte0515@hkn0904 jasmine]$ ",,terminal_output
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-53807628-1d5f-454c-846d-8a22156439901761331069237-2025_10_24-20.39.31.264/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-59804d2c-df27-4b41-9fe6-ffdfef8825021760703869419-2025_10_17-14.25.51.972/source.csv ADDED
@@ -0,0 +1,3 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,3471,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
3
+ 3,3682,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:25:50 PM [info] Activating crowd-code\n2:25:51 PM [info] Recording started\n2:25:52 PM [info] Initializing git provider using file system watchers...\n2:25:52 PM [info] No workspace folder found\n2:25:55 PM [info] Retrying git provider initialization...\n2:25:55 PM [info] No workspace folder found\n",Log,content
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-5b002f72-29b2-49c4-8fb4-15e9a0f2c68a1760283746731-2025_10_12-17.43.07.478/source.csv ADDED
@@ -0,0 +1,28 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,5,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_grain_ablation\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\nexport CUDA_VISIBLE_DEVICES=0\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/npy_test\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_default/3528955\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=2048 \\n --patch_size=16 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --num_steps=10_000 \\n --log_image_interval=100_000 \\n --log \\n --log_checkpoint_interval=100_000 \\n --name=coinrun-dynamics-maskgit-grain-ablation-bs2048-$slurm_job_id \\n --tags coinrun dynamics maskgit grain-ablation bs2048 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir_train &\n\nchild_pid=$!\n\nwait $child_pid",shellscript,tab
3
+ 2,1762,"TERMINAL",0,0,"",,terminal_focus
4
+ 3,2575,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:43:07 PM [info] Activating crowd-code\n5:43:07 PM [info] Recording started\n5:43:07 PM [info] Initializing git provider using file system watchers...\n5:43:08 PM [info] Git repository found\n5:43:08 PM [info] Git provider initialized successfully\n5:43:08 PM [info] Initial git state: [object Object]\n",Log,tab
5
+ 4,3422,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"",shellscript,tab
6
+ 5,4729,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/bin/activate",,terminal_command
7
+ 6,42983,"TERMINAL",0,0,"scancel 3562087",,terminal_command
8
+ 7,43003,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
9
+ 8,57931,"TERMINAL",0,0,"scancel 3562088",,terminal_command
10
+ 9,57955,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
11
+ 10,73562,"TERMINAL",0,0,"scancel 3562089",,terminal_command
12
+ 11,73582,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
13
+ 12,87435,"TERMINAL",0,0,"scancel 3562081",,terminal_command
14
+ 13,87459,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
15
+ 14,95439,"TERMINAL",0,0,"scancel 3562082",,terminal_command
16
+ 15,95459,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
17
+ 16,105395,"TERMINAL",0,0,"scancel 3562083",,terminal_command
18
+ 17,105420,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
19
+ 18,114078,"TERMINAL",0,0,"scancel 3562076",,terminal_command
20
+ 19,114105,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
21
+ 20,119314,"TERMINAL",0,0,"scancel 3562077",,terminal_command
22
+ 21,119343,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
23
+ 22,180469,"TERMINAL",0,0,"scancel 3562078",,terminal_command
24
+ 23,180518,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
25
+ 24,199040,"TERMINAL",0,0,"scancel 3562084",,terminal_command
26
+ 25,199065,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
27
+ 26,223683,"TERMINAL",0,0,"scancel 3562090",,terminal_command
28
+ 27,223700,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-b510dc45-0390-4ba1-9cd3-8518acd9730d1761060552425-2025_10_21-17.29.37.898/source.csv ADDED
@@ -0,0 +1,46 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,3909,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:29:37 PM [info] Activating crowd-code\n5:29:37 PM [info] Recording started\n5:29:37 PM [info] Initializing git provider using file system watchers...\n5:29:38 PM [info] Git repository found\n5:29:38 PM [info] Git provider initialized successfully\n5:29:38 PM [info] Initial git state: [object Object]\n",Log,tab
3
+ 3,6216,"TERMINAL",0,0,"",,terminal_command
4
+ 4,6232,"TERMINAL",0,0,"]633;C]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
5
+ 5,13635,"TERMINAL",0,0,"",,terminal_command
6
+ 6,49183,"TERMINAL",0,0,"",,terminal_focus
7
+ 7,51836,"TERMINAL",0,0,"git branch",,terminal_command
8
+ 8,51916,"TERMINAL",0,0,"]633;C[?1h=\r* feat/diffusion-backend\r\n main\r\n vizdoom-dataset\r\n\r[?1l>]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
9
+ 9,53098,"TERMINAL",0,0,"git pull",,terminal_command
10
+ 10,53149,"TERMINAL",0,0,"]633;C",,terminal_output
11
+ 11,54878,"TERMINAL",0,0,"remote: Enumerating objects: 18, done.\r\nremote: Counting objects: 5% (1/18)\rremote: Counting objects: 11% (2/18)\rremote: Counting objects: 16% (3/18)\rremote: Counting objects: 22% (4/18)\rremote: Counting objects: 27% (5/18)\rremote: Counting objects: 33% (6/18)\rremote: Counting objects: 38% (7/18)\rremote: Counting objects: 44% (8/18)\rremote: Counting objects: 50% (9/18)\rremote: Counting objects: 55% (10/18)\rremote: Counting objects: 61% (11/18)\rremote: Counting objects: 66% (12/18)\rremote: Counting objects: 72% (13/18)\rremote: Counting objects: 77% (14/18)\rremote: Counting objects: 83% (15/18)\rremote: Counting objects: 88% (16/18)\rremote: Counting objects: 94% (17/18)\rremote: Counting objects: 100% (18/18)\rremote: Counting objects: 100% (18/18), done.\r\nremote: Compressing objects: 14% (1/7)\rremote: Compressing objects: 28% (2/7)\rremote: Compressing objects: 42% (3/7)\rremote: Compressing objects: 57% (4/7)\rremote: Compressing objects: 71% (5/7)\rremote: Compressing objects: 85% (6/7)\rremote: Compressing objects: 100% (7/7)\rremote: Compressing objects: 100% (7/7), done.\r\nremote: Total 18 (delta 11), reused 18 (delta 11), pack-reused 0 (from 0)\r\nUnpacking objects: 5% (1/18)\rUnpacking objects: 11% (2/18)\rUnpacking objects: 16% (3/18)\rUnpacking objects: 22% (4/18)\rUnpacking objects: 27% (5/18)\rUnpacking objects: 33% (6/18)\rUnpacking objects: 38% (7/18)\rUnpacking objects: 44% (8/18)\rUnpacking objects: 50% (9/18)\rUnpacking objects: 55% (10/18)\rUnpacking objects: 61% (11/18)\rUnpacking objects: 66% (12/18)\rUnpacking objects: 72% (13/18)\rUnpacking objects: 77% (14/18)\rUnpacking objects: 83% (15/18)\rUnpacking objects: 88% (16/18)\rUnpacking objects: 94% (17/18)\rUnpacking objects: 100% (18/18)\rUnpacking objects: 100% (18/18), 9.42 KiB | 79.00 KiB/s, done.\r\n",,terminal_output
12
+ 12,54958,"TERMINAL",0,0,"From github.com:p-doom/jasmine\r\n * [new branch] feat/diffusion-forcing -> origin/feat/diffusion-forcing\r\n",,terminal_output
13
+ 13,54985,"TERMINAL",0,0,"Already up to date.\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
14
+ 14,58816,"TERMINAL",0,0,"git checkout feat/diffusion-forcing",,terminal_command
15
+ 15,58866,"TERMINAL",0,0,"]633;C",,terminal_output
16
+ 16,58956,"TERMINAL",0,0,"branch 'feat/diffusion-forcing' set up to track 'origin/feat/diffusion-forcing'.\r\nSwitched to a new branch 'feat/diffusion-forcing'\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
17
+ 17,60163,"TERMINAL",0,0,"git pull",,terminal_command
18
+ 18,60198,"TERMINAL",0,0,"]633;C",,terminal_output
19
+ 19,60567,"",0,0,"Switched from branch 'feat/diffusion-backend' to 'feat/diffusion-forcing'",,git_branch_checkout
20
+ 20,60940,"TERMINAL",0,0,"c",,terminal_output
21
+ 21,61068,"TERMINAL",0,0,"d",,terminal_output
22
+ 22,61160,"TERMINAL",0,0," ",,terminal_output
23
+ 23,61827,"TERMINAL",0,0,"Already up to date.\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
24
+ 24,63858,"TERMINAL",0,0,"cd slurm/",,terminal_command
25
+ 25,63877,"TERMINAL",0,0,"]633;C]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1/slurm",,terminal_output
26
+ 26,66596,"TERMINAL",0,0,"git pull",,terminal_command
27
+ 27,66631,"TERMINAL",0,0,"]633;C",,terminal_output
28
+ 28,68249,"TERMINAL",0,0,"remote: Enumerating objects: 13, done.\r\nremote: Counting objects: 7% (1/13)\rremote: Counting objects: 15% (2/13)\rremote: Counting objects: 23% (3/13)\rremote: Counting objects: 30% (4/13)\rremote: Counting objects: 38% (5/13)\rremote: Counting objects: 46% (6/13)\rremote: Counting objects: 53% (7/13)\rremote: Counting objects: 61% (8/13)\rremote: Counting objects: 69% (9/13)\rremote: Counting objects: 76% (10/13)\rremote: Counting objects: 84% (11/13)\rremote: Counting objects: 92% (12/13)\rremote: Counting objects: 100% (13/13)\rremote: Counting objects: 100% (13/13), done.\r\nremote: Compressing objects: 25% (1/4)\rremote: Compressing objects: 50% (2/4)\rremote: Compressing objects: 75% (3/4)\rremote: Compressing objects: 100% (4/4)\rremote: Compressing objects: 100% (4/4), done.\r\nremote: Total 7 (delta 3), reused 7 (delta 3), pack-reused 0 (from 0)\r\nUnpacking objects: 14% (1/7)\rUnpacking objects: 28% (2/7)\rUnpacking objects: 42% (3/7)\rUnpacking objects: 57% (4/7)\rUnpacking objects: 71% (5/7)\rUnpacking objects: 85% (6/7)\rUnpacking objects: 100% (7/7)\rUnpacking objects: 100% (7/7), 642 bytes | 13.00 KiB/s, done.\r\n",,terminal_output
29
+ 29,68357,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n 14e9cef..9f53682 main -> origin/main\r\n",,terminal_output
30
+ 30,68422,"TERMINAL",0,0,"Updating 14e9cef..9f53682\r\n",,terminal_output
31
+ 31,68561,"TERMINAL",0,0,"Fast-forward\r\n jobs/mihir/berlin/diffusion/dyn_dit_breakout.sh | 3 ++-\r\n 1 file changed, 2 insertions(+), 1 deletion(-)\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1/slurm",,terminal_output
32
+ 32,103592,"TERMINAL",0,0,"",,terminal_command
33
+ 33,103604,"TERMINAL",0,0,"]633;C]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
34
+ 34,160099,"TERMINAL",0,0,"git pull",,terminal_command
35
+ 35,160149,"TERMINAL",0,0,"]633;C",,terminal_output
36
+ 36,161751,"TERMINAL",0,0,"remote: Enumerating objects: 14, done.\r\nremote: Counting objects: 7% (1/14)\rremote: Counting objects: 14% (2/14)\rremote: Counting objects: 21% (3/14)\rremote: Counting objects: 28% (4/14)\rremote: Counting objects: 35% (5/14)\rremote: Counting objects: 42% (6/14)\rremote: Counting objects: 50% (7/14)\rremote: Counting objects: 57% (8/14)\rremote: Counting objects: 64% (9/14)\rremote: Counting objects: 71% (10/14)\rremote: Counting objects: 78% (11/14)\rremote: Counting objects: 85% (12/14)\rremote: Counting objects: 92% (13/14)\rremote: Counting objects: 100% (14/14)\rremote: Counting objects: 100% (14/14), done.\r\nremote: Compressing objects: 20% (1/5)\rremote: Compressing objects: 40% (2/5)\rremote: Compressing objects: 60% (3/5)\rremote: Compressing objects: 80% (4/5)\rremote: Compressing objects: 100% (5/5)\rremote: Compressing objects: 100% (5/5), done.\r\nremote: Total 8 (delta 3), reused 8 (delta 3), pack-reused 0 (from 0)\r\nUnpacking objects: 12% (1/8)\rUnpacking objects: 25% (2/8)\rUnpacking objects: 37% (3/8)\rUnpacking objects: 50% (4/8)\rUnpacking objects: 62% (5/8)\rUnpacking objects: 75% (6/8)\rUnpacking objects: 87% (7/8)\rUnpacking objects: 100% (8/8)\rUnpacking objects: 100% (8/8), 1.19 KiB | 25.00 KiB/s, done.\r\nFrom github.com:p-doom/slurm\r\n 9f53682..15cb084 main -> origin/main\r\n",,terminal_output
37
+ 37,161815,"TERMINAL",0,0,"Updating 9f53682..15cb084\r\n",,terminal_output
38
+ 38,162012,"TERMINAL",0,0,"Fast-forward\r\n jobs/mihir/berlin/diffusion/dyn_dit_breakout_dev.sh | 46 ++++++++++++++++++++++++++++++++\r\n jobs/mihir/berlin/diffusion/dyn_dit_coinrun.sh | 4 +--\r\n 2 files changed, 48 insertions(+), 2 deletions(-)\r\n create mode 100644 jobs/mihir/berlin/diffusion/dyn_dit_breakout_dev.sh\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1/slurm",,terminal_output
39
+ 39,164992,"TERMINAL",0,0,"cd ..",,terminal_command
40
+ 40,165020,"TERMINAL",0,0,"]633;C]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
41
+ 41,175690,"TERMINAL",0,0,"source ../jasmine/.venv/bin/activate",,terminal_command
42
+ 42,175698,"TERMINAL",0,0,"]633;C]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
43
+ 43,216697,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/berlin/diffusion/dyn_dit_coinrun.sh",,terminal_command
44
+ 44,216728,"TERMINAL",0,0,"]633;CSubmitted batch job 32406\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
45
+ 45,263827,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/berlin/diffusion/dyn_dit_breakout.sh",,terminal_command
46
+ 46,263927,"TERMINAL",0,0,"]633;CSubmitted batch job 32407\r\n]0;mihir.mahajan@hai-login2:~/Projects/jasmine_runner_1",,terminal_output
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-c0b85b13-745f-4849-b691-3865a0b92b6b1760870484089-2025_10_19-12.42.53.163/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-e1782904-5e7c-4264-b342-b03f67dbe6421760704011771-2025_10_17-14.28.12.326/source.csv ADDED
@@ -0,0 +1,24 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,5,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_grain_ablation\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\nexport CUDA_VISIBLE_DEVICES=0\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/npy_test\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_default/3528955\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=2048 \\n --patch_size=16 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --num_steps=10_000 \\n --log_image_interval=100_000 \\n --log \\n --log_checkpoint_interval=100_000 \\n --name=coinrun-dynamics-maskgit-grain-ablation-bs2048-$slurm_job_id \\n --tags coinrun dynamics maskgit grain-ablation bs2048 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir_train &\n\nchild_pid=$!\n\nwait $child_pid",shellscript,tab
3
+ 2,1991,"anysphere.remote-ssh.Remote - SSH",0,0,"2025-10-17 14:27:11.423 [info] Resolving ssh remote authority 'horeka' (Unparsed 'ssh-remote+7b22686f73744e616d65223a22686f72656b61227d') (attempt #1)\n2025-10-17 14:27:12.383 [info] SSH askpass server listening on /var/folders/mr/xp86mpcd01sd72mft8vjq27r0000gn/T/cursor-ssh-Vf6muF/socket.sock\n2025-10-17 14:27:12.386 [info] Using configured platform linux for remote host horeka\n2025-10-17 14:27:12.391 [info] Using askpass script: /Users/mihir/.cursor/extensions/anysphere.remote-ssh-1.0.33/dist/scripts/launchSSHAskpass.sh with javascript file /Users/mihir/.cursor/extensions/anysphere.remote-ssh-1.0.33/dist/scripts/sshAskClient.js. Askpass handle: /var/folders/mr/xp86mpcd01sd72mft8vjq27r0000gn/T/cursor-ssh-Vf6muF/socket.sock\n2025-10-17 14:27:12.439 [info] Launching SSH server via shell with command: cat ""/var/folders/mr/xp86mpcd01sd72mft8vjq27r0000gn/T/cursor_remote_install_aaae7e95-0157-4d26-bbba-4e135dcf22c0.sh"" | ssh -T -D 59659 horeka bash --login -c bash\n2025-10-17 14:27:12.439 [info] Establishing SSH connection: cat ""/var/folders/mr/xp86mpcd01sd72mft8vjq27r0000gn/T/cursor_remote_install_aaae7e95-0157-4d26-bbba-4e135dcf22c0.sh"" | ssh -T -D 59659 horeka bash --login -c bash\n2025-10-17 14:27:12.440 [info] Started installation script. Waiting for it to finish...\n2025-10-17 14:27:12.440 [info] Waiting for server to install. Timeout: 30000ms\n2025-10-17 14:27:14.393 [info] Askpass server received request: POST /\n2025-10-17 14:27:14.395 [info] Askpass server received request body: {""request"":""([email protected]) Your OTP: ""}\n2025-10-17 14:27:14.395 [info] Pausing timeout; waiting for askpass response\n2025-10-17 14:27:14.395 [info] Received SSH askpass request: ([email protected]) Your OTP: \n2025-10-17 14:27:26.404 [info] Resuming timeout; askpass response received\n2025-10-17 14:27:27.710 [info] Askpass server received request: POST /\n2025-10-17 14:27:27.711 [info] Askpass server received request body: {""request"":""([email protected]) Password: ""}\n2025-10-17 14:27:27.711 [info] Pausing timeout; waiting for askpass response\n2025-10-17 14:27:27.711 [info] Received SSH askpass request: ([email protected]) Password: \n2025-10-17 14:27:30.791 [info] Resuming timeout; askpass response received\n2025-10-17 14:27:35.361 [info] (ssh_tunnel) stdout: Configuring Cursor Server on Remote\n\n2025-10-17 14:27:35.387 [info] (ssh_tunnel) stdout: Using TMP_DIR: /run/user/999226\n\n2025-10-17 14:27:35.580 [info] (ssh_tunnel) stdout: Locking /run/user/999226/cursor-remote-lock.41b851e3afada0dcdfba85e69b64011c\n\n2025-10-17 14:27:35.619 [info] (ssh_tunnel) stdout: Downloading server via wget from https://downloads.cursor.com/production/b9e5948c1ad20443a5cecba6b84a3c9b99d62582/linux/x64/cursor-reh-linux-x64.tar.gz to cursor-server-b1bbf4ae-e357-45d9-9f3e-0f589c7d9160.tar.gz\n\n2025-10-17 14:27:35.642 [info] (ssh_tunnel) stderr: --2025-10-17 14:27:35-- https://downloads.cursor.com/production/b9e5948c1ad20443a5cecba6b84a3c9b99d62582/linux/x64/cursor-reh-linux-x64.tar.gz\n\n2025-10-17 14:27:35.758 [info] (ssh_tunnel) stderr: Resolving downloads.cursor.com (downloads.cursor.com)... \n2025-10-17 14:27:35.782 [info] (ssh_tunnel) stderr: 2606:4700::6812:1080, 2606:4700::6812:1180, 104.18.16.128, ...\nConnecting to downloads.cursor.com (downloads.cursor.com)|2606:4700::6812:1080|:443... \n2025-10-17 14:27:35.783 [info] (ssh_tunnel) stderr: connected.\n\n2025-10-17 14:27:35.872 [info] (ssh_tunnel) stderr: HTTP request sent, awaiting response... 
\n2025-10-17 14:27:35.888 [info] (ssh_tunnel) stderr: 200 OK\nLength: 64733248 (62M) [application/gzip]\nSaving to: ‘cursor-server-b1bbf4ae-e357-45d9-9f3e-0f589c7d9160.tar.gz’\n\n\ncursor-server-b1bbf 0%[ ] 0 --.-KB/s \n2025-10-17 14:27:36.087 [info] (ssh_tunnel) stderr: \ncursor-server-b1bbf 79%[==============> ] 49.19M 246MB/s \n2025-10-17 14:27:36.111 [info] (ssh_tunnel) stderr: \ncursor-server-b1bbf 100%[===================>] 61.73M 275MB/s in 0.2s \n\n\n2025-10-17 14:27:36.111 [info] (ssh_tunnel) stderr: 2025-10-17 14:27:36 (275 MB/s) - ‘cursor-server-b1bbf4ae-e357-45d9-9f3e-0f589c7d9160.tar.gz’ saved [64733248/64733248]\n\n\n2025-10-17 14:27:36.113 [info] (ssh_tunnel) stdout: Extracting server contents from cursor-server-b1bbf4ae-e357-45d9-9f3e-0f589c7d9160.tar.gz\n\n2025-10-17 14:27:48.707 [info] (ssh_tunnel) stdout: Checking node executable\nv20.18.2\n\n2025-10-17 14:27:48.741 [info] (ssh_tunnel) stdout: Checking for running multiplex server: /home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/multiplex-server/3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8.js\n\n2025-10-17 14:27:48.885 [info] (ssh_tunnel) stdout: Running multiplex server: \n\n2025-10-17 14:27:48.901 [info] (ssh_tunnel) stdout: Creating multiplex server token file /run/user/999226/cursor-remote-multiplex.token.41b851e3afada0dcdfba85e69b64011c.3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8\n\n2025-10-17 14:27:48.948 [info] (ssh_tunnel) stdout: Creating directory for multiplex server: /home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/multiplex-server\nWriting multiplex server script to /home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/multiplex-server/3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8.js\n\n2025-10-17 14:27:48.949 [info] (ssh_tunnel) stdout: Starting multiplex server: /home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/b9e5948c1ad20443a5cecba6b84a3c9b99d62580/node /home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/multiplex-server/3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8.js 1ee871c9-0446-4594-ae27-10cdda6cbdc8 0\n\n2025-10-17 14:27:48.959 [info] (ssh_tunnel) stdout: Multiplex server started with PID 3206117 and wrote pid to file /run/user/999226/cursor-remote-multiplex.pid.41b851e3afada0dcdfba85e69b64011c.3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8\n\n2025-10-17 14:27:48.959 [info] (ssh_tunnel) stdout: Reading multiplex server token file /run/user/999226/cursor-remote-multiplex.token.41b851e3afada0dcdfba85e69b64011c.3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8\n\n2025-10-17 14:27:48.961 [info] (ssh_tunnel) stdout: Multiplex server token file found\n\n2025-10-17 14:27:48.970 [info] (ssh_tunnel) stdout: Reading multiplex server log file /run/user/999226/cursor-remote-multiplex.log.41b851e3afada0dcdfba85e69b64011c.3ce73d09cffc8f33c6d911e972bd0f6dabbe3e26e810844be8060e6b10987db8\n\n2025-10-17 14:27:49.530 [info] (ssh_tunnel) stdout: Checking for code servers\n\n2025-10-17 14:27:49.688 [info] (ssh_tunnel) stdout: Code server script is not running\nCreating code server token file /run/user/999226/cursor-remote-code.token.41b851e3afada0dcdfba85e69b64011c\n\n2025-10-17 14:27:49.699 [info] (ssh_tunnel) stdout: Starting code server script /home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/b9e5948c1ad20443a5cecba6b84a3c9b99d62580/bin/cursor-server --start-server --host=127.0.0.1 --port 0 --connection-token-file 
/run/user/999226/cursor-remote-code.token.41b851e3afada0dcdfba85e69b64011c --telemetry-level off --enable-remote-auto-shutdown --accept-server-license-terms &> /run/user/999226/cursor-remote-code.log.41b851e3afada0dcdfba85e69b64011c &\n\n2025-10-17 14:27:49.713 [info] (ssh_tunnel) stdout: Code server started with PID 3206229 and wrote pid to file /run/user/999226/cursor-remote-code.pid.41b851e3afada0dcdfba85e69b64011c\n\n2025-10-17 14:27:49.718 [info] (ssh_tunnel) stdout: Code server log file is /run/user/999226/cursor-remote-code.log.41b851e3afada0dcdfba85e69b64011c\n\n2025-10-17 14:27:50.269 [info] (ssh_tunnel) stdout: 2aef908d612b8a00fe727eb3: start\nexitCode==0==\nnodeExecutable==/home/hk-project-p0023960/tum_cte0515/.cursor-server/bin/b9e5948c1ad20443a5cecba6b84a3c9b99d62580/node==\nerrorMessage====\nisFatalError==false==\nmultiplexListeningOn==42623==\nmultiplexConnectionToken==1ee871c9-0446-4594-ae27-10cdda6cbdc8==\ncodeListeningOn==36077==\ncodeConnectionToken==ef447bc0-3351-48ed-9058-c609ca5ada42==\ndetectedPlatform==linux==\narch==x64==\nSSH_AUTH_SOCK====\n2aef908d612b8a00fe727eb3: end\n\n2025-10-17 14:27:50.282 [info] Server install command exit code: 0\n2025-10-17 14:27:50.282 [info] Deleting local script /var/folders/mr/xp86mpcd01sd72mft8vjq27r0000gn/T/cursor_remote_install_aaae7e95-0157-4d26-bbba-4e135dcf22c0.sh\n2025-10-17 14:27:50.285 [info] [forwarding][code] creating new forwarding server\n2025-10-17 14:27:50.285 [info] [forwarding][code] server listening on 127.0.0.1:59675\n2025-10-17 14:27:50.285 [info] [forwarding][code] Set up server\n2025-10-17 14:27:50.285 [info] [remote-ssh] codeListeningOn (remote=127.0.0.1:36077; local=127.0.0.1:59675) codeConnectionToken: ef447bc0-3351-48ed-9058-c609ca5ada42\n2025-10-17 14:27:50.286 [info] [forwarding][multiplex] creating new forwarding server\n2025-10-17 14:27:50.286 [info] [forwarding][multiplex] server listening on 127.0.0.1:59676\n2025-10-17 14:27:50.286 [info] [forwarding][multiplex] Set up server\n2025-10-17 14:27:50.291 [info] [remote-ssh] multiplexListeningOn (remote=[object Object]; local=[object Object]) multiplexConnectionToken: 1ee871c9-0446-4594-ae27-10cdda6cbdc8\n2025-10-17 14:27:50.291 [info] [remote-ssh] Pinging remote server via 127.0.0.1:59676...\n2025-10-17 14:27:50.305 [info] [remote-ssh] Resolved exec server. Socks port: 59659\n2025-10-17 14:27:50.306 [info] Setting up 0 default forwarded ports\n2025-10-17 14:27:50.306 [info] [remote-ssh] Resolved authority: {""host"":""127.0.0.1"",""port"":59675,""connectionToken"":""ef447bc0-3351-48ed-9058-c609ca5ada42"",""extensionHostEnv"":{}}. Socks port: 59659\n2025-10-17 14:27:50.312 [info] (ssh_tunnel) stdout: Unlocking /run/user/999226/cursor-remote-lock.41b851e3afada0dcdfba85e69b64011c\n \n***********************************************************************\n* This terminal is used to establish and maintain the SSH connection. *\n* Closing this terminal will terminate the connection and disconnect *\n* Cursor from the remote server. 
*\n***********************************************************************\n\n2025-10-17 14:27:50.321 [info] [forwarding][multiplex][127.0.0.1:59676 -> 127.0.0.1:42623][a3a6ebdb-cdda-4698-ba41-f0759f05c484] received connection request\n2025-10-17 14:27:50.323 [info] [command][161c1e8d-2172-4ae5-94c8-a366debebbe8] Sending command request: {""command"":""echo"",""args"":[""1""],""env"":{},""token"":""1ee871c9-0446-4594-ae27-10cdda6cbdc8"",""id"":""161c1e8d-2172-4ae5-94c8-a366debebbe8""}\n2025-10-17 14:27:50.348 [info] [forwarding][multiplex][127.0.0.1:59676 -> 127.0.0.1:59659 -> 127.0.0.1:42623][a3a6ebdb-cdda-4698-ba41-f0759f05c484] socks forwarding established\n2025-10-17 14:27:50.398 [info] [command][161c1e8d-2172-4ae5-94c8-a366debebbe8] Process exited with code 0\n2025-10-17 14:27:50.399 [info] [forwarding][multiplex][127.0.0.1:59676 -> 127.0.0.1:59659 -> 127.0.0.1:42623][a3a6ebdb-cdda-4698-ba41-f0759f05c484] socks connection closed\n2025-10-17 14:27:50.399 [info] [command][161c1e8d-2172-4ae5-94c8-a366debebbe8] Socket close event received\n2025-10-17 14:27:50.584 [info] [forwarding][code][127.0.0.1:59675 -> 127.0.0.1:36077][d1cd5e90-a9b8-4110-bf40-8ca1a14a1cd7] received connection request\n2025-10-17 14:27:50.608 [info] [forwarding][code][127.0.0.1:59675 -> 127.0.0.1:59659 -> 127.0.0.1:36077][d1cd5e90-a9b8-4110-bf40-8ca1a14a1cd7] socks forwarding established\n2025-10-17 14:27:50.650 [info] [forwarding][code][127.0.0.1:59675 -> 127.0.0.1:36077][23df6745-4ac6-4952-ae6d-8765dce6678e] received connection request\n2025-10-17 14:27:50.665 [info] [forwarding][code][127.0.0.1:59675 -> 127.0.0.1:59659 -> 127.0.0.1:36077][23df6745-4ac6-4952-ae6d-8765dce6678e] socks forwarding established\n2025-10-17 14:27:52.177 [info] Saved platform linux for remote host horeka\n",log,tab
+ 3,1993,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"",shellscript,tab
+ 4,2155,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:28:12 PM [info] Activating crowd-code\n2:28:12 PM [info] Recording started\n2:28:12 PM [info] Initializing git provider using file system watchers...\n2:28:13 PM [info] Git repository found\n2:28:13 PM [info] Git provider initialized successfully\n2:28:13 PM [info] Initial git state: [object Object]\n",Log,tab
+ 5,3541,"slurm/jobs/mihir/horeka/coinrun/speed_ablation/batch_size_2048/train_dyn_grain_ablation.sh",0,0,"",shellscript,tab
+ 6,3547,"TERMINAL",0,0,"",,terminal_focus
+ 7,17842,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/train_dynamics_maskgit_grain_ablation_3562055.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated-h100\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_grain_ablation\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\nexport CUDA_VISIBLE_DEVICES=0\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/npy_test\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_default/3528955\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1024 \\n --patch_size=16 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --num_steps=10_000 \\n --log_image_interval=100_000 \\n --log \\n --log_checkpoint_interval=100_000 \\n --name=coinrun-dynamics-maskgit-grain-ablation-bs1024-$slurm_job_id \\n --tags coinrun dynamics maskgit grain-ablation bs1024 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir_train &\n\nchild_pid=$!\n\nwait $child_pid/var/spool/slurmd/job3562055/slurm_script: line 43: .venv/bin/activate: No such file or 
directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=1\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=2451356\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs\nSLURMD_NODENAME=hkn0917\nSLURM_JOB_START_TIME=1760279968\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1760297968\nSLURM_CPUS_ON_NODE=6\nSLURM_JOB_CPUS_PER_NODE=6\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated-h100\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=1\nSLURM_JOBID=3562055\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=1\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi4.hkibbi4e2.hkn0917\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn0917\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=1\nSLURM_NNODES=1\nSLURM_SUBMIT_HOST=hkn1993.localdomain\nSLURM_JOB_ID=3562055\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_maskgit_grain_ablation\nSLURM_NTASKS_PER_NODE=1\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn0917\nGpuFreq=control_disabled\nW1012 16:40:06.278108 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:06.620322 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:06.733623 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:06.806173 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:06.845624 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:06.920240 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:06.984605 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.020546 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.037437 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.076474 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.123455 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.268985 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.384836 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.424767 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.463488 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.501554 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.539940 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.556504 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.573998 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. 
Using default config.\nW1012 16:40:07.612195 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.653245 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.693419 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.762899 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.800440 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.839500 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.877807 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.894711 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:07.913906 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.006532 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.181725 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.264634 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.303428 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.322088 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.344175 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.482400 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.602166 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.642301 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.788758 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:08.908572 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.107393 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.183351 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.222005 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.340740 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.408423 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.450425 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.494651 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.533221 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. 
Using default config.\nW1012 16:40:09.675608 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.752636 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:09.980066 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.094695 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.293123 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.350819 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.387901 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.507092 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.575955 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.619004 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.662116 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:10.699876 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.053999 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.167886 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.207556 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.323145 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.425586 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.534228 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:11.591821 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\nwandb: creating run\nwandb: Tracking run with wandb version 0.22.0\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/wandb/run-20251012_164011-3562055\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run coinrun-dynamics-maskgit-grain-ablation-bs1024-3562055\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3562055\nW1012 16:40:13.377784 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.416190 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.454586 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.493075 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. 
Using default config.\nW1012 16:40:13.531568 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.569808 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.607048 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.644294 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.708505 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.746751 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.784558 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.840759 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.879445 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.916986 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:13.975334 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.033039 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.199132 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.216748 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.233481 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.250267 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.267058 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.283697 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.300518 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.317590 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.334499 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.351376 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.385624 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.403368 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.420082 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.436760 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.466665 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.483686 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. 
Using default config.\nW1012 16:40:14.500812 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.517617 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.534747 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.551155 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.568010 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.598417 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.628794 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nW1012 16:40:14.763067 2451418 sol_gpu_cost_model.cc:102] No SoL config found for device: NVIDIA H100. Using default config.\nRunning on 1 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 26555904, 'lam': 17640416, 'tokenizer': 34489696, 'total': 78686016}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/jasmine/train_dynamics.py"", line 821, in <module>\n main(args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/jasmine/train_dynamics.py"", line 466, in main\n train_iterator = build_dataloader(args, args.data_dir)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/jasmine/train_dynamics.py"", line 211, in build_dataloader\n grain_dataloader = get_dataloader(\n ^^^^^^^^^^^^^^^\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/jasmine/utils/dataloader.py"", line 120, in get_dataloader\n raise ValueError(""array_record_paths list cannot be empty."")\nValueError: array_record_paths list cannot be empty.\nwandb: \nwandb: 🚀 View run coinrun-dynamics-maskgit-grain-ablation-bs1024-3562055 at: https://wandb.ai/instant-uv/jafar/runs/3562055\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/wandb/run-20251012_164011-3562055/logs\nW1012 16:40:15.850200 2451538 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugproto job_name: ""jax_worker"": CANCELLED: CANCELLED\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\n:UNKNOWN:Error received from peer {grpc_message:""CANCELLED"", grpc_status:1} [type.googleapis.com/tensorflow.DerivedStatus='']\nsrun: error: hkn0917: task 0: Exited with exit code 1\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3562055\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: FAILED (exit code 1)\nPartition: accelerated-h100\nNodes: 1\nCores per node: 6\nNodelist: hkn0917\nCPU Utilized: 00:00:15\nCPU Efficiency: 5.10% of 00:04:54 core-walltime\nJob Wall-clock time: 00:00:49\nStarttime: Sun Oct 12 16:39:28 2025\nEndtime: Sun Oct 12 16:40:17 2025\nMemory Utilized: 3.60 GB\nMemory Efficiency: 0.48% of 750.00 GB (750.00 GB/node)\nEnergy Consumed: 20316 Joule / 5.64333333333333 Watthours\nAverage node power draw: 414.612244897959 Watt\n",log,tab
+ 8,320235,"slurm/jobs/mihir/horeka/doom/resolution60x80/train_dyn_default.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/doom/dynamics/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/doom/dynamics/maskgit/%x_%j.log\n#SBATCH --job-name=dynamics_doom_60x80\n#SBATCH --exclude=hkn0735\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom/doom_episodes_10m_60x80_fixed/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_doom/doom_episodes_10m_60x80_fixed/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/doom/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/tokenizer/train_tokenizer_default_single_gpu_60x80/3547697\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --num_actions=20 \\n --ckpt_dir $CHECKPOINT_DIR \\n --name=doom-dynamics-60x80-$slurm_job_id \\n --image_height=60 \\n --image_width=80 \\n --tags doom dynamics maskgit default 60x80 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir_train \\n --val_data_dir $array_records_dir_val &\n\nchild_pid=$!\n\nwait $child_pid",shellscript,tab
+ 9,1227671,"jasmine/train_tokenizer.py",0,0,"import os\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nfrom typing import cast, Optional\n\nimport einops\nimport itertools\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 64\n image_width: int = 64\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n init_lr: float = 0.0\n max_lr: float = 3e-4\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 30_000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 16\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = True\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 50\n log_image_interval: int = 1000\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 1000\n log_checkpoint_keep_period: int = 20_000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 20_000\n val_steps: int = 50\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[TokenizerVQVAE, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n return (\n TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n ),\n rng,\n )\n\n\ndef build_optimizer(model: TokenizerVQVAE, args: Args) -> nnx.ModelAndOptimizer:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.ModelAndOptimizer(model, tx)\n return optimizer\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n 
videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n return mesh, replicated_sharding, videos_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.ModelAndOptimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> Optional[ocp.CheckpointManager]:\n if args.restore_ckpt or args.save_ckpt:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n else:\n return None\n\n\ndef restore_checkpoint_if_needed(\n args: Args,\n checkpoint_manager: Optional[ocp.CheckpointManager],\n optimizer: nnx.ModelAndOptimizer,\n train_iterator: grain.DataLoaderIterator,\n val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int, nnx.ModelAndOptimizer, grain.DataLoaderIterator, grain.DataLoaderIterator\n]:\n step = 0\n if checkpoint_manager and restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n assert checkpoint_manager is not None\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n 
abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(restore_step, args=restore_args)\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = restore_step or 0\n print(f""Restored dataloader and model state from step {step}"")\n return step, optimizer, train_iterator, val_iterator\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n tokenizer, rng = build_model(args, rng)\n\n _, params, _ = nnx.split(tokenizer, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer = build_optimizer(tokenizer, args)\n del tokenizer\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding = build_mesh_and_sharding(num_devices)\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator = restore_checkpoint_if_needed(\n args, checkpoint_manager, optimizer, train_iterator, val_iterator\n )\n\n # --- Define loss and train step (close over args) ---\n def tokenizer_loss_fn(\n model: TokenizerVQVAE, inputs: dict, training: bool = False\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs, training=training)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n mse = jnp.square(gt - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n 
outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n gt_clipped = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_clipped, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_clipped, recon)).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.ModelAndOptimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: TokenizerVQVAE) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return tokenizer_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(\n tokenizer: TokenizerVQVAE, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n tokenizer.eval()\n (loss, (recon, metrics)) = tokenizer_loss_fn(tokenizer, inputs, training=False)\n return loss, recon, metrics\n\n def calculate_validation_metrics(val_dataloader, tokenizer):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n batch = None\n recon = None\n for batch in val_dataloader:\n loss, recon, metrics = val_step(tokenizer, batch)\n loss_per_step.append(loss)\n metrics_per_step.append(metrics)\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. 
Made {step} steps, expected {args.val_steps}""\n )\n\n val_loss = np.mean(loss_per_step)\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics[""val_loss""] = val_loss\n return val_metrics, batch, recon\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n }\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n }\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_batch = next(dataloader_train)\n compiled = train_step.lower(optimizer, first_batch).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_batch], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for batch in dataloader_train:\n # --- Train step ---\n loss, recon, metrics = train_step(optimizer, batch)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon = calculate_validation_metrics(\n dataloader_val, optimizer.model\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = batch[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results and step % args.val_interval == 0:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""][0].clip(\n 0, 1\n )\n val_results[""val_comparison_seq""] = jnp.concatenate(\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_results[""val_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results and step % args.val_interval == 0:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(val_results[""gt_seq_val""][0])\n ),\n val_recon=wandb.Image(\n np.asarray(val_results[""recon_seq_val""][0])\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n 
val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n assert checkpoint_manager is not None\n optimizer_state = nnx.state(optimizer)\n if val_iterator:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n if checkpoint_manager:\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab
+ 10,1229571,"jasmine/train_tokenizer.py",10,0,"",python,selection_mouse
+ 11,1230445,"jasmine/train_tokenizer.py",16,0,"",python,selection_mouse
+ 12,1230696,"jasmine/train_tokenizer.py",14,7,"environ",python,selection_mouse
+ 13,1230848,"jasmine/train_tokenizer.py",14,18,"environ.setdefault",python,selection_mouse
+ 14,1230848,"jasmine/train_tokenizer.py",14,50,"environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION",python,selection_mouse
+ 15,1230877,"jasmine/train_tokenizer.py",14,53,"environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ",python,selection_mouse
+ 16,1230878,"jasmine/train_tokenizer.py",14,58,"environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98",python,selection_mouse
+ 17,1230902,"jasmine/train_tokenizer.py",14,59,"environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98""",python,selection_mouse
+ 18,1230930,"jasmine/train_tokenizer.py",14,60,"environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")",python,selection_mouse
+ 19,1231261,"jasmine/train_tokenizer.py",74,0,"",python,selection_mouse
+ 20,1231267,"jasmine/train_tokenizer.py",73,0,"",python,selection_command
+ 21,1232099,"jasmine/train_tokenizer.py",68,0,"",python,selection_mouse
+ 22,1232416,"jasmine/train_tokenizer.py",72,0,"",python,selection_mouse
+ 23,1232849,"jasmine/train_tokenizer.py",10,0,"",python,selection_mouse
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-ea3d4740-7761-4d5b-b6ed-89bdaaf7e91f1760906482985-2025_10_19-22.42.16.510/source.csv ADDED
@@ -0,0 +1,69 @@
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+ 2,2933,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:42:16 PM [info] Activating crowd-code\n10:42:16 PM [info] Recording started\n10:42:16 PM [info] Initializing git provider using file system watchers...\n10:42:17 PM [info] Git repository found\n10:42:17 PM [info] Git provider initialized successfully\n10:42:18 PM [info] Initial git state: [object Object]\n",Log,tab
+ 3,5491,"TERMINAL",0,0,"bash",,terminal_focus
+ 4,8092,"TERMINAL",0,0,"uv pip show",,terminal_command
+ 5,8147,"TERMINAL",0,0,"]633;C",,terminal_output
+ 6,8195,"TERMINAL",0,0,"warning: Please provide a package name or names.\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+ 7,13077,"TERMINAL",0,0,"cd data/",,terminal_command
+ 8,13479,"TERMINAL",0,0,"ls",,terminal_command
+ 9,13488,"TERMINAL",0,0,"]633;Cjasmine_data pyproject.toml uv.lock _vizdoom _vizdoom.ini\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 10,17201,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
+ 11,17253,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 12,21685,"TERMINAL",0,0,"uv pip freeze",,terminal_command
+ 13,22010,"TERMINAL",0,0,"]633;Cabsl-py==2.1.0\r\naiohappyeyeballs==2.4.0\r\naiohttp==3.10.5\r\naiosignal==1.3.1\r\nale-py==0.9.0\r\nappnope==0.1.4\r\narray-record==0.8.1\r\nasttokens==2.4.1\r\nasync-timeout==4.0.3\r\nattrs==24.2.0\r\nautorom==0.4.2\r\nautorom-accept-rom-license==0.6.1\r\nblack==24.8.0\r\nbox2d-py==2.3.5\r\ncertifi==2024.8.30\r\ncffi==1.17.1\r\ncharset-normalizer==3.3.2\r\nclick==8.1.7\r\ncloudpickle==3.0.0\r\ncomm==0.2.2\r\ncontourpy==1.3.0\r\ncramjam==2.8.3\r\ncycler==0.12.1\r\ncython==3.0.11\r\ndatasets==3.0.0\r\ndebugpy==1.8.5\r\ndecorator==4.4.2\r\ndill==0.3.8\r\ndocstring-parser==0.17.0\r\netils==1.13.0\r\nexceptiongroup==1.2.2\r\nexecuting==2.1.0\r\nfarama-notifications==0.0.4\r\nfasteners==0.19\r\nfastparquet==2024.5.0\r\nffmpeg-python==0.2.0\r\nfilelock==3.16.0\r\nfonttools==4.53.1\r\nfrozenlist==1.4.1\r\nfsspec==2024.6.1\r\nfuture==1.0.0\r\nglcontext==3.0.0\r\nglfw==2.7.0\r\ngrpcio==1.66.1\r\ngym==0.26.2\r\ngym-notices==0.0.8\r\ngym3==0.3.3\r\ngymnasium==0.29.1\r\nhf-transfer==0.1.9\r\nhf-xet==1.1.10\r\nhuggingface-hub==0.24.7\r\nidna==3.8\r\nimageio==2.35.1\r\nimageio-ffmpeg==0.5.1\r\nimportlib-metadata==8.5.0\r\nimportlib-resources==6.4.5\r\niniconfig==2.0.0\r\ninquirerpy==0.3.4\r\nipykernel==6.29.5\r\nipython==8.18.1\r\nipywidgets==8.1.5\r\n-e file:///hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/data\r\njedi==0.19.1\r\njinja2==3.1.4\r\njupyter-client==8.6.2\r\njupyter-core==5.7.2\r\njupyterlab-widgets==3.0.13\r\nkiwisolver==1.4.7\r\nloguru==0.7.3\r\nlz4==4.3.3\r\nmarkdown==3.7\r\nmarkdown-it-py==3.0.0\r\nmarkupsafe==2.1.5\r\nmatplotlib==3.9.2\r\nmatplotlib-inline==0.1.7\r\nmdurl==0.1.2\r\nmoderngl==5.12.0\r\nmoviepy==1.0.3\r\nmpmath==1.3.0\r\nmujoco==3.2.5\r\nmujoco-py==2.1.2.14\r\nmultidict==6.1.0\r\nmultiprocess==0.70.16\r\nmypy-extensions==1.0.0\r\nnest-asyncio==1.6.0\r\nnetworkx==3.2.1\r\nnumpy==2.0.2\r\nnvidia-cublas-cu12==12.1.3.1\r\nnvidia-cuda-cupti-cu12==12.1.105\r\nnvidia-cuda-nvrtc-cu12==12.1.105\r\nnvidia-cuda-runtime-cu12==12.1.105\r\nnvidia-cudnn-cu12==9.1.0.70\r\nnvidia-cufft-cu12==11.0.2.54\r\nnvidia-curand-cu12==10.3.2.106\r\nnvidia-cusolver-cu12==11.4.5.107\r\nnvidia-cusparse-cu12==12.1.0.106\r\nnvidia-nccl-cu12==2.20.5\r\nnvidia-nvjitlink-cu12==12.9.86\r\nnvidia-nvtx-cu12==12.1.105\r\nopencv-python==4.10.0.84\r\npackaging==24.1\r\npandas==2.2.2\r\nparso==0.8.4\r\npathspec==0.12.1\r\npexpect==4.9.0\r\npfzy==0.3.4\r\npillow==10.4.0\r\nplatformdirs==4.3.2\r\npluggy==1.5.0\r\nprocgen==0.10.7\r\nproglog==0.1.10\r\nprompt-toolkit==3.0.47\r\nprotobuf==5.28.1\r\npsutil==6.0.0\r\nptyprocess==0.7.0\r\npure-eval==0.2.3\r\npy==1.11.0\r\npyarrow==17.0.0\r\npycparser==2.22\r\npygame==2.6.1\r\npygments==2.18.0\r\npyopengl==3.1.7\r\npyparsing==3.1.4\r\npytest==7.0.1\r\npython-dateutil==2.9.0.post0\r\npytz==2024.2\r\npyyaml==6.0.2\r\npyzmq==26.2.0\r\nrequests==2.32.3\r\nrich==13.8.1\r\nsetuptools==80.9.0\r\nshimmy==1.3.0\r\nshtab==1.7.2\r\nsix==1.16.0\r\nstable-baselines3==2.3.2\r\nstack-data==0.6.3\r\nswig==4.2.1\r\nsympy==1.13.2\r\ntensorboard==2.17.1\r\ntensorboard-data-server==0.7.2\r\ntomli==2.0.1\r\ntorch==2.4.1\r\ntornado==6.4.1\r\ntqdm==4.66.5\r\ntraitlets==5.14.3\r\ntriton==3.0.0\r\ntypeguard==4.4.4\r\ntyping-extensions==4.12.2\r\ntyro==0.9.32\r\ntzdata==2024.1\r\nurllib3==2.2.2\r\nvizdoom==1.2.4\r\nwcwidth==0.2.13\r\nwerkzeug==3.0.4\r\nwidgetsnbextension==4.0.13\r\nxxhash==3.5.0\r\nyarl==1.11.1\r\nzipp==3.20.1\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 14,22019,"TERMINAL",0,0,"",,terminal_output
+ 15,44759,"TERMINAL",0,0,"uv pip freeze > requirements_doom.txt",,terminal_command
+ 16,44764,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 17,61615,"data/requirements_doom.txt",0,0,"absl-py==2.1.0\naiohappyeyeballs==2.4.0\naiohttp==3.10.5\naiosignal==1.3.1\nale-py==0.9.0\nappnope==0.1.4\narray-record==0.8.1\nasttokens==2.4.1\nasync-timeout==4.0.3\nattrs==24.2.0\nautorom==0.4.2\nautorom-accept-rom-license==0.6.1\nblack==24.8.0\nbox2d-py==2.3.5\ncertifi==2024.8.30\ncffi==1.17.1\ncharset-normalizer==3.3.2\nclick==8.1.7\ncloudpickle==3.0.0\ncomm==0.2.2\ncontourpy==1.3.0\ncramjam==2.8.3\ncycler==0.12.1\ncython==3.0.11\ndatasets==3.0.0\ndebugpy==1.8.5\ndecorator==4.4.2\ndill==0.3.8\ndocstring-parser==0.17.0\netils==1.13.0\nexceptiongroup==1.2.2\nexecuting==2.1.0\nfarama-notifications==0.0.4\nfasteners==0.19\nfastparquet==2024.5.0\nffmpeg-python==0.2.0\nfilelock==3.16.0\nfonttools==4.53.1\nfrozenlist==1.4.1\nfsspec==2024.6.1\nfuture==1.0.0\nglcontext==3.0.0\nglfw==2.7.0\ngrpcio==1.66.1\ngym==0.26.2\ngym-notices==0.0.8\ngym3==0.3.3\ngymnasium==0.29.1\nhf-transfer==0.1.9\nhf-xet==1.1.10\nhuggingface-hub==0.24.7\nidna==3.8\nimageio==2.35.1\nimageio-ffmpeg==0.5.1\nimportlib-metadata==8.5.0\nimportlib-resources==6.4.5\niniconfig==2.0.0\ninquirerpy==0.3.4\nipykernel==6.29.5\nipython==8.18.1\nipywidgets==8.1.5\n-e file:///hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/data\njedi==0.19.1\njinja2==3.1.4\njupyter-client==8.6.2\njupyter-core==5.7.2\njupyterlab-widgets==3.0.13\nkiwisolver==1.4.7\nloguru==0.7.3\nlz4==4.3.3\nmarkdown==3.7\nmarkdown-it-py==3.0.0\nmarkupsafe==2.1.5\nmatplotlib==3.9.2\nmatplotlib-inline==0.1.7\nmdurl==0.1.2\nmoderngl==5.12.0\nmoviepy==1.0.3\nmpmath==1.3.0\nmujoco==3.2.5\nmujoco-py==2.1.2.14\nmultidict==6.1.0\nmultiprocess==0.70.16\nmypy-extensions==1.0.0\nnest-asyncio==1.6.0\nnetworkx==3.2.1\nnumpy==2.0.2\nnvidia-cublas-cu12==12.1.3.1\nnvidia-cuda-cupti-cu12==12.1.105\nnvidia-cuda-nvrtc-cu12==12.1.105\nnvidia-cuda-runtime-cu12==12.1.105\nnvidia-cudnn-cu12==9.1.0.70\nnvidia-cufft-cu12==11.0.2.54\nnvidia-curand-cu12==10.3.2.106\nnvidia-cusolver-cu12==11.4.5.107\nnvidia-cusparse-cu12==12.1.0.106\nnvidia-nccl-cu12==2.20.5\nnvidia-nvjitlink-cu12==12.9.86\nnvidia-nvtx-cu12==12.1.105\nopencv-python==4.10.0.84\npackaging==24.1\npandas==2.2.2\nparso==0.8.4\npathspec==0.12.1\npexpect==4.9.0\npfzy==0.3.4\npillow==10.4.0\nplatformdirs==4.3.2\npluggy==1.5.0\nprocgen==0.10.7\nproglog==0.1.10\nprompt-toolkit==3.0.47\nprotobuf==5.28.1\npsutil==6.0.0\nptyprocess==0.7.0\npure-eval==0.2.3\npy==1.11.0\npyarrow==17.0.0\npycparser==2.22\npygame==2.6.1\npygments==2.18.0\npyopengl==3.1.7\npyparsing==3.1.4\npytest==7.0.1\npython-dateutil==2.9.0.post0\npytz==2024.2\npyyaml==6.0.2\npyzmq==26.2.0\nrequests==2.32.3\nrich==13.8.1\nsetuptools==80.9.0\nshimmy==1.3.0\nshtab==1.7.2\nsix==1.16.0\nstable-baselines3==2.3.2\nstack-data==0.6.3\nswig==4.2.1\nsympy==1.13.2\ntensorboard==2.17.1\ntensorboard-data-server==0.7.2\ntomli==2.0.1\ntorch==2.4.1\ntornado==6.4.1\ntqdm==4.66.5\ntraitlets==5.14.3\ntriton==3.0.0\ntypeguard==4.4.4\ntyping-extensions==4.12.2\ntyro==0.9.32\ntzdata==2024.1\nurllib3==2.2.2\nvizdoom==1.2.4\nwcwidth==0.2.13\nwerkzeug==3.0.4\nwidgetsnbextension==4.0.13\nxxhash==3.5.0\nyarl==1.11.1\nzipp==3.20.1\n",pip-requirements,tab
+ 18,83408,"TERMINAL",0,0,"",,terminal_command
+ 19,205566,"TERMINAL",0,0,"bash",,terminal_focus
+ 20,231933,"TERMINAL",0,0,"cd data/",,terminal_command
+ 21,236765,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
+ 22,241715,"TERMINAL",0,0,"python --version",,terminal_command
+ 23,241771,"TERMINAL",0,0,"]633;CPython 3.10.18\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 24,246514,"TERMINAL",0,0,"bash",,terminal_focus
+ 25,455668,"TERMINAL",0,0,"bash",,terminal_focus
+ 26,459770,"TERMINAL",0,0,"swig --version",,terminal_command
+ 27,459830,"TERMINAL",0,0,"]633;C",,terminal_output
+ 28,460705,"TERMINAL",0,0,"Unable to find option or file '--version', Use 'swig -help' for more information.\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 29,1074247,"TERMINAL",0,0,"ls",,terminal_command
+ 30,1074257,"TERMINAL",0,0,"]633;Cjasmine_data pyproject.toml requirements_doom.txt uv.lock _vizdoom _vizdoom.ini\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 31,1088085,"TERMINAL",0,0,"ls data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",,terminal_command
+ 32,1088119,"TERMINAL",0,0,"]633;Cls: cannot access 'data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py': No such file or directory\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 33,1097483,"TERMINAL",0,0,"ls .venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",,terminal_command
+ 34,1097487,"TERMINAL",0,0,"]633;C.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine/data",,terminal_output
+ 35,1099456,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",0,0,"""""""\nSave util taken from stable_baselines\nused to serialize data (class parameters) of model classes\n""""""\n\nimport base64\nimport functools\nimport io\nimport json\nimport os\nimport pathlib\nimport pickle\nimport warnings\nimport zipfile\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport cloudpickle\nimport torch as th\n\nimport stable_baselines3 as sb3\nfrom stable_baselines3.common.type_aliases import TensorDict\nfrom stable_baselines3.common.utils import get_device, get_system_info\n\n\ndef recursive_getattr(obj: Any, attr: str, *args) -> Any:\n """"""\n Recursive version of getattr\n taken from https://stackoverflow.com/questions/31174295\n\n Ex:\n > MyObject.sub_object = SubObject(name='test')\n > recursive_getattr(MyObject, 'sub_object.name') # return test\n :param obj:\n :param attr: Attribute to retrieve\n :return: The attribute\n """"""\n\n def _getattr(obj: Any, attr: str) -> Any:\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj, *attr.split(""."")])\n\n\ndef recursive_setattr(obj: Any, attr: str, val: Any) -> None:\n """"""\n Recursive version of setattr\n taken from https://stackoverflow.com/questions/31174295\n\n Ex:\n > MyObject.sub_object = SubObject(name='test')\n > recursive_setattr(MyObject, 'sub_object.name', 'hello')\n :param obj:\n :param attr: Attribute to set\n :param val: New value of the attribute\n """"""\n pre, _, post = attr.rpartition(""."")\n return setattr(recursive_getattr(obj, pre) if pre else obj, post, val)\n\n\ndef is_json_serializable(item: Any) -> bool:\n """"""\n Test if an object is serializable into JSON\n\n :param item: The object to be tested for JSON serialization.\n :return: True if object is JSON serializable, false otherwise.\n """"""\n # Try with try-except struct.\n json_serializable = True\n try:\n _ = json.dumps(item)\n except TypeError:\n json_serializable = False\n return json_serializable\n\n\ndef data_to_json(data: Dict[str, Any]) -> str:\n """"""\n Turn data (class parameters) into a JSON string for storing\n\n :param data: Dictionary of class parameters to be\n stored. Items that are not JSON serializable will be\n pickled with Cloudpickle and stored as bytearray in\n the JSON file\n :return: JSON string of the data serialized.\n """"""\n # First, check what elements can not be JSONfied,\n # and turn them into byte-strings\n serializable_data = {}\n for data_key, data_item in data.items():\n # See if object is JSON serializable\n if is_json_serializable(data_item):\n # All good, store as it is\n serializable_data[data_key] = data_item\n else:\n # Not serializable, cloudpickle it into\n # bytes and convert to base64 string for storing.\n # Also store type of the class for consumption\n # from other languages/humans, so we have an\n # idea what was being stored.\n base64_encoded = base64.b64encode(cloudpickle.dumps(data_item)).decode()\n\n # Use "":"" to make sure we do\n # not override these keys\n # when we include variables of the object later\n cloudpickle_serialization = {\n "":type:"": str(type(data_item)),\n "":serialized:"": base64_encoded,\n }\n\n # Add first-level JSON-serializable items of the\n # object for further details (but not deeper than this to\n # avoid deep nesting).\n # First we check that object has attributes (not all do,\n # e.g. 
numpy scalars)\n if hasattr(data_item, ""__dict__"") or isinstance(data_item, dict):\n # Take elements from __dict__ for custom classes\n item_generator = data_item.items if isinstance(data_item, dict) else data_item.__dict__.items\n for variable_name, variable_item in item_generator():\n # Check if serializable. If not, just include the\n # string-representation of the object.\n if is_json_serializable(variable_item):\n cloudpickle_serialization[variable_name] = variable_item\n else:\n cloudpickle_serialization[variable_name] = str(variable_item)\n\n serializable_data[data_key] = cloudpickle_serialization\n json_string = json.dumps(serializable_data, indent=4)\n return json_string\n\n\ndef json_to_data(json_string: str, custom_objects: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\n """"""\n Turn JSON serialization of class-parameters back into dictionary.\n\n :param json_string: JSON serialization of the class-parameters\n that should be loaded.\n :param custom_objects: Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n ``keras.models.load_model``. Useful when you have an object in\n file that can not be deserialized.\n :return: Loaded class parameters.\n """"""\n if custom_objects is not None and not isinstance(custom_objects, dict):\n raise ValueError(""custom_objects argument must be a dict or None"")\n\n json_dict = json.loads(json_string)\n # This will be filled with deserialized data\n return_data = {}\n for data_key, data_item in json_dict.items():\n if custom_objects is not None and data_key in custom_objects.keys():\n # If item is provided in custom_objects, replace\n # the one from JSON with the one in custom_objects\n return_data[data_key] = custom_objects[data_key]\n elif isinstance(data_item, dict) and "":serialized:"" in data_item.keys():\n # If item is dictionary with "":serialized:""\n # key, this means it is serialized with cloudpickle.\n serialization = data_item["":serialized:""]\n # Try-except deserialization in case we run into\n # errors. If so, we can tell bit more information to\n # user.\n try:\n base64_object = base64.b64decode(serialization.encode())\n deserialized_object = cloudpickle.loads(base64_object)\n except (RuntimeError, TypeError, AttributeError) as e:\n warnings.warn(\n f""Could not deserialize object {data_key}. ""\n ""Consider using `custom_objects` argument to replace ""\n ""this object.\n""\n f""Exception: {e}""\n )\n else:\n return_data[data_key] = deserialized_object\n else:\n # Read as it is\n return_data[data_key] = data_item\n return return_data\n\n\[email protected]\ndef open_path(\n path: Union[str, pathlib.Path, io.BufferedIOBase], mode: str, verbose: int = 0, suffix: Optional[str] = None\n) -> Union[io.BufferedWriter, io.BufferedReader, io.BytesIO]:\n """"""\n Opens a path for reading or writing with a preferred suffix and raises debug information.\n If the provided path is a derivative of io.BufferedIOBase it ensures that the file\n matches the provided mode, i.e. If the mode is read (""r"", ""read"") it checks that the path is readable.\n If the mode is write (""w"", ""write"") it checks that the file is writable.\n\n If the provided path is a string or a pathlib.Path, it ensures that it exists. 
If the mode is ""read""\n it checks that it exists, if it doesn't exist it attempts to read path.suffix if a suffix is provided.\n If the mode is ""write"" and the path does not exist, it creates all the parent folders. If the path\n points to a folder, it changes the path to path_2. If the path already exists and verbose >= 2,\n it raises a warning.\n\n :param path: the path to open.\n if save_path is a str or pathlib.Path and mode is ""w"", single dispatch ensures that the\n path actually exists. If path is a io.BufferedIOBase the path exists.\n :param mode: how to open the file. ""w""|""write"" for writing, ""r""|""read"" for reading.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n :param suffix: The preferred suffix. If mode is ""w"" then the opened file has the suffix.\n If mode is ""r"" then we attempt to open the path. If an error is raised and the suffix\n is not None, we attempt to open the path with the suffix.\n :return:\n """"""\n # Note(antonin): the true annotation should be IO[bytes]\n # but there is not easy way to check that\n allowed_types = (io.BufferedWriter, io.BufferedReader, io.BytesIO, io.BufferedRandom)\n if not isinstance(path, allowed_types):\n raise TypeError(f""Path {path} parameter has invalid type: expected one of {allowed_types}."")\n if path.closed:\n raise ValueError(f""File stream {path} is closed."")\n mode = mode.lower()\n try:\n mode = {""write"": ""w"", ""read"": ""r"", ""w"": ""w"", ""r"": ""r""}[mode]\n except KeyError as e:\n raise ValueError(""Expected mode to be either 'w' or 'r'."") from e\n if (""w"" == mode) and not path.writable() or (""r"" == mode) and not path.readable():\n error_msg = ""writable"" if ""w"" == mode else ""readable""\n raise ValueError(f""Expected a {error_msg} file."")\n return path\n\n\n@open_path.register(str)\ndef open_path_str(path: str, mode: str, verbose: int = 0, suffix: Optional[str] = None) -> io.BufferedIOBase:\n """"""\n Open a path given by a string. If writing to the path, the function ensures\n that the path exists.\n\n :param path: the path to open. If mode is ""w"" then it ensures that the path exists\n by creating the necessary folders and renaming path if it points to a folder.\n :param mode: how to open the file. ""w"" for writing, ""r"" for reading.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n :param suffix: The preferred suffix. If mode is ""w"" then the opened file has the suffix.\n If mode is ""r"" then we attempt to open the path. If an error is raised and the suffix\n is not None, we attempt to open the path with the suffix.\n :return:\n """"""\n return open_path_pathlib(pathlib.Path(path), mode, verbose, suffix)\n\n\n@open_path.register(pathlib.Path)\ndef open_path_pathlib(path: pathlib.Path, mode: str, verbose: int = 0, suffix: Optional[str] = None) -> io.BufferedIOBase:\n """"""\n Open a path given by a string. If writing to the path, the function ensures\n that the path exists.\n\n :param path: the path to check. If mode is ""w"" then it\n ensures that the path exists by creating the necessary folders and\n renaming path if it points to a folder.\n :param mode: how to open the file. ""w"" for writing, ""r"" for reading.\n :param verbose: Verbosity level: 0 for no output, 2 for indicating if path without suffix is not found when mode is ""r""\n :param suffix: The preferred suffix. If mode is ""w"" then the opened file has the suffix.\n If mode is ""r"" then we attempt to open the path. 
If an error is raised and the suffix\n is not None, we attempt to open the path with the suffix.\n :return:\n """"""\n if mode not in (""w"", ""r""):\n raise ValueError(""Expected mode to be either 'w' or 'r'."")\n\n if mode == ""r"":\n try:\n return open_path(path.open(""rb""), mode, verbose, suffix)\n except FileNotFoundError as error:\n if suffix is not None and suffix != """":\n newpath = pathlib.Path(f""{path}.{suffix}"")\n if verbose >= 2:\n warnings.warn(f""Path '{path}' not found. Attempting {newpath}."")\n path, suffix = newpath, None\n else:\n raise error\n else:\n try:\n if path.suffix == """" and suffix is not None and suffix != """":\n path = pathlib.Path(f""{path}.{suffix}"")\n if path.exists() and path.is_file() and verbose >= 2:\n warnings.warn(f""Path '{path}' exists, will overwrite it."")\n return open_path(path.open(""wb""), mode, verbose, suffix)\n except IsADirectoryError:\n warnings.warn(f""Path '{path}' is a folder. Will save instead to {path}_2"")\n path = pathlib.Path(f""{path}_2"")\n except FileNotFoundError: # Occurs when the parent folder doesn't exist\n warnings.warn(f""Path '{path.parent}' does not exist. Will create it."")\n path.parent.mkdir(exist_ok=True, parents=True)\n\n # if opening was successful uses the open_path() function\n # if opening failed with IsADirectory|FileNotFound, calls open_path_pathlib\n # with corrections\n # if reading failed with FileNotFoundError, calls open_path_pathlib with suffix\n return open_path_pathlib(path, mode, verbose, suffix)\n\n\ndef save_to_zip_file(\n save_path: Union[str, pathlib.Path, io.BufferedIOBase],\n data: Optional[Dict[str, Any]] = None,\n params: Optional[Dict[str, Any]] = None,\n pytorch_variables: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n) -> None:\n """"""\n Save model data to a zip archive.\n\n :param save_path: Where to store the model.\n if save_path is a str or pathlib.Path ensures that the path actually exists.\n :param data: Class parameters being stored (non-PyTorch variables)\n :param params: Model parameters being stored expected to contain an entry for every\n state_dict with its name and the state_dict.\n :param pytorch_variables: Other PyTorch variables expected to contain name and value of the variable.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n """"""\n file = open_path(save_path, ""w"", verbose=0, suffix=""zip"")\n # data/params can be None, so do not\n # try to serialize them blindly\n if data is not None:\n serialized_data = data_to_json(data)\n\n # Create a zip-archive and write our objects there.\n with zipfile.ZipFile(file, mode=""w"") as archive:\n # Do not try to save ""None"" elements\n if data is not None:\n archive.writestr(""data"", serialized_data)\n if pytorch_variables is not None:\n with archive.open(""pytorch_variables.pth"", mode=""w"", force_zip64=True) as pytorch_variables_file:\n th.save(pytorch_variables, pytorch_variables_file)\n if params is not None:\n for file_name, dict_ in params.items():\n with archive.open(file_name + "".pth"", mode=""w"", force_zip64=True) as param_file:\n th.save(dict_, param_file)\n # Save metadata: library version when file was saved\n archive.writestr(""_stable_baselines3_version"", sb3.__version__)\n # Save system info about the current python env\n archive.writestr(""system_info.txt"", get_system_info(print_info=False)[1])\n\n if isinstance(save_path, (str, pathlib.Path)):\n file.close()\n\n\ndef save_to_pkl(path: Union[str, pathlib.Path, io.BufferedIOBase], obj: Any, 
verbose: int = 0) -> None:\n """"""\n Save an object to path creating the necessary folders along the way.\n If the path exists and is a directory, it will raise a warning and rename the path.\n If a suffix is provided in the path, it will use that suffix, otherwise, it will use '.pkl'.\n\n :param path: the path to open.\n if save_path is a str or pathlib.Path and mode is ""w"", single dispatch ensures that the\n path actually exists. If path is a io.BufferedIOBase the path exists.\n :param obj: The object to save.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n """"""\n file = open_path(path, ""w"", verbose=verbose, suffix=""pkl"")\n # Use protocol>=4 to support saving replay buffers >= 4Gb\n # See https://docs.python.org/3/library/pickle.html\n pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)\n if isinstance(path, (str, pathlib.Path)):\n file.close()\n\n\ndef load_from_pkl(path: Union[str, pathlib.Path, io.BufferedIOBase], verbose: int = 0) -> Any:\n """"""\n Load an object from the path. If a suffix is provided in the path, it will use that suffix.\n If the path does not exist, it will attempt to load using the .pkl suffix.\n\n :param path: the path to open.\n if save_path is a str or pathlib.Path and mode is ""w"", single dispatch ensures that the\n path actually exists. If path is a io.BufferedIOBase the path exists.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n """"""\n file = open_path(path, ""r"", verbose=verbose, suffix=""pkl"")\n obj = pickle.load(file)\n if isinstance(path, (str, pathlib.Path)):\n file.close()\n return obj\n\n\ndef load_from_zip_file(\n load_path: Union[str, pathlib.Path, io.BufferedIOBase],\n load_data: bool = True,\n custom_objects: Optional[Dict[str, Any]] = None,\n device: Union[th.device, str] = ""auto"",\n verbose: int = 0,\n print_system_info: bool = False,\n) -> Tuple[Optional[Dict[str, Any]], TensorDict, Optional[TensorDict]]:\n """"""\n Load model data from a .zip archive\n\n :param load_path: Where to load the model from\n :param load_data: Whether we should load and return data\n (class parameters). Mainly used by 'load_parameters' to only load model parameters (weights)\n :param custom_objects: Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n ``keras.models.load_model``. 
Useful when you have an object in\n file that can not be deserialized.\n :param device: Device on which the code should run.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n :param print_system_info: Whether to print or not the system info\n about the saved model.\n :return: Class parameters, model state_dicts (aka ""params"", dict of state_dict)\n and dict of pytorch variables\n """"""\n file = open_path(load_path, ""r"", verbose=verbose, suffix=""zip"")\n\n # set device to cpu if cuda is not available\n device = get_device(device=device)\n\n # Open the zip archive and load data\n try:\n with zipfile.ZipFile(file) as archive:\n namelist = archive.namelist()\n # If data or parameters is not in the\n # zip archive, assume they were stored\n # as None (_save_to_file_zip allows this).\n data = None\n pytorch_variables = None\n params = {}\n\n # Debug system info first\n if print_system_info:\n if ""system_info.txt"" in namelist:\n print(""== SAVED MODEL SYSTEM INFO =="")\n print(archive.read(""system_info.txt"").decode())\n else:\n warnings.warn(\n ""The model was saved with SB3 <= 1.2.0 and thus cannot print system information."",\n UserWarning,\n )\n\n if ""data"" in namelist and load_data:\n # Load class parameters that are stored\n # with either JSON or pickle (not PyTorch variables).\n json_data = archive.read(""data"").decode()\n data = json_to_data(json_data, custom_objects=custom_objects)\n\n # Check for all .pth files and load them using th.load.\n # ""pytorch_variables.pth"" stores PyTorch variables, and any other .pth\n # files store state_dicts of variables with custom names (e.g. policy, policy.optimizer)\n pth_files = [file_name for file_name in namelist if os.path.splitext(file_name)[1] == "".pth""]\n for file_path in pth_files:\n with archive.open(file_path, mode=""r"") as param_file:\n # File has to be seekable, but param_file is not, so load in BytesIO first\n # fixed in python >= 3.7\n file_content = io.BytesIO()\n file_content.write(param_file.read())\n # go to start of file\n file_content.seek(0)\n # Load the parameters with the right ``map_location``.\n # Remove "".pth"" ending with splitext\n # Note(antonin): we cannot use weights_only=True, as it breaks with PyTorch 1.13, see GH#1911\n th_object = th.load(file_content, map_location=device, weights_only=False)\n # ""tensors.pth"" was renamed ""pytorch_variables.pth"" in v0.9.0, see PR #138\n if file_path == ""pytorch_variables.pth"" or file_path == ""tensors.pth"":\n # PyTorch variables (not state_dicts)\n pytorch_variables = th_object\n else:\n # State dicts. Store into params dictionary\n # with same name as in .zip file (without .pth)\n params[os.path.splitext(file_path)[0]] = th_object\n except zipfile.BadZipFile as e:\n # load_path wasn't a zip file\n raise ValueError(f""Error: the file {load_path} wasn't a zip-file"") from e\n finally:\n if isinstance(load_path, (str, pathlib.Path)):\n file.close()\n return data, params, pytorch_variables\n",python,tab
+ 36,1102104,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",4188,0,"",python,selection_mouse
+ 37,1108461,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",5092,0,"",python,selection_mouse
+ 38,1112406,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6294,0,"",python,selection_mouse
+ 39,1112418,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6293,0,"",python,selection_command
+ 40,1112650,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6293,1,")",python,selection_mouse
+ 41,1112652,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6258,35,"t = cloudpickle.loads(base64_object",python,selection_mouse
+ 42,1112654,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6255,38,"ject = cloudpickle.loads(base64_object",python,selection_mouse
+ 43,1112657,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6294,0,"",python,selection_command
+ 44,1112709,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6253,41,"object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 45,1112710,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6251,43,"d_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 46,1112711,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6250,44,"ed_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 47,1112717,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6248,46,"ized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 48,1112751,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6246,48,"alized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 49,1112767,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6245,49,"ialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 50,1112794,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6244,50,"rialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 51,1112795,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6243,51,"erialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 52,1112833,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6242,52,"serialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 53,1112847,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6241,53,"eserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 54,1112882,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6240,54,"deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 55,1112883,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6239,55," deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 56,1112911,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6238,56," deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 57,1112933,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6237,57," deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 58,1112962,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6236,58," deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 59,1112994,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6235,59," deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 60,1113047,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6234,60," deserialized_object = cloudpickle.loads(base64_object)",python,selection_mouse
+ 61,1113401,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6234,0,"",python,selection_mouse
+ 62,1113504,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6224,16," ",python,selection_mouse
+ 63,1114025,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6234,0,"",python,selection_mouse
+ 64,1114086,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6224,16," ",python,selection_mouse
+ 65,1114236,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6224,71," deserialized_object = cloudpickle.loads(base64_object)\n",python,selection_mouse
+ 66,1115610,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6244,0,"",python,selection_mouse
+ 67,1115689,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6240,19,"deserialized_object",python,selection_mouse
+ 68,1115819,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6224,71," deserialized_object = cloudpickle.loads(base64_object)\n",python,selection_mouse
+ 69,1134605,"data/.venv/lib/python3.10/site-packages/stable_baselines3/common/save_util.py",6499,0,"",python,selection_mouse
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-eb7f35ad-ba70-4d81-93ba-d60c3e498ef11760100048914-2025_10_10-14.41.32.902/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-ee716c9f-2894-4e4a-83fd-1afc9628a5fd1760867766844-2025_10_19-11.57.29.153/source.csv ADDED
@@ -0,0 +1,203 @@
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+ 1,5,"jasmine/train_tokenizer.py",0,0,"import os\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nfrom typing import cast, Optional\n\nimport einops\nimport itertools\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 64\n image_width: int = 64\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n init_lr: float = 0.0\n max_lr: float = 3e-4\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 30_000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 16\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = True\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 50\n log_image_interval: int = 1000\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 1000\n log_checkpoint_keep_period: int = 20_000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 20_000\n val_steps: int = 50\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[TokenizerVQVAE, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n return (\n TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n ),\n rng,\n )\n\n\ndef build_optimizer(model: TokenizerVQVAE, args: Args) -> nnx.ModelAndOptimizer:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.ModelAndOptimizer(model, tx)\n return optimizer\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n 
videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n return mesh, replicated_sharding, videos_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.ModelAndOptimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> Optional[ocp.CheckpointManager]:\n if args.restore_ckpt or args.save_ckpt:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n else:\n return None\n\n\ndef restore_checkpoint_if_needed(\n args: Args,\n checkpoint_manager: Optional[ocp.CheckpointManager],\n optimizer: nnx.ModelAndOptimizer,\n train_iterator: grain.DataLoaderIterator,\n val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int, nnx.ModelAndOptimizer, grain.DataLoaderIterator, grain.DataLoaderIterator\n]:\n step = 0\n if checkpoint_manager and restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n assert checkpoint_manager is not None\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n 
abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(restore_step, args=restore_args)\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = restore_step or 0\n print(f""Restored dataloader and model state from step {step}"")\n return step, optimizer, train_iterator, val_iterator\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n tokenizer, rng = build_model(args, rng)\n\n _, params, _ = nnx.split(tokenizer, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer = build_optimizer(tokenizer, args)\n del tokenizer\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding = build_mesh_and_sharding(num_devices)\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator = restore_checkpoint_if_needed(\n args, checkpoint_manager, optimizer, train_iterator, val_iterator\n )\n\n # --- Define loss and train step (close over args) ---\n def tokenizer_loss_fn(\n model: TokenizerVQVAE, inputs: dict, training: bool = False\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs, training=training)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n mse = jnp.square(gt - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n 
outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n gt_clipped = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_clipped, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_clipped, recon)).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.ModelAndOptimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: TokenizerVQVAE) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return tokenizer_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(\n tokenizer: TokenizerVQVAE, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n tokenizer.eval()\n (loss, (recon, metrics)) = tokenizer_loss_fn(tokenizer, inputs, training=False)\n return loss, recon, metrics\n\n def calculate_validation_metrics(val_dataloader, tokenizer):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n batch = None\n recon = None\n for batch in val_dataloader:\n loss, recon, metrics = val_step(tokenizer, batch)\n loss_per_step.append(loss)\n metrics_per_step.append(metrics)\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. 
Made {step} steps, expected {args.val_steps}""\n )\n\n val_loss = np.mean(loss_per_step)\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics[""val_loss""] = val_loss\n return val_metrics, batch, recon\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n }\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n }\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_batch = next(dataloader_train)\n compiled = train_step.lower(optimizer, first_batch).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_batch], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for batch in dataloader_train:\n # --- Train step ---\n loss, recon, metrics = train_step(optimizer, batch)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon = calculate_validation_metrics(\n dataloader_val, optimizer.model\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = batch[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results and step % args.val_interval == 0:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""][0].clip(\n 0, 1\n )\n val_results[""val_comparison_seq""] = jnp.concatenate(\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_results[""val_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results and step % args.val_interval == 0:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(val_results[""gt_seq_val""][0])\n ),\n val_recon=wandb.Image(\n np.asarray(val_results[""recon_seq_val""][0])\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n 
val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n assert checkpoint_manager is not None\n optimizer_state = nnx.state(optimizer)\n if val_iterator:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n if checkpoint_manager:\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab
+ 2,635,"jasmine/train_tokenizer.py",432,0,"",python,selection_mouse
+ 3,636,"jasmine/train_tokenizer.py",10,0,"",python,selection_command
+ 4,641,"jasmine/train_tokenizer.py",431,0,"",python,selection_command
+ 5,757,"jasmine/train_tokenizer.py",314,0,"",python,selection_mouse
+ 6,762,"jasmine/train_tokenizer.py",313,0,"",python,selection_command
+ 7,1705,"jasmine/train_tokenizer.py",345,0,"",python,selection_mouse
+ 8,1851,"jasmine/train_tokenizer.py",344,0,"",python,selection_command
+ 9,1873,"jasmine/train_tokenizer.py",385,0,"",python,selection_mouse
+ 10,1875,"jasmine/train_tokenizer.py",384,0,"",python,selection_command
+ 11,2600,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:57:29 AM [info] Activating crowd-code\n11:57:29 AM [info] Recording started\n11:57:29 AM [info] Initializing git provider using file system watchers...\n11:57:30 AM [info] Git repository found\n11:57:30 AM [info] Git provider initialized successfully\n11:57:30 AM [info] Initial git state: [object Object]\n",Log,tab
+ 12,4062,"jasmine/train_tokenizer.py",0,0,"",python,tab
+ 13,4663,"jasmine/train_tokenizer.py",445,0,"",python,selection_mouse
+ 14,4767,"jasmine/train_tokenizer.py",444,0,"",python,selection_command
+ 15,14740,"TERMINAL",0,0,"undefined[tum_cte0515@hkn1990 jasmine]$ git branch",,terminal_command
+ 16,14803,"TERMINAL",0,0,"]633;C[?1h=\r ablation/full-precision-training\r\n ablation/use-pytorch-dataloader\r\n action-mapper\r\n add-noise-to-combat-exposure-bias\r\n add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n:",,terminal_output
+ 17,15930,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 18,17304,"TERMINAL",0,0,"branch",,terminal_command
+ 19,17310,"TERMINAL",0,0,"]633;Cprepend-action-maskgit\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 20,279470,"jasmine/train_tokenizer.py",0,0,"",python,tab
+ 21,432951,"TERMINAL",0,0,"git diff",,terminal_command
+ 22,433028,"TERMINAL",0,0,"]633;C[?1h=\r\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 23,444780,"TERMINAL",0,0,"",,terminal_focus
+ 24,458977,"TERMINAL",0,0,"queue",,terminal_command
+ 25,459031,"TERMINAL",0,0,"]633;C",,terminal_output
+ 26,459129,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Oct 19 12:05:08 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)",,terminal_output
+ 27,459900,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 28,464279,"TERMINAL",0,0,"idling",,terminal_command
+ 29,464374,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Sun Oct 19 12:05:13 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 115 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated: 65 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle\rPartition accelerated-h200:\t 5 nodes idle",,terminal_output
+ 30,465417,"TERMINAL",0,0,"4",,terminal_output
+ 31,466334,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 32,473648,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
+ 33,473672,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 34,512244,"TERMINAL",0,0,"git checkout -b 'ablation/flash-attn-only-spatial'",,terminal_command
+ 35,512296,"TERMINAL",0,0,"]633;C",,terminal_output
+ 36,512484,"TERMINAL",0,0,"Switched to a new branch 'ablation/flash-attn-only-spatial'\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
+ 37,515969,"",0,0,"Switched from branch 'prepend-action-maskgit' to 'ablation/flash-attn-only-spatial'",,git_branch_checkout
+ 38,516511,"jasmine/utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\ndef _get_spatiotemporal_positional_encoding(d_model: int, max_len: int = 5000):\n """"""\n Creates a function that applies separate sinusoidal positional encodings to the temporal and spatial dimensions.\n """"""\n pe = jnp.zeros((max_len, d_model))\n position = jnp.arange(0, max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(jnp.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def _encode(x: jax.Array) -> jax.Array:\n """"""\n Args:\n x: The input tensor of shape (Batch, Time, Space, Dimension).\n\n Returns:\n The input tensor with positional encodings added.\n """"""\n assert x.ndim == 4, f""Input must be 4-dimensional, but got shape {x.shape}""\n\n num_timesteps = x.shape[1]\n num_spatial_patches = x.shape[2]\n\n # Temporal positional encoding: (1, T, 1, D)\n temporal_pe = pe[None, :num_timesteps, None, :]\n x = x + temporal_pe\n\n # Spatial positional encoding: (1, 1, S, D)\n spatial_pe = pe[None, None, :num_spatial_patches, :]\n x = x + spatial_pe\n\n return x\n\n return _encode\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM, sow_weights=self.sow_weights)\n x_BTNM = x_BTNM + 
z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM, sow_weights=self.sow_weights)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, ""activations"", x_BTNM)\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n """"""\n\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool = False,\n sow_activations: bool = False,\n sow_logits: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = _get_spatiotemporal_positional_encoding(\n self.model_dim, max_len=max_len\n )\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, ""logits"", x_BTNV)\n return x_BTNV\n\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = 
use_flash_attention\n self.decode = decode\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(\n self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None\n ) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, ""activations"", x_BTNM)\n\n return x_BTNM\n\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_logits: bool = False,\n sow_weights: bool = False,\n sow_activations: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = 
dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = _get_spatiotemporal_positional_encoding(\n self.model_dim, max_len=max_len\n )\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None\n ) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, ""logits"", x_BTNV)\n return x_BTNV\n\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n\n def __init__(\n self,\n latent_dim: int,\n num_latents: int,\n dropout: float,\n dtype: jnp.dtype,\n rngs: nnx.Rngs,\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n self.dtype = dtype\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.normal(stddev=1)(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = x_DL.astype(self.dtype)\n codebook = self.codebook.value.astype(self.dtype)\n\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(codebook)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape 
(batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(\n query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs\n ):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = (\n jnp.pad(\n _merge_batch_dims(bias),\n ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K)),\n )\n if bias is not None\n else None\n )\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
40
+ 39,517069,"jasmine/utils/nn.py",12,0,"",python,selection_command
41
+ 40,518308,"jasmine/utils/nn.py",53,0,"",python,selection_command
42
+ 41,518411,"jasmine/utils/nn.py",54,0,"",python,selection_command
43
+ 42,518560,"jasmine/utils/nn.py",75,0,"",python,selection_command
44
+ 43,518713,"jasmine/utils/nn.py",86,0,"",python,selection_command
45
+ 44,518875,"jasmine/utils/nn.py",110,0,"",python,selection_command
46
+ 45,518975,"jasmine/utils/nn.py",124,0,"",python,selection_command
47
+ 46,519929,"jasmine/utils/nn.py",125,0,"",python,selection_command
48
+ 47,521426,"jasmine/utils/nn.py",126,0,"",python,selection_command
49
+ 48,521540,"jasmine/utils/nn.py",206,0,"",python,selection_command
50
+ 49,521662,"jasmine/utils/nn.py",214,0,"",python,selection_command
51
+ 50,521821,"jasmine/utils/nn.py",331,0,"",python,selection_command
52
+ 51,521926,"jasmine/utils/nn.py",339,0,"",python,selection_command
53
+ 52,522057,"jasmine/utils/nn.py",378,0,"",python,selection_command
54
+ 53,522193,"jasmine/utils/nn.py",444,0,"",python,selection_command
55
+ 54,522305,"jasmine/utils/nn.py",527,0,"",python,selection_command
56
+ 55,522488,"jasmine/utils/nn.py",585,0,"",python,selection_command
57
+ 56,522700,"jasmine/utils/nn.py",643,0,"",python,selection_command
58
+ 57,522813,"jasmine/utils/nn.py",644,0,"",python,selection_command
59
+ 58,522959,"jasmine/utils/nn.py",688,0,"",python,selection_command
60
+ 59,523116,"jasmine/utils/nn.py",700,0,"",python,selection_command
61
+ 60,523259,"jasmine/utils/nn.py",714,0,"",python,selection_command
62
+ 61,523410,"jasmine/utils/nn.py",788,0,"",python,selection_command
63
+ 62,523583,"jasmine/utils/nn.py",789,0,"",python,selection_command
64
+ 63,523720,"jasmine/utils/nn.py",806,0,"",python,selection_command
65
+ 64,523865,"jasmine/utils/nn.py",868,0,"",python,selection_command
66
+ 65,524022,"jasmine/utils/nn.py",880,0,"",python,selection_command
67
+ 66,524167,"jasmine/utils/nn.py",964,0,"",python,selection_command
68
+ 67,525298,"jasmine/utils/nn.py",1151,0,"",python,selection_command
69
+ 68,525874,"jasmine/utils/nn.py",1321,0,"",python,selection_command
70
+ 69,526417,"jasmine/utils/nn.py",1387,0,"",python,selection_command
71
+ 70,526853,"jasmine/utils/nn.py",1507,0,"",python,selection_command
72
+ 71,527153,"jasmine/utils/nn.py",1682,0,"",python,selection_command
73
+ 72,527469,"jasmine/utils/nn.py",1848,0,"",python,selection_command
74
+ 73,527747,"jasmine/utils/nn.py",2060,0,"",python,selection_command
75
+ 74,528035,"jasmine/utils/nn.py",2295,0,"",python,selection_command
76
+ 75,528339,"jasmine/utils/nn.py",2513,0,"",python,selection_command
77
+ 76,529378,"jasmine/utils/nn.py",2698,0,"",python,selection_command
78
+ 77,531209,"jasmine/utils/nn.py",2911,0,"",python,selection_command
79
+ 78,531882,"jasmine/utils/nn.py",3125,0,"",python,selection_command
80
+ 79,535244,"jasmine/utils/nn.py",3223,0,"",python,selection_mouse
81
+ 80,536448,"jasmine/utils/nn.py",3282,0,"",python,selection_mouse
82
+ 81,537218,"jasmine/utils/nn.py",3266,0,"",python,selection_mouse
83
+ 82,537481,"jasmine/utils/nn.py",3266,2,"se",python,selection_mouse
84
+ 83,537482,"jasmine/utils/nn.py",3266,10,"self.use_f",python,selection_mouse
85
+ 84,537660,"jasmine/utils/nn.py",3276,0,"",python,selection_mouse
86
+ 85,539043,"jasmine/utils/nn.py",3231,0,"",python,selection_mouse
87
+ 86,539088,"jasmine/utils/nn.py",17439,0,"",python,selection_command
88
+ 87,540327,"jasmine/utils/nn.py",17530,0,"",python,selection_mouse
89
+ 88,540335,"jasmine/utils/nn.py",17529,0,"",python,selection_command
90
+ 89,542608,"jasmine/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n print_action_indices: bool = True\n output_dir: str = ""gifs/""\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 1\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n use_gt_actions: bool = False\n # Dynamics checkpoint\n dyna_type: str = ""maskgit""\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n # FIXME (f.srambical): implement spatiotemporal KV caching and set decode=True\n decode=False,\n rngs=rngs,\n )\n\n # Need to delete lam decoder for checkpoint loading\n if not args.use_gt_actions:\n assert genie.lam is not None\n del genie.lam.decoder\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.ModelAndOptimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n assert args.dyna_type in [\n ""maskgit"",\n ""causal"",\n ], f""Invalid dynamics type: {args.dyna_type}""\n frames, _ = model.sample(\n batch,\n args.seq_len,\n args.temperature,\n args.sample_argmax,\n args.maskgit_steps,\n )\n return frames\n\n # --- Define autoregressive sampling loop ---\n def _autoreg_sample(genie, rng, batch):\n batch[""videos""] = batch[""videos""][:, : args.start_frame]\n batch[""rng""] = rng\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n batch = next(dataloader)\n gt_video = jnp.asarray(batch[""videos""], dtype=jnp.float32) / 255.0\n batch[""videos""] = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n action_batch_E = None\n if not args.use_gt_actions:\n action_batch_E = genie.vq_encode(batch, training=False)\n batch[""latent_actions""] = action_batch_E\n\n # --- Sample + evaluate video ---\n recon_video_BSHWC = _autoreg_sample(genie, rng, batch)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n\n gt = gt_video.clip(0, 1)[:, args.start_frame :]\n recon = recon_video_BSHWC.clip(0, 1)[:, args.start_frame :]\n\n ssim_vmap = jax.vmap(pix.ssim, in_axes=(0, 0))\n psnr_vmap = jax.vmap(pix.psnr, in_axes=(0, 0))\n ssim = ssim_vmap(gt, recon)\n psnr = psnr_vmap(gt, recon)\n per_frame_ssim = ssim.mean(0)\n per_frame_psnr = psnr.mean(0)\n avg_ssim = ssim.mean()\n avg_psnr = psnr.mean()\n\n print(""Per-frame SSIM:\n"", per_frame_ssim)\n print(""Per-frame PSNR:\n"", per_frame_psnr)\n\n print(f""SSIM: {avg_ssim}"")\n print(f""PSNR: {avg_psnr}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n B = 
batch[""videos""].shape[0]\n if action_batch_E is not None:\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, args.seq_len - 1, 1))\n else:\n action_batch_BSm11 = jnp.reshape(\n batch[""actions""][:, :-1], (B, args.seq_len - 1, 1)\n )\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(B):\n if args.print_action_indices:\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * batch[""videos""].shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n\n os.makedirs(args.output_dir, exist_ok=True)\n imgs[0].save(\n os.path.join(args.output_dir, f""generation_{time.time()}.gif""),\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab
91
+ 90,543597,"jasmine/utils/nn.py",0,0,"",python,tab
92
+ 91,551731,"jasmine/utils/nn.py",10448,0,"",python,selection_mouse
93
+ 92,551909,"jasmine/utils/nn.py",10448,2,"se",python,selection_mouse
94
+ 93,551910,"jasmine/utils/nn.py",10448,4,"self",python,selection_mouse
95
+ 94,551939,"jasmine/utils/nn.py",10448,5,"self.",python,selection_mouse
96
+ 95,551940,"jasmine/utils/nn.py",10448,7,"self.us",python,selection_mouse
97
+ 96,551953,"jasmine/utils/nn.py",10448,10,"self.use_f",python,selection_mouse
98
+ 97,551967,"jasmine/utils/nn.py",10448,11,"self.use_fl",python,selection_mouse
99
+ 98,551989,"jasmine/utils/nn.py",10448,13,"self.use_flas",python,selection_mouse
100
+ 99,552002,"jasmine/utils/nn.py",10448,14,"self.use_flash",python,selection_mouse
101
+ 100,552020,"jasmine/utils/nn.py",10448,16,"self.use_flash_a",python,selection_mouse
102
+ 101,552068,"jasmine/utils/nn.py",10448,17,"self.use_flash_at",python,selection_mouse
103
+ 102,552069,"jasmine/utils/nn.py",10448,18,"self.use_flash_att",python,selection_mouse
104
+ 103,552085,"jasmine/utils/nn.py",10448,19,"self.use_flash_atte",python,selection_mouse
105
+ 104,552101,"jasmine/utils/nn.py",10448,20,"self.use_flash_atten",python,selection_mouse
106
+ 105,552178,"jasmine/utils/nn.py",10448,55,"self.use_flash_attention, is_causal=True\n ),",python,selection_mouse
107
+ 106,552631,"jasmine/utils/nn.py",10448,25,"self.use_flash_attention,",python,selection_mouse
108
+ 107,552868,"jasmine/utils/nn.py",10448,24,"self.use_flash_attention",python,selection_mouse
109
+ 108,553782,"jasmine/utils/nn.py",10448,24,"",python,content
110
+ 109,554472,"jasmine/utils/nn.py",10448,0,"u",python,content
111
+ 110,554474,"jasmine/utils/nn.py",10449,0,"",python,selection_keyboard
112
+ 111,554540,"jasmine/utils/nn.py",10449,0,"s",python,content
113
+ 112,554541,"jasmine/utils/nn.py",10450,0,"",python,selection_keyboard
114
+ 113,554693,"jasmine/utils/nn.py",10450,0,"e",python,content
115
+ 114,554695,"jasmine/utils/nn.py",10451,0,"",python,selection_keyboard
116
+ 115,556942,"jasmine/utils/nn.py",10451,0,"_flash_attention",python,content
117
+ 116,558714,"jasmine/utils/nn.py",10467,0,"=",python,content
118
+ 117,558716,"jasmine/utils/nn.py",10468,0,"",python,selection_keyboard
119
+ 118,560273,"jasmine/utils/nn.py",10468,0,"F",python,content
120
+ 119,560274,"jasmine/utils/nn.py",10469,0,"",python,selection_keyboard
121
+ 120,560377,"jasmine/utils/nn.py",10469,0,"a",python,content
122
+ 121,560378,"jasmine/utils/nn.py",10470,0,"",python,selection_keyboard
123
+ 122,560605,"jasmine/utils/nn.py",10470,0,"l",python,content
124
+ 123,560607,"jasmine/utils/nn.py",10471,0,"",python,selection_keyboard
125
+ 124,561576,"jasmine/utils/nn.py",10471,0,"se",python,content
126
+ 125,563747,"jasmine/utils/nn.py",10472,0,"",python,selection_command
127
+ 126,566969,"jasmine/utils/nn.py",10449,0,"",python,selection_mouse
128
+ 127,567078,"jasmine/utils/nn.py",10449,2,"se",python,selection_mouse
129
+ 128,567099,"jasmine/utils/nn.py",10449,6,"se_fla",python,selection_mouse
130
+ 129,567123,"jasmine/utils/nn.py",10449,8,"se_flash",python,selection_mouse
131
+ 130,567142,"jasmine/utils/nn.py",10449,11,"se_flash_at",python,selection_mouse
132
+ 131,567154,"jasmine/utils/nn.py",10449,14,"se_flash_atten",python,selection_mouse
133
+ 132,567171,"jasmine/utils/nn.py",10449,16,"se_flash_attenti",python,selection_mouse
134
+ 133,567238,"jasmine/utils/nn.py",10449,18,"se_flash_attention",python,selection_mouse
135
+ 134,567239,"jasmine/utils/nn.py",10449,20,"se_flash_attention=F",python,selection_mouse
136
+ 135,567239,"jasmine/utils/nn.py",10449,21,"se_flash_attention=Fa",python,selection_mouse
137
+ 136,567240,"jasmine/utils/nn.py",10449,22,"se_flash_attention=Fal",python,selection_mouse
138
+ 137,567299,"jasmine/utils/nn.py",10449,23,"se_flash_attention=Fals",python,selection_mouse
139
+ 138,567334,"jasmine/utils/nn.py",10449,24,"se_flash_attention=False",python,selection_mouse
140
+ 139,567668,"jasmine/utils/nn.py",10449,23,"se_flash_attention=Fals",python,selection_mouse
141
+ 140,568495,"jasmine/utils/nn.py",10449,23,"",python,content
142
+ 141,568996,"jasmine/utils/nn.py",10449,0,"se_flash_attention=Fals",python,content
143
+ 142,569005,"jasmine/utils/nn.py",10449,0,"",python,selection_command
144
+ 143,569558,"jasmine/utils/nn.py",10448,25,"",python,content
145
+ 144,570022,"jasmine/utils/nn.py",10448,0,"self.use_flash_attention",python,content
146
+ 145,570028,"jasmine/utils/nn.py",10448,0,"",python,selection_command
147
+ 146,571427,"jasmine/utils/nn.py",10526,0,"",python,selection_mouse
148
+ 147,571457,"jasmine/utils/nn.py",10525,0,"",python,selection_command
149
+ 148,581715,"jasmine/utils/nn.py",9971,0,"",python,selection_mouse
150
+ 149,581775,"jasmine/utils/nn.py",9971,3,"sel",python,selection_mouse
151
+ 150,581799,"jasmine/utils/nn.py",9971,7,"self.us",python,selection_mouse
152
+ 151,581821,"jasmine/utils/nn.py",9971,8,"self.use",python,selection_mouse
153
+ 152,581836,"jasmine/utils/nn.py",9971,10,"self.use_f",python,selection_mouse
154
+ 153,581869,"jasmine/utils/nn.py",9971,12,"self.use_fla",python,selection_mouse
155
+ 154,581870,"jasmine/utils/nn.py",9971,14,"self.use_flash",python,selection_mouse
156
+ 155,581886,"jasmine/utils/nn.py",9971,15,"self.use_flash_",python,selection_mouse
157
+ 156,581904,"jasmine/utils/nn.py",9971,16,"self.use_flash_a",python,selection_mouse
158
+ 157,581918,"jasmine/utils/nn.py",9971,17,"self.use_flash_at",python,selection_mouse
159
+ 158,581950,"jasmine/utils/nn.py",9971,18,"self.use_flash_att",python,selection_mouse
160
+ 159,581984,"jasmine/utils/nn.py",9971,19,"self.use_flash_atte",python,selection_mouse
161
+ 160,582035,"jasmine/utils/nn.py",9971,20,"self.use_flash_atten",python,selection_mouse
162
+ 161,582173,"jasmine/utils/nn.py",9971,21,"self.use_flash_attent",python,selection_mouse
163
+ 162,582305,"jasmine/utils/nn.py",9971,22,"self.use_flash_attenti",python,selection_mouse
164
+ 163,582370,"jasmine/utils/nn.py",9971,23,"self.use_flash_attentio",python,selection_mouse
165
+ 164,582454,"jasmine/utils/nn.py",9971,24,"self.use_flash_attention",python,selection_mouse
166
+ 165,583104,"jasmine/utils/nn.py",9971,24,"",python,content
167
+ 166,584569,"jasmine/utils/nn.py",9971,0,"use_flash_attention=False",python,content
168
+ 167,597731,"jasmine/utils/nn.py",3266,0,"",python,selection_mouse
169
+ 168,598237,"jasmine/utils/nn.py",3266,5,"self.",python,selection_mouse
170
+ 169,598238,"jasmine/utils/nn.py",3266,19,"self.use_flash_atte",python,selection_mouse
171
+ 170,598238,"jasmine/utils/nn.py",3266,20,"self.use_flash_atten",python,selection_mouse
172
+ 171,598256,"jasmine/utils/nn.py",3266,21,"self.use_flash_attent",python,selection_mouse
173
+ 172,598333,"jasmine/utils/nn.py",3266,22,"self.use_flash_attenti",python,selection_mouse
174
+ 173,598536,"jasmine/utils/nn.py",3266,23,"self.use_flash_attentio",python,selection_mouse
175
+ 174,598567,"jasmine/utils/nn.py",3266,24,"self.use_flash_attention",python,selection_mouse
176
+ 175,600167,"jasmine/utils/nn.py",3290,0,"=False",python,content
177
+ 176,600167,"jasmine/utils/nn.py",3266,5,"",python,content
178
+ 177,602827,"jasmine/utils/nn.py",3419,0,"",python,selection_mouse
179
+ 178,605557,"jasmine/utils/nn.py",0,0,"",python,tab
180
+ 179,608088,"jasmine/utils/nn.py",3273,0,"",python,selection_mouse
181
+ 180,608876,"jasmine/utils/nn.py",3290,0,"",python,selection_mouse
182
+ 181,609034,"jasmine/utils/nn.py",3286,5,"False",python,selection_mouse
183
+ 182,609431,"jasmine/utils/nn.py",3281,0,"",python,selection_mouse
184
+ 183,610011,"jasmine/utils/nn.py",3237,0,"",python,selection_mouse
185
+ 184,610125,"jasmine/utils/nn.py",17441,0,"",python,selection_command
186
+ 185,615407,"TERMINAL",0,0,"git diff",,terminal_command
187
+ 186,615440,"TERMINAL",0,0,"]633;C[?1h=\rdiff --git a/jasmine/utils/nn.py b/jasmine/utils/nn.py\r\nindex 473e6be..dce4fb5 100644\r\n--- a/jasmine/utils/nn.py\r\n+++ b/jasmine/utils/nn.py\r\n@@ -101,7 +101,7 @@ class STBlock(nnx.Module):\r\n param_dtype=self.param_dtype,\r\n dtype=self.dtype,\r\n attention_fn=_create_flash_attention_fn(\r\n- self.use_flash_attention, is_causal=True\r\n+ use_flash_attention=False, is_causal=True\r\n ),\r\n rngs=rngs,\r\n decode=False,\r\n@@ -312,7 +312,7 @@ class TransformerBlock(nnx.Module):\r\n param_dtype=self.param_dtype,\r\n dtype=self.dtype,\r\n attention_fn=_create_flash_attention_fn(\r\n- self.use_flash_attention, is_causal=True\r\n+ use_flash_attention=False, is_causal=True\r\n ),\r\n rngs=rngs,\r\n decode=self.decode,\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
188
+ 187,627512,"TERMINAL",0,0,"idling",,terminal_command
189
+ 188,627617,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Sun Oct 19 12:07:56 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 113 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated: 65 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle\rPartition accelerated-h200:\t 5 nodes idle",,terminal_output
190
+ 189,628641,"TERMINAL",0,0,"7",,terminal_output
191
+ 190,629681,"TERMINAL",0,0,"8",,terminal_output
192
+ 191,630717,"TERMINAL",0,0,"94",,terminal_output
193
+ 192,631752,"TERMINAL",0,0,"8:00",,terminal_output
194
+ 193,632793,"TERMINAL",0,0,"1",,terminal_output
195
+ 194,633839,"TERMINAL",0,0,"2",,terminal_output
196
+ 195,634949,"TERMINAL",0,0,"3",,terminal_output
197
+ 196,635922,"TERMINAL",0,0,"4",,terminal_output
198
+ 197,636964,"TERMINAL",0,0,"5",,terminal_output
199
+ 198,638002,"TERMINAL",0,0,"7",,terminal_output
200
+ 199,639038,"TERMINAL",0,0,"8",,terminal_output
201
+ 200,640076,"TERMINAL",0,0,"9",,terminal_output
202
+ 201,641119,"TERMINAL",0,0,"10",,terminal_output
203
+ 202,641187,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-efd7ba8c-a234-4233-92f3-7b2e61adffff1760041314363-2025_10_09-22.22.58.118/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-fb287441-450c-488c-b0fb-c98a58fc5b261760876714373-2025_10_19-14.25.55.193/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
af40c12958422f63ff638ecf057ca5960a6e79dafb430c2e4343b991cc9cefcc/crowd-code-fc82b954-d473-479e-931a-c238d50a81b41761056077856-2025_10_21-16.15.07.951/source.csv ADDED
The diff for this file is too large to render. See raw diff