You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I am getting an error when trying to finetune the VisualBERT pretrained model on the Hateful Memes dataset. I am not sure why it is looking for dev.jsonl when no such jsonl file came with the hateful_memes dataset package. Can somebody point me to the config changes to be made in MMF to make this work?
Code:
"""
Uncomment it if needed
"""
#os.environ['OC_DISABLE_DOT_ACCESS_WARNING']="1"
Namespace(config_override=None, local_rank=None, opts=['config=projects/visual_bert/configs/hateful_memes/from_coco.yaml', 'model=visual_bert', 'dataset=hateful_memes', 'run_type=train_val', 'checkpoint.resume_zoo=visual_bert.pretrained.cc.full', 'training.tensorboard=True', 'training.checkpoint_interval=50', 'training.evaluation_interval=50', 'training.max_updates=3000', 'training.log_interval=100', 'dataset_config.hateful_memes.max_features=100', 'dataset_config.hateful_memes.annotations.train[0]=/content/train_v10.jsonl', 'dataset_config.hateful_memes.annotations.val[0]=hateful_memes/defaults/annotations/dev_unseen.jsonl', 'dataset_config.hateful_memes.annotations.test[0]=hateful_memes/defaults/annotations/test_unseen.jsonl', 'dataset_config.hateful_memes.features.train[0]=/content/features', 'dataset_config.hateful_memes.features.val[0]=/content/features', 'dataset_config.hateful_memes.features.test[0]=/content/features', 'training.lr_ratio=0.3', 'training.use_warmup=True', 'training.batch_size=32', 'optimizer.params.lr=5.0e-05', 'env.save_dir=./sub1', 'env.tensorboard_logdir=logs/fit/sub1'])
/usr/local/lib/python3.7/dist-packages/omegaconf/dictconfig.py:252: UserWarning: Keys with dot (model.bert) are deprecated and will have different semantic meaning the next major version of OmegaConf (2.1)
See the compact keys issue for more details: omry/omegaconf#152
You can disable this warning by setting the environment variable OC_DISABLE_DOT_ACCESS_WARNING=1
warnings.warn(message=msg, category=UserWarning)
Overriding option config to projects/visual_bert/configs/hateful_memes/from_coco.yaml
Overriding option model to visual_bert
Overriding option datasets to hateful_memes
Overriding option run_type to train_val
Overriding option checkpoint.resume_zoo to visual_bert.pretrained.cc.full
Overriding option training.tensorboard to True
Overriding option training.checkpoint_interval to 50
Overriding option training.evaluation_interval to 50
Overriding option training.max_updates to 3000
Overriding option training.log_interval to 100
Overriding option dataset_config.hateful_memes.max_features to 100
Overriding option training.lr_ratio to 0.3
Overriding option training.use_warmup to True
Overriding option training.batch_size to 32
Overriding option optimizer.params.lr to 5.0e-05
Overriding option env.save_dir to ./sub1
Overriding option env.tensorboard_logdir to logs/fit/sub1
Using seed 22503996
Logging to: ./sub1/logs/train_2022-04-20T11:00:22.log
Downloading features.tar.gz: 100% 8.44G/8.44G [05:03<00:00, 27.8MB/s]
Downloading extras.tar.gz: 100% 211k/211k [00:00<00:00, 484kB/s]
Traceback (most recent call last):
File "/usr/local/bin/mmf_run", line 8, in <module>
sys.exit(run())
File "/usr/local/lib/python3.7/dist-packages/mmf_cli/run.py", line 111, in run
main(configuration, predict=predict)
File "/usr/local/lib/python3.7/dist-packages/mmf_cli/run.py", line 40, in main
trainer.load()
File "/usr/local/lib/python3.7/dist-packages/mmf/trainers/base_trainer.py", line 59, in load
self.load_datasets()
File "/usr/local/lib/python3.7/dist-packages/mmf/trainers/base_trainer.py", line 83, in load_datasets
self.dataset_loader.load_datasets()
File "/usr/local/lib/python3.7/dist-packages/mmf/common/dataset_loader.py", line 18, in load_datasets
self.val_dataset.load(self.config)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/multi_dataset_loader.py", line 114, in load
self.build_datasets(config)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/multi_dataset_loader.py", line 131, in build_datasets
dataset_instance = build_dataset(dataset, dataset_config, self.dataset_type)
File "/usr/local/lib/python3.7/dist-packages/mmf/utils/build.py", line 106, in build_dataset
dataset = builder_instance.load_dataset(config, dataset_type)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/base_dataset_builder.py", line 96, in load_dataset
dataset = self.load(config, dataset_type, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/builders/hateful_memes/builder.py", line 39, in load
self.dataset = super().load(config, dataset_type, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/mmf_dataset_builder.py", line 141, in load
dataset = dataset_class(config, dataset_type, imdb_idx)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/builders/hateful_memes/dataset.py", line 19, in __init__
super().__init__(dataset_name, config, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/mmf_dataset.py", line 25, in __init__
self.annotation_db = self._build_annotation_db()
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/mmf_dataset.py", line 39, in _build_annotation_db
return AnnotationDatabase(self.config, annotation_path)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/databases/annotation_database.py", line 24, in init
self._load_annotation_db(path)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/databases/annotation_database.py", line 32, in _load_annotation_db
self._load_jsonl(path)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/databases/annotation_database.py", line 39, in _load_jsonl
with PathManager.open(path, "r") as f:
File "/usr/local/lib/python3.7/dist-packages/mmf/utils/file_io.py", line 45, in open
newline=newline,
FileNotFoundError: [Errno 2] No such file or directory: '/root/.cache/torch/mmf/data/datasets/hateful_memes/defaults/annotations/dev.jsonl'
The text was updated successfully, but these errors were encountered:
I am getting an error when trying to finetune the VisualBERT pretrained model on the Hateful Memes dataset. I am not sure why it is looking for dev.jsonl when no such jsonl file came with the hateful_memes dataset package. Can somebody point me to the config changes to be made in MMF to make this work?
Code:
"""
Uncomment it if needed
"""
#os.environ['OC_DISABLE_DOT_ACCESS_WARNING']="1"
os.chdir(home)

# Define where image features are
feats_dir = os.path.join(home, "features")

# Define where train.jsonl is
train_dir = os.path.join(home, "train_v10.jsonl")

!mmf_run config="projects/visual_bert/configs/hateful_memes/from_coco.yaml" \
  model="visual_bert" \
  dataset=hateful_memes \
  run_type=train_val \
  checkpoint.resume_zoo=visual_bert.pretrained.cc.full \
  training.tensorboard=True \
  training.checkpoint_interval=50 \
  training.evaluation_interval=50 \
  training.max_updates=3000 \
  training.log_interval=100 \
  dataset_config.hateful_memes.max_features=100 \
  dataset_config.hateful_memes.annotations.train[0]=$train_dir \
  dataset_config.hateful_memes.annotations.val[0]=hateful_memes/defaults/annotations/dev_unseen.jsonl \
  dataset_config.hateful_memes.annotations.test[0]=hateful_memes/defaults/annotations/test_unseen.jsonl \
  dataset_config.hateful_memes.features.train[0]=$feats_dir \
  dataset_config.hateful_memes.features.val[0]=$feats_dir \
  dataset_config.hateful_memes.features.test[0]=$feats_dir \
  training.lr_ratio=0.3 \
  training.use_warmup=True \
  training.batch_size=32 \
  optimizer.params.lr=5.0e-05 \
  env.save_dir=./sub1 \
  env.tensorboard_logdir=logs/fit/sub1
Error Log:
Error Log:
Namespace(config_override=None, local_rank=None, opts=['config=projects/visual_bert/configs/hateful_memes/from_coco.yaml', 'model=visual_bert', 'dataset=hateful_memes', 'run_type=train_val', 'checkpoint.resume_zoo=visual_bert.pretrained.cc.full', 'training.tensorboard=True', 'training.checkpoint_interval=50', 'training.evaluation_interval=50', 'training.max_updates=3000', 'training.log_interval=100', 'dataset_config.hateful_memes.max_features=100', 'dataset_config.hateful_memes.annotations.train[0]=/content/train_v10.jsonl', 'dataset_config.hateful_memes.annotations.val[0]=hateful_memes/defaults/annotations/dev_unseen.jsonl', 'dataset_config.hateful_memes.annotations.test[0]=hateful_memes/defaults/annotations/test_unseen.jsonl', 'dataset_config.hateful_memes.features.train[0]=/content/features', 'dataset_config.hateful_memes.features.val[0]=/content/features', 'dataset_config.hateful_memes.features.test[0]=/content/features', 'training.lr_ratio=0.3', 'training.use_warmup=True', 'training.batch_size=32', 'optimizer.params.lr=5.0e-05', 'env.save_dir=./sub1', 'env.tensorboard_logdir=logs/fit/sub1'])
/usr/local/lib/python3.7/dist-packages/omegaconf/dictconfig.py:252: UserWarning: Keys with dot (model.bert) are deprecated and will have different semantic meaning the next major version of OmegaConf (2.1)
See the compact keys issue for more details: omry/omegaconf#152
You can disable this warning by setting the environment variable OC_DISABLE_DOT_ACCESS_WARNING=1
warnings.warn(message=msg, category=UserWarning)
Overriding option config to projects/visual_bert/configs/hateful_memes/from_coco.yaml
Overriding option model to visual_bert
Overriding option datasets to hateful_memes
Overriding option run_type to train_val
Overriding option checkpoint.resume_zoo to visual_bert.pretrained.cc.full
Overriding option training.tensorboard to True
Overriding option training.checkpoint_interval to 50
Overriding option training.evaluation_interval to 50
Overriding option training.max_updates to 3000
Overriding option training.log_interval to 100
Overriding option dataset_config.hateful_memes.max_features to 100
Overriding option training.lr_ratio to 0.3
Overriding option training.use_warmup to True
Overriding option training.batch_size to 32
Overriding option optimizer.params.lr to 5.0e-05
Overriding option env.save_dir to ./sub1
Overriding option env.tensorboard_logdir to logs/fit/sub1
Using seed 22503996
Logging to: ./sub1/logs/train_2022-04-20T11:00:22.log
Downloading features.tar.gz: 100% 8.44G/8.44G [05:03<00:00, 27.8MB/s]
Downloading extras.tar.gz: 100% 211k/211k [00:00<00:00, 484kB/s]
Traceback (most recent call last):
File "/usr/local/bin/mmf_run", line 8, in <module>
sys.exit(run())
File "/usr/local/lib/python3.7/dist-packages/mmf_cli/run.py", line 111, in run
main(configuration, predict=predict)
File "/usr/local/lib/python3.7/dist-packages/mmf_cli/run.py", line 40, in main
trainer.load()
File "/usr/local/lib/python3.7/dist-packages/mmf/trainers/base_trainer.py", line 59, in load
self.load_datasets()
File "/usr/local/lib/python3.7/dist-packages/mmf/trainers/base_trainer.py", line 83, in load_datasets
self.dataset_loader.load_datasets()
File "/usr/local/lib/python3.7/dist-packages/mmf/common/dataset_loader.py", line 18, in load_datasets
self.val_dataset.load(self.config)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/multi_dataset_loader.py", line 114, in load
self.build_datasets(config)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/multi_dataset_loader.py", line 131, in build_datasets
dataset_instance = build_dataset(dataset, dataset_config, self.dataset_type)
File "/usr/local/lib/python3.7/dist-packages/mmf/utils/build.py", line 106, in build_dataset
dataset = builder_instance.load_dataset(config, dataset_type)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/base_dataset_builder.py", line 96, in load_dataset
dataset = self.load(config, dataset_type, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/builders/hateful_memes/builder.py", line 39, in load
self.dataset = super().load(config, dataset_type, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/mmf_dataset_builder.py", line 141, in load
dataset = dataset_class(config, dataset_type, imdb_idx)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/builders/hateful_memes/dataset.py", line 19, in __init__
super().__init__(dataset_name, config, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/mmf_dataset.py", line 25, in __init__
self.annotation_db = self._build_annotation_db()
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/mmf_dataset.py", line 39, in _build_annotation_db
return AnnotationDatabase(self.config, annotation_path)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/databases/annotation_database.py", line 24, in init
self._load_annotation_db(path)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/databases/annotation_database.py", line 32, in _load_annotation_db
self._load_jsonl(path)
File "/usr/local/lib/python3.7/dist-packages/mmf/datasets/databases/annotation_database.py", line 39, in _load_jsonl
with PathManager.open(path, "r") as f:
File "/usr/local/lib/python3.7/dist-packages/mmf/utils/file_io.py", line 45, in open
newline=newline,
FileNotFoundError: [Errno 2] No such file or directory: '/root/.cache/torch/mmf/data/datasets/hateful_memes/defaults/annotations/dev.jsonl'
The text was updated successfully, but these errors were encountered: