Commit
Merge pull request #28 from michealman114/main
6.S898 Final Project: Linear Mode Connectivity in Transformers
Showing 8 changed files with 225 additions and 0 deletions.
@@ -0,0 +1,63 @@
@misc{adilova2023layerwise,
  title={Layer-wise Linear Mode Connectivity},
  author={Linara Adilova and Maksym Andriushchenko and Michael Kamp and Asja Fischer and Martin Jaggi},
  year={2023},
  eprint={2307.06966},
  archivePrefix={arXiv},
  primaryClass={cs.LG}
}
@misc{frankle2020linear,
  title={Linear Mode Connectivity and the Lottery Ticket Hypothesis},
  author={Jonathan Frankle and Gintare Karolina Dziugaite and Daniel M. Roy and Michael Carbin},
  year={2020},
  eprint={1912.05671},
  archivePrefix={arXiv},
  primaryClass={cs.LG}
}
@InProceedings{pmlr-v162-zhang22ao,
  title = {{PLATON}: Pruning Large Transformer Models with Upper Confidence Bound of Weight Importance},
  author = {Zhang, Qingru and Zuo, Simiao and Liang, Chen and Bukharin, Alexander and He, Pengcheng and Chen, Weizhu and Zhao, Tuo},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
  pages = {26809--26823},
  year = {2022},
  editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
  volume = {162},
  series = {Proceedings of Machine Learning Research},
  month = {17--23 Jul},
  publisher = {PMLR},
  pdf = {https://proceedings.mlr.press/v162/zhang22ao/zhang22ao.pdf},
  url = {https://proceedings.mlr.press/v162/zhang22ao.html},
  abstract = {Large Transformer-based models have exhibited superior performance in various natural language processing and computer vision tasks. However, these models contain enormous amounts of parameters, which restrict their deployment to real-world applications. To reduce the model size, researchers prune these models based on the weights’ importance scores. However, such scores are usually estimated on mini-batches during training, which incurs large variability/uncertainty due to mini-batch sampling and complicated training dynamics. As a result, some crucial weights could be pruned by commonly used pruning methods because of such uncertainty, which makes training unstable and hurts generalization. To resolve this issue, we propose PLATON, which captures the uncertainty of importance scores by upper confidence bound of importance estimation. In particular, for the weights with low importance scores but high uncertainty, PLATON tends to retain them and explores their capacity. We conduct extensive experiments with several Transformer-based models on natural language understanding, question answering and image classification to validate the effectiveness of PLATON. Results demonstrate that PLATON manifests notable improvement under different sparsity levels. Our code is publicly available at https://github.com/QingruZhang/PLATON.}
}
@article{kwon2022fast,
  title={A fast post-training pruning framework for transformers},
  author={Kwon, Woosuk and Kim, Sehoon and Mahoney, Michael W and Hassoun, Joseph and Keutzer, Kurt and Gholami, Amir},
  journal={Advances in Neural Information Processing Systems},
  volume={35},
  pages={24101--24116},
  year={2022}
}
@misc{dosovitskiy2021image,
  title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
  author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby},
  year={2021},
  eprint={2010.11929},
  archivePrefix={arXiv},
  primaryClass={cs.CV}
}
@misc{shen2023data,
  title={Data Level Lottery Ticket Hypothesis for Vision Transformers},
  author={Xuan Shen and Zhenglun Kong and Minghai Qin and Peiyan Dong and Geng Yuan and Xin Meng and Hao Tang and Xiaolong Ma and Yanzhi Wang},
  year={2023},
  eprint={2211.01484},
  archivePrefix={arXiv},
  primaryClass={cs.CV}
}
@misc{chen2020lottery,
  title={The Lottery Ticket Hypothesis for Pre-trained BERT Networks},
  author={Tianlong Chen and Jonathan Frankle and Shiyu Chang and Sijia Liu and Yang Zhang and Zhangyang Wang and Michael Carbin},
  year={2020},
  eprint={2007.12223},
  archivePrefix={arXiv},
  primaryClass={cs.LG}
}
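The PLATON entry above describes pruning by an upper confidence bound (UCB) of weight importance: weights with low estimated importance but high uncertainty are retained rather than pruned. As a rough illustration of that idea only (not the authors' implementation), the sketch below ranks weights by `importance + beta * uncertainty` and prunes the lowest-ranked fraction; the function name, the `beta` knob, and the `importance`/`uncertainty` inputs are placeholders, not values or APIs taken from the paper.

```python
import numpy as np

def ucb_keep_mask(importance, uncertainty, prune_fraction, beta=1.0):
    """Boolean mask of weights to keep, ranked by a UCB-style score.

    All arguments are placeholders for this sketch: `importance` and
    `uncertainty` stand in for whatever running estimates a pruning
    method maintains, and `beta` weights the uncertainty term.
    """
    importance = np.asarray(importance, dtype=float)
    uncertainty = np.asarray(uncertainty, dtype=float)

    ucb = importance + beta * uncertainty        # optimistic importance estimate
    k = int(prune_fraction * ucb.size)           # number of weights to prune
    if k == 0:
        return np.ones_like(ucb, dtype=bool)     # nothing to prune
    threshold = np.partition(ucb, k - 1)[k - 1]  # k-th smallest UCB score
    return ucb > threshold                       # keep weights above the cutoff
```

Under this scoring, a weight with a low mean importance but a large uncertainty can still land above the cutoff, which mirrors the retain-and-explore behavior the abstract describes.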