Kim et al. "LLMem: Estimating GPU Memory Usage for Fine-Tuning Pre-Trained LLMs." International Joint Conference on Artificial Intelligence, 2024.
Markdown
[Kim et al. "LLMem: Estimating GPU Memory Usage for Fine-Tuning Pre-Trained LLMs." International Joint Conference on Artificial Intelligence, 2024.](https://mlanthology.org/ijcai/2024/kim2024ijcai-llmem/)
BibTeX
@inproceedings{kim2024ijcai-llmem,
  title     = {{LLMem: Estimating GPU Memory Usage for Fine-Tuning Pre-Trained LLMs}},
  author    = {Kim, Taeho and Wang, Yanming and Chaturvedi, Vatshank and Gupta, Lokesh and Kim, Seyeon and Kwon, Yongin and Ha, Sangtae},
  booktitle = {International Joint Conference on Artificial Intelligence},
  year      = {2024},
  pages     = {6324--6332},
  url       = {https://mlanthology.org/ijcai/2024/kim2024ijcai-llmem/}
}