Zeng et al. "ABQ-LLM: Arbitrary-Bit Quantized Inference Acceleration for Large Language Models." AAAI Conference on Artificial Intelligence, 2025. doi:10.1609/aaai.v39i21.34385
Markdown
[Zeng et al. "ABQ-LLM: Arbitrary-Bit Quantized Inference Acceleration for Large Language Models." AAAI Conference on Artificial Intelligence, 2025.](https://mlanthology.org/aaai/2025/zeng2025aaai-abq/) doi:10.1609/aaai.v39i21.34385
BibTeX
@inproceedings{zeng2025aaai-abq,
title = {{ABQ-LLM: Arbitrary-Bit Quantized Inference Acceleration for Large Language Models}},
author = {Zeng, Chao and Liu, Songwei and Xie, Yusheng and Liu, Hong and Wang, Xiaojian and Wei, Miao and Yang, Shu and Chen, Fangmin and Mei, Xing},
booktitle = {AAAI Conference on Artificial Intelligence},
year = {2025},
pages = {22299--22307},
doi = {10.1609/aaai.v39i21.34385},
url = {https://mlanthology.org/aaai/2025/zeng2025aaai-abq/}
}
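As a usage note, here is a minimal sketch of citing this entry from a LaTeX document. The file name `refs.bib` and the `plain` bibliography style are illustrative choices; the key `zeng2025aaai-abq` comes from the BibTeX entry above.

```latex
% Minimal sketch: save the BibTeX entry above in refs.bib,
% then cite it by its key. Compile with pdflatex + bibtex.
\documentclass{article}
\begin{document}
ABQ-LLM~\cite{zeng2025aaai-abq} targets arbitrary-bit quantized
inference acceleration for large language models.
\bibliographystyle{plain}  % illustrative style choice
\bibliography{refs}        % refs.bib holds the entry above
\end{document}
```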