| Package | Version | Wheel |
|---------|---------|-------|
| auto-gptq | 0.7.1+cu124 | auto_gptq-0.7.1+cu124-cp310-cp310-linux_aarch64.whl |
| awq | 0.1.0 | awq-0.1.0-py3-none-any.whl |
| awq-inference-engine | 0.0.0 | awq_inference_engine-0.0.0-cp310-cp310-linux_aarch64.whl |
| bitsandbytes | 0.39.1 | bitsandbytes-0.39.1-py3-none-any.whl |
| cuda-python | 12.4.0+0.g2be0aac.dirty | cuda_python-12.4.0+0.g2be0aac.dirty-cp310-cp310-linux_aarch64.whl |
| cupy | 13.0.0rc1 | cupy-13.0.0rc1-cp310-cp310-linux_aarch64.whl |
| exllamav2 | 0.0.15 | exllamav2-0.0.15-cp310-cp310-linux_aarch64.whl |
| flash-attn | 2.5.7 | flash_attn-2.5.7-cp310-cp310-linux_aarch64.whl |
| llama-cpp-python | 0.2.70 | llama_cpp_python-0.2.70-cp310-cp310-linux_aarch64.whl |
| mlc-chat | 0.1.0 | mlc_chat-0.1.0-cp310-cp310-linux_aarch64.whl |
| mlc-llm | 0.1.1 | mlc_llm-0.1.1-cp310-cp310-linux_aarch64.whl |
| onnxruntime-gpu | 1.19.0 | onnxruntime_gpu-1.19.0-cp310-cp310-linux_aarch64.whl |
| pycuda | 2024.1 | pycuda-2024.1-cp310-cp310-linux_aarch64.whl |
| tensorrt-llm | 0.11.0.dev0 | tensorrt_llm-0.11.0.dev0-cp310-cp310-linux_aarch64.whl |
| torch | 2.3.0 | torch-2.3.0-cp310-cp310-linux_aarch64.whl |
| torchaudio | 2.3.0+952ea74 | torchaudio-2.3.0+952ea74-cp310-cp310-linux_aarch64.whl |
| torchvision | 0.18.0a0+6043bc2 | torchvision-0.18.0a0+6043bc2-cp310-cp310-linux_aarch64.whl |
| tvm | 0.16.0 | tvm-0.16.0-cp310-cp310-linux_aarch64.whl |
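After installing wheels from this table, it can help to confirm they import cleanly and that the CUDA runtime is visible. The sketch below is a minimal check, not part of any of these packages: the module list is an assumed subset of the table, so trim it to whatever you actually installed.

```python
# Minimal sanity check for a few of the wheels listed above.
# The module names here are an assumed subset of the table; adjust to your install.
import importlib

modules = ["torch", "torchvision", "torchaudio", "onnxruntime"]

for name in modules:
    try:
        mod = importlib.import_module(name)
        print(f"{name} {getattr(mod, '__version__', '?')} imported OK")
    except ImportError as exc:
        print(f"{name}: import failed ({exc})")

# If torch imported, verify that it can see the CUDA runtime on the device.
try:
    import torch
    print("torch.cuda.is_available():", torch.cuda.is_available())
except ImportError:
    pass
```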