| Name | Last commit message | Last commit date |
| --- | --- | --- |
| __init__.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| anthropic_provider.py | feat: claude paid optimize (#890) | 2023-08-17 16:56:20 +08:00 |
| azure_openai_provider.py | feat: optimize error raise (#820) | 2023-08-13 00:59:36 +08:00 |
| base.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| chatglm_provider.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| hosted.py | feat: claude paid optimize (#890) | 2023-08-17 16:56:20 +08:00 |
| huggingface_hub_provider.py | feat: adjust hf max tokens (#979) | 2023-08-23 22:24:50 +08:00 |
| localai_provider.py | feat: add LocalAI local embedding model support (#1021) | 2023-08-29 22:22:02 +08:00 |
| minimax_provider.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| openai_provider.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| openllm_provider.py | fix: remove openllm pypi package because of this package too large (#931) | 2023-08-21 02:12:28 +08:00 |
| replicate_provider.py | fix: replicate text generation model validate (#923) | 2023-08-19 21:40:42 +08:00 |
| spark_provider.py | feat: add spark v2 support (#885) | 2023-08-17 15:08:57 +08:00 |
| tongyi_provider.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| wenxin_provider.py | feat: server multi models support (#799) | 2023-08-12 00:57:00 +08:00 |
| xinference_provider.py | feat: optimize xinference request max token key and stop reason (#998) | 2023-08-24 18:11:15 +08:00 |