import os

from litellm import Router, embedding, completion
from conf.config import G4F_API

g4f_api_base = G4F_API + 'v1'
g4f_sv_v2_api_base = "http://sv-v2:1337/v1"
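# Note (assumption): G4F_API is expected to end with a trailing slash, since
# 'v1' is concatenated directly above. A defensive variant would be:
# g4f_api_base = G4F_API.rstrip('/') + '/v1'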
# response = embedding(
#     model="text-embedding-ada-002",  # add `openai/` prefix to the model so litellm knows to route to OpenAI
#     api_base="https://api.chatanywhere.com.cn",  # API base of your custom OpenAI endpoint
#     api_key="sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k",
#     headers={"Authorization": "Bearer sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k"},
#     input=["good morning from litellm"]
# )
# print(f"response embedding {response}")
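# A runnable sketch of the embedding call above (assumptions: the chatanywhere
# endpoint is OpenAI-compatible and reachable, and the key above is valid; the
# `openai/` prefix mentioned in the comment is applied here so litellm routes
# the custom api_base through its OpenAI provider).
def embedding_demo():
    response = embedding(
        model="openai/text-embedding-ada-002",
        api_base="https://api.chatanywhere.com.cn/v1",
        api_key="sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k",
        input=["good morning from litellm"],
    )
    print(f"response embedding {response}")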
# https://docs.litellm.ai/docs/routing#fallbacks
model_list = [  # list of model deployments
    {
        "model_name": "g4f/gpt-3.5-turbo",  # openai model name
        "litellm_params": {  # params for the litellm completion/embedding call
            "model": "gpt-3.5-turbo",
            "api_key": "123",
            # the extra 'bad' suffix deliberately breaks this base URL, apparently to exercise fallback routing
            "api_base": g4f_api_base + 'bad',
        },
    },
    {
        "model_name": "g4f-sv-v2/gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": "123",
            "api_base": g4f_sv_v2_api_base + 'bad',
        },
    },
    {
        "model_name": "chatanywhere/gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": "sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k",
            "api_base": "https://api.chatanywhere.com.cn/v1" + 'bad',
        },
    },
    {
        "model_name": "qwen",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": "123",
            "api_version": "",
            "api_base": "http://sv-v2:8002/v1",
            # litellm_params can be inspected in litellm.main.completion
            "extra_headers": {
                "Authorization": "Bearer OJVVpfICFrI*UaHrrXUaI5WM5mLOWAKQsEtTrD7Jugq8YUttvJSy3R0QqXzmvDrK0"
            },
        },
    },
    {
        "model_name": "kimi",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": "123",
            "api_base": "http://sv-v2:8004/v1/",
            # litellm_params can be inspected in litellm.main.completion
            "extra_headers": {
                "Authorization": "Bearer eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJ1c2VyLWNlbnRlciIsImV4cCI6MTcyMDgxMzc3OSwiaWF0IjoxNzEzMDM3Nzc5LCJqdGkiOiJjb2RlM2twa3FxNHR0cmw3djRlMCIsInR5cCI6InJlZnJlc2giLCJzdWIiOiJjb2FxN3Jwa3FxNHR0cmkxdHRiZyIsInNwYWNlX2lkIjoiY29hcTdycGtxcTR0dHJpMXR0YjAiLCJhYnN0cmFjdF91c2VyX2lkIjoiY29hcTdycGtxcTR0dHJpMXR0YWcifQ.ZaCKPbKgbmwnkcSqn-w3s_fMlx0C956guZL9dO7bPMyU2GQ8be0DLoAG82a2IkMMU2XgKIRBwNOtkGX9fHMiTg"
            },
        },
    },
    {
        "model_name": "chatgml4",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": "123",
            "api_base": "http://sv-v2:8003/v1/",
            # litellm_params can be inspected in litellm.main.completion
            "extra_headers": {
                "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmcmVzaCI6ZmFsc2UsImlhdCI6MTcxMDgyNTY0MCwianRpIjoiNDUzMzMyNGUtNzI1ZS00ZWM1LTgyMWYtMDM4ODgzMGVkMTAxIiwidHlwZSI6InJlZnJlc2giLCJzdWIiOiI5NjRjZjI4NGQzN2E0MWVlOWE3ZmMxNDMxYWU4YjBjZiIsIm5iZiI6MTcxMDgyNTY0MCwiZXhwIjoxNzI2Mzc3NjQwLCJ1aWQiOiI2NGQyYjAxMjU3MDlhODQ4YTMyMTE0ZGIiLCJ1cGxhdGZvcm0iOiIiLCJyb2xlcyI6WyJ1bmF1dGhlZF91c2VyIl19.zVf7GZpwICO0jhhR1dpqhLh6xoufmJmHGMmOVhL1fEA"
            },
        },
    },
    {
        "model_name": "yiyan",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "provider": "YiYan4",
            "api_key": "123",
            "api_version": "",
            "api_base": "http://pc:8003/v1/",
        },
    },
]
router = Router(
    model_list=model_list,
    fallbacks=[
        {"g4f/gpt-3.5-turbo": ["gpt-3.5-turbo"]},
        {"g4f-sv-v2/gpt-3.5-turbo": ["gpt-3.5-turbo"]},
        {"chatanywhere/gpt-3.5-turbo": ["gpt-3.5-turbo"]},
        {"qwen": ["gpt-3.5-turbo"]},
        {"kimi": ["gpt-3.5-turbo"]},
        {"chatgml4": ["gpt-3.5-turbo"]},
    ],
    context_window_fallbacks=[{"kimi": ["gpt-3.5-turbo"]}, {"chatgml4": ["gpt-3.5-turbo"]}],
    set_verbose=True,
)
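# A minimal sketch of exercising the fallbacks configured above: calling the
# "g4f/gpt-3.5-turbo" deployment (whose api_base is deliberately broken) should
# make the Router retry its fallback group. Note the fallback target name
# "gpt-3.5-turbo" is assumed to resolve to a deployment; if routing cannot find
# it, point the fallbacks at one of the model_name values in model_list instead.
def fallback_demo():
    messages = [{"content": "hello from litellm", "role": "user"}]
    try:
        response = router.completion(model="g4f/gpt-3.5-turbo", messages=messages)
        print(f"fallback response: {response}")
    except Exception as exc:  # raised once every deployment, including fallbacks, has failed
        print(f"all fallbacks exhausted: {exc}")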
def main():
    user_message = "你是"  # short test prompt in Chinese ("you are ...")
    messages = [{"content": user_message, "role": "user"}]
    # normal fallback call through the router:
    # response = router.completion(model="g4f/gpt-3.5-turbo", messages=messages)
    response = completion(model_list=model_list, model="yiyan", messages=messages)
    print(f"response: {response}")
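# An async variant, offered as a sketch: Router also exposes acompletion(),
# which is useful when several of these OpenAI-compatible backends are queried
# concurrently. The "qwen" deployment name comes from model_list above; the
# helper is illustrative and is not wired into main().
import asyncio


async def async_demo():
    messages = [{"content": "ping", "role": "user"}]
    # acompletion applies the same fallback rules configured on the Router
    response = await router.acompletion(model="qwen", messages=messages)
    print(f"async response: {response}")


# To try it: asyncio.run(async_demo())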
if __name__ == "__main__":
    main()