
Add AI interface: litellm

mrh, 1 year ago
Commit fde17d23b5
4 files changed, 223 insertions(+), 0 deletions(-)
  1. ai/api/chat.py (+0, -0)
  2. ai/client.py (+47, -0)
  3. ai/fallbacks.py (+118, -0)
  4. gpt.md (+58, -0)

+ 0 - 0
ai/api/chat.py


+ 47 - 0
ai/client.py

@@ -0,0 +1,47 @@
+import datetime
+import os
+import pathlib
+import re
+import sys
+import time
+
+sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+
+from dp.page import page
+from douyin import chat_test, base, conversation, user
+from conf.config import logger, OUTPUT, WORK_DIR
+from database.config import ai_yunying_db, minio_block
+from dataset import Table
+from DrissionPage import ChromiumPage
+from DrissionPage._elements.chromium_element import ChromiumElement
+from DrissionPage._units.listener import DataPacket
+import jsonpath
+from prefect import flow, task
+from prefect.tasks import Task, TaskRun
+from prefect.flows import Flow
+from prefect.states import State, StateType
+from ai.api import chat
+from g4f import client
+from g4f.client import Client
+from litellm import completion, get_llm_provider, get_supported_openai_params
+
+tab = base.tab
+## set ENV variables
+os.environ["OPENAI_API_KEY"] = "anything" #key is not used for proxy
+
+messages = [{ "content": "who are you","role": "user"}]
+
+response = completion(
+    model='gpt-3.5-turbo-16k', 
+    messages=messages,
+    # api_base="http://pc:8003/v1",
+    api_base="http://sv-v2:1337/v1",
+    custom_llm_provider="openai" # litellm will use the openai.ChatCompletion to make the request
+)
+print(response)
+# f = open('user.html', 'w')
+# f.write(tab.html)
+# logger.info(f"{tab.html}") 

+ 118 - 0
ai/fallbacks.py

@@ -0,0 +1,118 @@
+from litellm import Router
+import os
+from conf.config import G4F_API
+from litellm import embedding
+from litellm import completion
+
+g4f_api_base = G4F_API + 'v1'
+g4f_sv_v2_api_base = "http://sv-v2:1337/v1"
+
+# response = embedding(
+#   model = "text-embedding-ada-002",     # add `openai/` prefix to model so litellm knows to route to OpenAI
+#   api_base="https://api.chatanywhere.com.cn" ,      # set API Base of your Custom OpenAI Endpoint
+#   api_key= "sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k",
+#   headers={"Authorization": "Bearer sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k"},
+#   input=["good morning from litellm"]
+# )
+# print(f"response embedding {response}")
+
+
+# https://docs.litellm.ai/docs/routing#fallbacks
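+# NOTE: the `+ 'bad'` appended to the first three api_base values below appears intentional,
+# breaking those endpoints so that the Router's fallback chain gets exercised.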
+model_list = [
+    { # list of model deployments 
+        "model_name": "g4f/gpt-3.5-turbo", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "api_key": "123",
+            "api_base": g4f_api_base + 'bad'
+        },
+    }, 
+    { # list of model deployments 
+        "model_name": "g4f-sv-v2/gpt-3.5-turbo", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "api_key": "123",
+            "api_base": g4f_sv_v2_api_base + 'bad'
+        },
+    }, 
+    { # list of model deployments 
+        "model_name": "chatanywhere/gpt-3.5-turbo", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "api_key": "sk-SG7fZHFePis7ymx3Em58eb0prCjgUE2t3mplbU0iq54IBo1k",
+            "api_base": "https://api.chatanywhere.com.cn/v1" + 'bad',
+        },
+    }, 
+    {
+        "model_name": "qwen", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "api_key": "123",
+            "api_version": "",
+            "api_base": "http://sv-v2:8002/v1",
+            # litellm_params can be inspected in litellm.main's completion()
+            "extra_headers": {
+                "Authorization": "Bearer OJVVpfICFrI*UaHrrXUaI5WM5mLOWAKQsEtTrD7Jugq8YUttvJSy3R0QqXzmvDrK0"
+            }
+        },
+    }, 
+    {
+        "model_name": "kimi", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "api_key": "123",
+            "api_base": "http://sv-v2:8004/v1/",
+            # litellm_params can be inspected in litellm.main's completion()
+            "extra_headers": {
+                "Authorization": "Bearer eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJ1c2VyLWNlbnRlciIsImV4cCI6MTcyMDgxMzc3OSwiaWF0IjoxNzEzMDM3Nzc5LCJqdGkiOiJjb2RlM2twa3FxNHR0cmw3djRlMCIsInR5cCI6InJlZnJlc2giLCJzdWIiOiJjb2FxN3Jwa3FxNHR0cmkxdHRiZyIsInNwYWNlX2lkIjoiY29hcTdycGtxcTR0dHJpMXR0YjAiLCJhYnN0cmFjdF91c2VyX2lkIjoiY29hcTdycGtxcTR0dHJpMXR0YWcifQ.ZaCKPbKgbmwnkcSqn-w3s_fMlx0C956guZL9dO7bPMyU2GQ8be0DLoAG82a2IkMMU2XgKIRBwNOtkGX9fHMiTg"
+            }
+        },
+    }, 
+    {
+        "model_name": "chatgml4", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "api_key": "123",
+            "api_base": "http://sv-v2:8003/v1/",
+            # litellm_params can be inspected in litellm.main's completion()
+            "extra_headers": {
+                "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmcmVzaCI6ZmFsc2UsImlhdCI6MTcxMDgyNTY0MCwianRpIjoiNDUzMzMyNGUtNzI1ZS00ZWM1LTgyMWYtMDM4ODgzMGVkMTAxIiwidHlwZSI6InJlZnJlc2giLCJzdWIiOiI5NjRjZjI4NGQzN2E0MWVlOWE3ZmMxNDMxYWU4YjBjZiIsIm5iZiI6MTcxMDgyNTY0MCwiZXhwIjoxNzI2Mzc3NjQwLCJ1aWQiOiI2NGQyYjAxMjU3MDlhODQ4YTMyMTE0ZGIiLCJ1cGxhdGZvcm0iOiIiLCJyb2xlcyI6WyJ1bmF1dGhlZF91c2VyIl19.zVf7GZpwICO0jhhR1dpqhLh6xoufmJmHGMmOVhL1fEA"
+            }
+        },
+    }, 
+    {
+        "model_name": "yiyan", # openai model name 
+        "litellm_params": { # params for litellm completion/embedding call 
+            "model": "gpt-3.5-turbo", 
+            "provider":"YiYan4",
+            "api_key": "123",
+            "api_version": "",
+            "api_base": "http://pc:8003/v1/",
+            # litellm_params can be inspected in litellm.main's completion()
+        },
+    }, 
+]
+
+def main():
+    router = Router(
+        model_list=model_list,
+        fallbacks=[
+            {"g4f/gpt-3.5-turbo": ["gpt-3.5-turbo"]},
+            {"g4f-sv-v2/gpt-3.5-turbo": ["gpt-3.5-turbo"]},
+            {"chatanywhere/gpt-3.5-turbo": ["gpt-3.5-turbo"]},
+            {"qwen": ["gpt-3.5-turbo"]},
+            {"kimi": ["gpt-3.5-turbo"]},
+            {"chatgml4": ["gpt-3.5-turbo"]},
+        ],
+        context_window_fallbacks=[{"kimi": ["gpt-3.5-turbo"]}, {"chatgml4": ["gpt-3.5-turbo"]}],
+        set_verbose=True,
+    )
+    user_message = "你是"
+    messages = [{"content": user_message, "role": "user"}]
+
+    # normal fallback call 
+    # response = router.completion(model="g4f/gpt-3.5-turbo", messages=messages,)
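+    # note: the Router configured above is not exercised by the call below; model_list is passed
+    # directly to litellm.completion, which picks the deployment whose model_name matches "yiyan"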
+    response = completion(model_list=model_list, model="yiyan", messages=messages,)
+
+
+    print(f"response: {response}")
+
+if __name__ == "__main__":
+    main()

+ 58 - 0
gpt.md

@@ -1,3 +1,61 @@
+## DrissionPage
+Note that the browser framework I am using is DrissionPage; its element-lookup syntax is as follows:
+
+```python
+ele2 = ele1.ele('xpath://div')
+# get the 2nd-level parent of ele1
+ele2 = ele1.parent(2)
+
+# get the ancestor of ele1 whose id is "id1"
+ele2 = ele1.parent('#id1')
+# get the first sibling element after ele1
+ele2 = ele1.next()
+
+# get the 3rd sibling element after ele1
+ele2 = ele1.next(3)
+# get the first sibling element before ele1
+ele2 = ele1.prev()
+
+# get the 3rd sibling element before ele1
+ele2 = ele1.prev(3)
+# get the 3rd element before ele1
+ele2 = ele1.before(3)
+
+# get the 3rd div element before ele1
+ele2 = ele1.before('tag:div', 3)
+
+# get the text of the first text node before ele1
+txt = ele1.before('xpath:text()', 1)
+```
+
+```html
+<div class="EUJzwIMS">
+    <div class="aaXMdnbr"><img class="kcsILzHu lxo0LycR"
+            src="https://p3.douyinpic.com/aweme/100x100/aweme-avatar/tos-cn-i-0813_66c4e34ae8834399bbf967c3d3c919db.jpeg?from=3782654143">
+    </div>
+    <div class="_bWlYXcH hTpiS0Ay mrl0UYGr">
+        <div class="biFaMC6S">
+            <div class="gZdlhsqq">程序员马工</div>
+        </div>
+        <div class="wiYIhq4q">
+            <div class="DDOhSZqR C0_yyLLi">
+                <pre class="MnyOYvbN">1</pre>
+            </div>
+            <div class="skNuRdW_">&nbsp;·&nbsp;刚刚</div>
+        </div>
+    </div>
+    <div class="hcPUqxqn">
+        <div class="tJjNB1rt ge6Vyp_V o9h6ErL7">1</div>
+    </div>
+</div>
+```
+Given the element above, I have already located the unread-message badge ("1") with ele_msg_red_pot = ele_list_dlg.ele('xpath://div[@class="hcPUqxqn"]'). Please help me use relative positioning (e.g. ele_msg_red_pot.prev().child(1)) or xpath syntax to find the name, avatar, and message. The final message format should be: {"name":"程序员马工", "avator":"urlxxx", "new_msg":2, "time":"刚刚"}
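+
+As a rough sketch under those assumptions (the helper name `parse_msg_item` and the class-based inner locators are illustrative only, not verified against the live page), the fields could be pulled out via relative positioning like this:
+
+```python
+def parse_msg_item(ele_msg_red_pot):
+    # ele_msg_red_pot is the div.hcPUqxqn unread-badge element located above
+    info_col = ele_msg_red_pot.prev()     # middle column with name, count and time
+    avatar_col = ele_msg_red_pot.prev(2)  # left column holding the avatar <img>
+    return {
+        "name": info_col.child(1).text,                          # e.g. "程序员马工"
+        "avator": avatar_col.ele('xpath:.//img').attr('src'),    # avatar URL
+        "new_msg": int(ele_msg_red_pot.text),                    # unread count, e.g. 1
+        "time": info_col.ele('xpath:.//div[@class="skNuRdW_"]').text.strip(' ·\xa0'),  # e.g. "刚刚"
+    }
+```
+
+Calling parse_msg_item(ele_msg_red_pot) on the snippet above should then yield the target dict.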
+
+
+
+
+
+
 ## Concept
 Suppose we want to build a livestream statistics tool; it would produce data like the following:
 - Each streamer opens a separate live room; there may be tens of thousands of streamers, i.e. tens of thousands of live rooms.