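# Scratch/test script: appears to combine DrissionPage-based Douyin automation
# helpers with a quick litellm call against a local OpenAI-compatible proxy.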
import datetime
import os
import pathlib
import re
import sys
import time
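# Add the directory two levels up (presumably the project root) to sys.path
# so the local packages imported below resolve when running this file directly.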
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from dp.page import page
from douyin import chat_test, base, conversation, user
from conf.config import logger, OUTPUT, WORK_DIR
from database.config import ai_yunying_db, minio_block
from dataset import Table
from DrissionPage import ChromiumPage
from DrissionPage._elements.chromium_element import ChromiumElement
from DrissionPage._units.listener import DataPacket
import jsonpath
from prefect import flow, task
from prefect.tasks import Task, TaskRun
from prefect.flows import Flow
from prefect.states import State, StateType
from ai.api import chat
from g4f import client
from g4f.client import Client
from litellm import completion, get_llm_provider, get_supported_openai_params

tab = base.tab
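# Smoke test: send an OpenAI-style chat completion through a local
# OpenAI-compatible proxy via litellm; no real API key is required.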
## set ENV variables
os.environ["OPENAI_API_KEY"] = "anything"  # key is not used for the proxy

messages = [{"content": "who are you", "role": "user"}]

response = completion(
    model="gpt-3.5-turbo-16k",
    messages=messages,
    # api_base="http://pc:8003/v1",
    api_base="http://sv-v2:1337/v1",
    custom_llm_provider="openai",  # litellm will use openai.ChatCompletion to make the request
)
print(response)
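# The reply follows the OpenAI response schema; if only the message body is
# wanted, it is usually available at response.choices[0].message.content.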
# Debug helpers: dump the current tab's HTML for inspection.
# f = open('user.html', 'w')
# f.write(tab.html)
# logger.info(f"{tab.html}")