```python
from pathlib import Path
import sys

# To keep things decoupled (microservices), the grandparent directory may need to
# run as a separate process; this directory is a standalone UI project.
sys.path.append(str(Path(r'G:\code\upwork\zhang_crawl_bio\ui\backend')))

from src.services.subscription_manager import SubscriptionManager
from utils.config import config, APP_PATH
from utils.process_mgr import process_manager
import asyncio
from utils.logu import get_logger
from routers.worker import health_check

logger = get_logger('mytests', file=True)

async def main():
    python_exe = sys.executable
    WORKER_DIR_BASE = APP_PATH.parent.parent
    logger.info(f"{WORKER_DIR_BASE}")

    # Start Redis first.
    redis_cmd = [config.redis_exe]
    logger.info(f"{redis_cmd}")
    flower_db = WORKER_DIR_BASE / 'output' / 'flower_db'
    await process_manager.start_process("redis_cmd", redis_cmd, cwd=WORKER_DIR_BASE)
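    # The Celery app presumably points at this Redis instance as its broker and
    # result backend, so it has to be up before Flower and the workers connect.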
    # Equivalent shell command:
    # G:\code\upwork\zhang_crawl_bio\crawl_env\python.exe -m celery -A worker.celery.app flower --address=127.0.0.1 --persistent=True --db=".\output\flower_db"
    flower_cmd = [
        python_exe, '-m', 'celery', '-A', 'worker.celery.app', 'flower',
        '--address=127.0.0.1', '--persistent=True', f'--db={flower_db}',
    ]
    await process_manager.start_process("flower", flower_cmd, cwd=WORKER_DIR_BASE)
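    # --persistent=True makes Flower save task state to the --db file, so the
    # dashboard's history survives restarts.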
    flower_process = process_manager.processes.get("flower").get('process')

    # One Celery worker per queue: search, crawl, convert.
    worker_list = ['search', 'crawl', 'convert']
    for worker_name in worker_list:
        await process_manager.start_process(
            f"{worker_name}_worker",
            [python_exe, '-m', 'celery', '-A', 'worker.celery.app', 'worker',
             '-Q', f'{worker_name}_queue', f'--hostname={worker_name}@%h'],
            cwd=WORKER_DIR_BASE,
        )
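    # At this point one worker per queue is running; Celery expands %h in
    # --hostname to the machine's hostname, so the nodes appear as
    # search@<host>, crawl@<host>, and convert@<host>.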
    logger.info(f"{flower_process}")
    # Block until Flower exits; the workers keep running alongside it.
    await flower_process.wait()

    # Optional probe of the worker API (currently not executed):
    # res = await health_check()
    # print(res)

if __name__ == "__main__":
    asyncio.run(main())
```
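`utils.process_mgr` is not shown here. A minimal stand-in with the same surface, assuming `start_process` wraps `asyncio.create_subprocess_exec` and records each child under `processes[name]['process']` (the access pattern the script relies on), might look like:

```python
import asyncio


class ProcessManager:
    """Hypothetical sketch of utils.process_mgr, not the project's actual code."""

    def __init__(self):
        # name -> {'process': asyncio.subprocess.Process, 'cmd': [...]}
        self.processes = {}

    async def start_process(self, name, cmd, cwd=None):
        # Spawn the child without blocking the event loop.
        process = await asyncio.create_subprocess_exec(*cmd, cwd=cwd)
        self.processes[name] = {'process': process, 'cmd': cmd}
        return process


process_manager = ProcessManager()
```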
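For tasks to actually land in `search_queue`, `crawl_queue`, and `convert_queue`, the Celery app has to route them there. A sketch using Celery's `task_routes` setting; the task module paths and broker URL below are placeholders, not confirmed by this script:

```python
from celery import Celery

app = Celery('worker', broker='redis://127.0.0.1:6379/0')

# Glob patterns map task names to the queue each dedicated worker consumes.
app.conf.task_routes = {
    'worker.tasks.search.*': {'queue': 'search_queue'},
    'worker.tasks.crawl.*': {'queue': 'crawl_queue'},
    'worker.tasks.convert.*': {'queue': 'convert_queue'},
}
```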