|
|
@@ -1,12 +1,16 @@
|
|
|
import asyncio
|
|
|
+import json
|
|
|
import logging
|
|
|
from pathlib import Path
|
|
|
import re
|
|
|
+from pydantic import BaseModel
|
|
|
+from scrapling import Adaptor
|
|
|
from worker.search_engine.camoufox_broswer import BrowserConfig, BrowserCore
|
|
|
from playwright.async_api import Browser, Page, async_playwright
|
|
|
from playwright.sync_api import sync_playwright
|
|
|
from mylib.logu import logger
|
|
|
-
|
|
|
+from mylib.base import save_to_file
|
|
|
+from config.settings import OUTPUT_DIR
|
|
|
# ------------------- Search Engine Implementation -------------------
class GoogleSearchHandler():
    """Dedicated search-engine handler for Google (driven via a CDP-connected page)."""
|
|
|
@@ -22,29 +26,79 @@ class GoogleSearchHandler():
|
|
|
"""执行搜索操作"""
|
|
|
try:
|
|
|
await self.goto_home_page()
|
|
|
- await self.page.fill('text1area[aria-label="Search"]', query, timeout=3000)
|
|
|
+ await self.page.fill('textarea[aria-label="Search"]', query, timeout=10000)
|
|
|
await self.page.press('textarea[aria-label="Search"]', 'Enter')
|
|
|
+ # 等待加载完成
|
|
|
+ await self.page.wait_for_load_state(state='load', timeout=10000)
|
|
|
return await self.page.content()
|
|
|
except Exception as e:
|
|
|
- logger.error(f"Search failed: {str(e)}")
|
|
|
+ logger.exception(f"Search failed: {str(e)}")
|
|
|
return {"status": "error", "message": str(e)}
|
|
|
+ def get_search_result_ele(self, html_content:str):
|
|
|
+ include = {
|
|
|
+ 'search_div': '//div[@id="search"]',
|
|
|
+ 'cite': './/cite'
|
|
|
+ }
|
|
|
+ exclude = {
|
|
|
+ 'people_also_ask': './/*[@data-initq]'
|
|
|
+ }
|
|
|
+ selector_xpath = {
|
|
|
+ 'include': include,
|
|
|
+ 'exclude': exclude
|
|
|
+ }
|
|
|
+ res = {}
|
|
|
+ page = Adaptor(html_content)
|
|
|
+ body = Adaptor(page.body)
|
|
|
+ search_div = body.xpath(selector_xpath['include']['search_div'])
|
|
|
+ res['search_div'] = True if search_div else False
|
|
|
+ if search_div:
|
|
|
+ # 获取所有 a 标签
|
|
|
+ # result_list = search_div.xpath('.//span/a/h3')
|
|
|
+ result_list = search_div.xpath('//*[@data-snc]')
|
|
|
+ logger.info(f"result_list {len(result_list)}")
|
|
|
+ # h3_list = [item for item in result_list if item.xpath('//h3')]
|
|
|
+ search_res = {'total_count': len(result_list), 'results': []}
|
|
|
+ for result_item in result_list:
|
|
|
+ # logger.info(f"result_item {type(result_item)} {result_item}")
|
|
|
+ result = {}
|
|
|
+ if len(result_item.children) < 2:
|
|
|
+ continue
|
|
|
+ title_ele = result_item.children[0]
|
|
|
+ if title_ele:
|
|
|
+ url = title_ele.xpath_first('.//a/@href')
|
|
|
+ result['url'] = url
|
|
|
+ title = title_ele.xpath_first('.//h3/text()')
|
|
|
+ result['title'] = title
|
|
|
|
|
|
+ content_ele = result_item.children[1]
|
|
|
+ logger.info(f"content_ele {content_ele}")
|
|
|
+ if content_ele:
|
|
|
+ content_list = content_ele.xpath('.//span/text()')
|
|
|
+ result['content'] = ''.join(content_list)
|
|
|
+ logger.info(f"result {result}")
|
|
|
+ if result:
|
|
|
+ search_res['results'].append(result)
|
|
|
+ return search_res
|
|
|
async def aio_main(config: BrowserConfig = None):
    """Connect to the browser via CDP, run a test search, and parse results.

    Args:
        config: Browser configuration. A fresh ``BrowserConfig`` is created
            per call when omitted — the previous ``= BrowserConfig()`` default
            was evaluated once at definition time and shared across calls.

    Raises:
        Exception: re-raised after logging when any step fails.
    """
    if config is None:
        config = BrowserConfig()
    try:
        core = await BrowserCore.get_instance(config)
        search_handler = GoogleSearchHandler(core.page)

        # Run a test search and persist the raw HTML for offline analysis.
        content = await search_handler.search('python playwright')
        save_path = save_to_file(content, OUTPUT_DIR /'analyze'/ 'test.html')
        logger.info(f"save_path {save_path}")
        logger.info(f"当前页面: {search_handler.page.url}")
        res = search_handler.get_search_result_ele(content)
        # Pretty-print the parsed results.
        logger.info(f"{json.dumps(res, indent=4, ensure_ascii=False)}")
        # Keep the CDP connection alive.
        while True:
            await asyncio.sleep(5)

    except Exception as e:
        # logger.exception preserves the traceback, unlike logger.error.
        logger.exception(f"失败: {str(e)}")
        raise
|
|
|
def connet_ws():
|
|
|
with sync_playwright() as p:
|
|
|
@@ -59,8 +113,15 @@ def connet_ws():
|
|
|
print(page.url)
|
|
|
return
|
|
|
|
|
|
def analyze(html_path: Path = Path(r"K:\code\upwork\zhang_crawl_bio\output\analyze\test.html")):
    """Parse a previously saved Google results page offline.

    Args:
        html_path: Saved HTML file to analyze; defaults to the local test
            capture written by ``aio_main`` (path generalized into a
            parameter so other captures can be analyzed too).
    """
    # Parser only — no live page object is needed, so pass None.
    search_handler = GoogleSearchHandler(None)
    # Explicit UTF-8: the capture is web HTML; the locale default may differ.
    res = search_handler.get_search_result_ele(html_path.read_text(encoding="utf-8"))
    logger.info(f"{json.dumps(res, indent=4, ensure_ascii=False)}")
|
|
|
+
|
|
|
def main():
    """Entry point: analyze the saved results page offline."""
    analyze()
    # Live browser run, currently disabled:
    # asyncio.run(aio_main())
|
|
|
|
|
|
# Script entry-point guard.
if __name__ == "__main__":
    main()
|