| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171 |
import csv
import logging
import sys
from pathlib import Path
from urllib.parse import quote

import chardet

from brand_add_url_link import create_asin_link, create_hyperlink
from mylib.logging_config import setup_logging
from mylib.pdfzh_translator import OpenAITranslator
# Configure the project's shared logging once, at import time, and grab
# this module's logger.
setup_logging()
logger = logging.getLogger(__name__)
def detect_encoding(file_path):
    """Guess the text encoding of *file_path* with chardet.

    Only a bounded prefix of the file is read; that is enough for
    detection and keeps memory use flat for large files (the original
    read the whole file into memory).

    Returns:
        The detected encoding name, or None when the file cannot be read
        or chardet has no answer. Returning None (instead of the original
        sys.exit(1)) lets read_csv() fall back to its own candidate
        encodings, which it already does when the result is falsy.
    """
    try:
        with open(file_path, 'rb') as f:
            # 64 KiB is plenty for chardet's statistics.
            sample = f.read(64 * 1024)
        result = chardet.detect(sample)
        return result['encoding']
    except OSError as e:
        logger.error(f"Error detecting encoding for {file_path}: {e}")
        return None
def read_csv(file_path):
    """Read a CSV file, trying the detected encoding first, then fallbacks.

    The chardet guess (if any) is tried before a list of common
    Unicode/CJK encodings. Exits the process with status 1 when no
    encoding can decode the file.

    Returns:
        The file contents as a list of rows (each row a list of strings).
    """
    encodings_to_try = ['utf-8-sig', 'gb18030', 'shift_jis', 'euc-jp']
    detected_encoding = detect_encoding(file_path)
    logger.info(f"Detected encoding: {detected_encoding}")

    if detected_encoding:
        encodings_to_try.insert(0, detected_encoding)

    # dict.fromkeys de-duplicates while preserving order, so the same
    # encoding is not decoded twice when chardet's guess is already one
    # of the fallbacks.
    for encoding in dict.fromkeys(encodings_to_try):
        try:
            # newline='' is the csv-module-recommended way to open files
            # so embedded newlines inside quoted fields survive.
            with open(file_path, 'r', encoding=encoding, newline='') as f:
                return list(csv.reader(f))
        except UnicodeDecodeError:
            # Wrong guess -- try the next candidate.
            continue
        except Exception as e:
            logger.error(f"Error with encoding {encoding}: {e}")
            continue

    logger.error("Failed to read file with all attempted encodings")
    sys.exit(1)
def insert_empty_columns(data, column_indices):
    """Insert one empty cell immediately after each given column, in every row.

    Args:
        data: list of rows (lists of strings); modified in place.
        column_indices: column positions to insert after. Unlike the
            original (which called list.sort() in place), the caller's
            list is left untouched.

    Returns:
        The same ``data`` object with the blank cells inserted.
    """
    try:
        # Walk indices from largest to smallest so an earlier insertion
        # does not shift the position of a later one. sorted() returns a
        # new list, so the caller's column_indices is not mutated.
        ordered = sorted(column_indices, reverse=True)
        for row in data:
            for index in ordered:
                row.insert(index + 1, '')
        return data
    except Exception as e:
        logger.error(f"Error inserting empty columns: {e}")
        sys.exit(1)
def process_batch_translations(data, search_term_index, category_indices):
    """Translate search-term and category cells in bulk and add search links.

    The first two rows are treated as headers and skipped. For each data
    row: the translation of ``row[search_term_index]`` is written to the
    column right after it, the original term is replaced with an
    Amazon.co.jp search hyperlink, and each category column's translation
    is written to the column right after it.

    Args:
        data: whole sheet as a list of rows; modified in place.
        search_term_index: column index of the search term.
        category_indices: column indices of the category text.

    Returns:
        The same ``data`` object with translations and links filled in.
        Exits the process with status 1 on failure.
    """
    try:
        translator = OpenAITranslator("openai", "zh-CN", "en", "gpt-3.5-turbo")

        body_rows = data[2:]  # skip the two header rows

        # Collect the texts to translate. Categories are gathered
        # ROW-major (all categories of row 0, then row 1, ...) so the
        # write-back arithmetic below can slice the flat result per row.
        # BUG FIX: the original collected them COLUMN-major but read them
        # back row-major, so translations landed in the wrong cells
        # whenever there was more than one row and one category column.
        search_terms = [row[search_term_index] for row in body_rows]
        categories = [row[idx] for row in body_rows for idx in category_indices]

        logger.info("Starting batch translations...")
        search_translations = translator.translate(search_terms)
        category_translations = translator.translate(categories)
        logger.info("Batch translations completed")

        n_cats = len(category_indices)
        for i, row in enumerate(body_rows):
            try:
                # Translation goes into the blank column after the term.
                row[search_term_index + 1] = search_translations[i]

                # Replace the term with a hyperlink to an Amazon.co.jp
                # search; quote() keeps spaces/CJK characters valid in
                # the query string.
                term = row[search_term_index]
                amazon_url = f"https://www.amazon.co.jp/s?k={quote(term)}"
                row[search_term_index] = create_hyperlink(term, amazon_url)

                # Row i's categories occupy a contiguous slice of the
                # flat translation list.
                for j, cat_index in enumerate(category_indices):
                    row[cat_index + 1] = category_translations[i * n_cats + j]

            except Exception as e:
                # i + 2 == absolute row number, matching the original log.
                logger.error(f"Error processing row {i + 2}: {e}")
                sys.exit(1)

        return data
    except Exception as e:
        logger.error(f"Error in batch translation: {e}")
        sys.exit(1)
def add_brand_asin_links(data, brand_indices, asin_indices):
    """Turn brand and ASIN cells into Amazon hyperlinks.

    The first two rows (headers) are left untouched; empty cells and
    indices beyond the end of a short row are skipped.

    Returns:
        The same ``data`` object, modified in place. Exits the process
        with status 1 on failure.
    """
    try:
        for row in data[2:]:
            # Brand columns: link each brand to an Amazon.co.jp search.
            for index in brand_indices:
                if index < len(row) and row[index]:
                    # BUG FIX: the original passed the bare string
                    # 'https://www.amazon.co.jp/s?k=' with no query
                    # value, producing a link that searches for nothing.
                    # Append the URL-encoded brand as the search term,
                    # matching how process_batch_translations builds its
                    # search URLs.
                    brand = row[index]
                    url = f"https://www.amazon.co.jp/s?k={quote(brand)}"
                    row[index] = create_hyperlink(brand, url)

            # ASIN columns: delegate link construction to the helper.
            for index in asin_indices:
                if index < len(row) and row[index]:
                    row[index] = create_asin_link(row[index])
        return data
    except Exception as e:
        logger.error(f"Error adding brand/ASIN links: {e}")
        sys.exit(1)
def save_csv(data, file_path):
    """Write *data* (a list of rows) to *file_path* as UTF-8 CSV with BOM.

    The 'utf-8-sig' encoding writes a BOM so spreadsheet tools pick the
    right encoding for CJK text. Logs and exits the process on failure.
    """
    try:
        with open(file_path, 'w', encoding='utf-8-sig', newline='') as out:
            csv.writer(out).writerows(data)
    except Exception as exc:
        logger.error(f"Error saving CSV to {file_path}: {exc}")
        sys.exit(1)
def main(input_file, output_file):
    """Read the input CSV, add translation columns plus Amazon links,
    and write the result to *output_file*.

    Exits the process with status 1 on any failure.
    """
    try:
        # Read CSV with proper encoding
        data = read_csv(input_file)

        # Column indices to process (positions in the ORIGINAL file).
        search_term_index = 1  # search-term column
        brand_indices = [2, 3, 4]  # brand columns
        asin_indices = [7, 11, 15]  # ASIN columns
        category_indices = [5, 6, 7]  # category columns
        # NOTE(review): column 7 appears in both asin_indices and
        # category_indices -- confirm which role that column really has.

        # Insert empty columns that will hold the translations.
        insert_indices = [search_term_index] + category_indices
        data = insert_empty_columns(data, insert_indices)
        # NOTE(review): after these insertions every original index > 1
        # has shifted right, yet the index lists above are reused below
        # unchanged -- the category/brand/ASIN positions likely no longer
        # point at the intended columns. Verify against a sample file.

        # Update the header row with translation-column titles.
        data[0][search_term_index + 1] = "中文翻译"
        for index in category_indices:
            data[0].insert(index + 1, "中文翻译")
        # NOTE(review): insert_empty_columns() already inserted blank
        # header cells; inserting again here lengthens only row 0 and
        # misaligns the header with the data rows. This probably should
        # assign into the existing blanks instead of inserting -- confirm.

        # Run the batch translations.
        data = process_batch_translations(data, search_term_index, category_indices)

        # Add brand and ASIN hyperlinks.
        data = add_brand_asin_links(data, brand_indices, asin_indices)

        # Save the processed data.
        save_csv(data, output_file)
        logger.info(f"Successfully processed and saved to {output_file}")

    except Exception as e:
        logger.error(f"Error processing file: {e}")
        sys.exit(1)
if __name__ == "__main__":
    # Input and output both live in the local ./temp directory.
    base = Path('temp')
    main(base / "测试.csv", base / "processed_测试.csv")
|