Эх сурвалжийг харах

feat: add column insertion and improve error handling in process_data.py

mrh (aider) 1 жил өмнө
parent
commit
abbcd04b88
1 өөрчлөгдсөн 16 нэмэгдсэн , 3 устгасан
  1. process_data.py (+16, −3)

+ 16 - 3
process_data.py

@@ -30,6 +30,12 @@ def read_csv(file_path):
     
     raise Exception("Failed to read file with all attempted encodings")
 
+def insert_empty_column(data, column_index):
+    """在指定列之后插入一个空列"""
+    for row in data:
+        row.insert(column_index + 1, '')  # 插入在目标列后面
+    return data
+
 def process_row(row, search_term_index):
     # Add translation column after search term
     search_term = row[search_term_index]
@@ -49,7 +55,8 @@ def process_row(row, search_term_index):
         print(f"Translation error for '{search_term}': {str(e)}")
         translated = f"翻译失败(异常:{str(e)})"
     
-    row.insert(search_term_index + 1, translated)
+    # Update the row with translation in the new column
+    row[search_term_index + 1] = translated
     
     # Add Amazon search link
     amazon_url = f"https://www.amazon.co.jp/s?k={search_term}"
@@ -67,8 +74,14 @@ def main(input_file, output_file):
         # Read CSV with proper encoding
         data = read_csv(input_file)
         
-        # Process each row (skip header row)
+        # Insert empty column for translations after search term column
         search_term_index = 1  # Search term is in second column
+        data = insert_empty_column(data, search_term_index)
+        
+        # Update header row with new column name
+        data[0].insert(search_term_index + 1, "中文翻译")
+        
+        # Process each row (skip header row)
         for i, row in enumerate(data[1:], start=1):
             try:
                 print(f"\nProcessing row {i}")
@@ -79,7 +92,7 @@ def main(input_file, output_file):
                 # Insert empty translation column to maintain structure
                 row.insert(search_term_index + 1, "翻译失败(处理错误)")
                 data[i] = row
-                break
+                continue
         
         # Save processed data
         save_csv(data, output_file)