LuTong 2 months ago
parent
commit
e479889134
3 changed files with 84 additions and 78 deletions
  1. +32 -11 src/excel_handler.py
  2. +3 -4 src/gui.py
  3. +49 -63 src/scraper.py

+ 32 - 11
src/excel_handler.py

@@ -1,23 +1,28 @@
-# [Version: 2026-01-16 ultra-stable build]
 import sys
 import os
 import time
 from openpyxl import load_workbook
 
 def get_resource_path(relative_path):
+    """ 获取资源绝对路径,兼容开发环境和 PyInstaller 打包环境 """
     if hasattr(sys, '_MEIPASS'):
         return os.path.join(sys._MEIPASS, relative_path)
     base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
     return os.path.join(base_dir, relative_path)
 
 def get_existing_info(file_path):
+    """
+    读取已有文件中的链接和最后一行编码
+    """
     links = set()
     last_code = 0
     if not os.path.exists(file_path):
         return links, last_code
+    
     try:
         wb = load_workbook(file_path, data_only=True)
         ws = wb.active
+        # Column A holds the code, column K the link
         for r in range(3, ws.max_row + 1):
             code_val = ws.cell(row=r, column=1).value
             link_val = ws.cell(row=r, column=11).value
@@ -27,28 +32,42 @@ def get_existing_info(file_path):
     return links, last_code
 
 def append_to_template(products, output_path, status_callback=None):
+    """
+    将产品数据追加写入到指定的 Excel 文件中。
+    并在第二个 Sheet 中记录商品总数。
+    """
     template_path = get_resource_path(os.path.join('templates', '【进价】产品信息空表.xlsx'))
-    if not os.path.exists(template_path): template_path = os.path.join('templates', '【进价】产品信息空表.xlsx')
-    if not os.path.exists(template_path): raise FileNotFoundError(f"Template not found: {template_path}")
+    
+    if not os.path.exists(template_path):
+        template_path = os.path.join('templates', '【进价】产品信息空表.xlsx')
+
+    if not os.path.exists(template_path):
+        raise FileNotFoundError(f"未找到核心模板文件: {template_path}")
 
-    if os.path.exists(output_path): wb = load_workbook(output_path)
+    if os.path.exists(output_path):
+        wb = load_workbook(output_path)
     else:
         os.makedirs(os.path.dirname(output_path), exist_ok=True)
         wb = load_workbook(template_path)
     
+    # 1. Write the main data sheet
     ws = wb.active
+    
+    # Find the starting row (based on column 11, the "product link" column)
     start_row = 3
     for r in range(3, ws.max_row + 2):
         val_link = ws.cell(row=r, column=11).value
         if val_link is None or str(val_link).strip() == "":
             start_row = r
             break
-    else: start_row = ws.max_row + 1
+    else:
+        start_row = ws.max_row + 1
     
-    current_links = set()
+    # Collect the set of links already present
+    all_links = set()
     for r in range(3, start_row):
-        link = ws.cell(row=r, column=11).value
-        if link: current_links.add(str(link).strip())
+        l = ws.cell(row=r, column=11).value
+        if l: all_links.add(str(l).strip())
 
     for i, product in enumerate(products):
         row = start_row + i
@@ -64,12 +83,14 @@ def append_to_template(products, output_path, status_callback=None):
         ws.cell(row=row, column=10, value=product.get('wholesale_price', ''))
         ws.cell(row=row, column=11, value=product.get('link', '')) 
         ws.cell(row=row, column=12, value=product.get('supplier', ''))
-        if product.get('link'): current_links.add(str(product['link']).strip())
+        if product.get('link'): all_links.add(str(product['link']).strip())
 
-    if "统计状态" not in wb.sheetnames: wb.create_sheet("统计状态")
+    # 2. Write/update the stats sheet
+    if "统计状态" not in wb.sheetnames:
+        wb.create_sheet("统计状态")
     ws_stat = wb["统计状态"]
     ws_stat.cell(row=1, column=1, value="Total products parsed")
-    ws_stat.cell(row=1, column=2, value=len(current_links))
+    ws_stat.cell(row=1, column=2, value=len(all_links))
     ws_stat.cell(row=2, column=1, value="Last updated")
     ws_stat.cell(row=2, column=2, value=time.strftime("%Y-%m-%d %H:%M:%S"))
 

+ 3 - 4
src/gui.py

@@ -1,4 +1,3 @@
-# [Version: 2026-01-16 ultra-stable build]
 import sys
 import os
 import time
@@ -14,7 +13,8 @@ from src.scraper import Scraper1688
 from src.excel_handler import append_to_template, get_existing_info
 
 def get_resource_path(relative_path):
-    if hasattr(sys, '_MEIPASS'): return os.path.join(sys._MEIPASS, relative_path)
+    if hasattr(sys, '_MEIPASS'):
+        return os.path.join(sys._MEIPASS, relative_path)
     return os.path.join(os.getcwd(), relative_path)
 
 class ScraperThread(QThread):
@@ -65,7 +65,7 @@ class ScraperThread(QThread):
                 self.progress.emit(min(prog, 100))
             
             duration = time.time() - start_time
-            self.log.emit(f"<b>[完成] 任务结束,本次新增抓取 {collected_count} 条数据。</b>")
+            self.log.emit(f"<b>[完成] 任务结束,本次共解析 {product_index - initial_p_count} 个商品。</b>")
             self.log.emit(f"<b>[耗时] 处理总时间: {duration:.2f} 秒</b>")
             self.finished.emit("", scraper, duration)
         except Exception as e:
@@ -80,7 +80,6 @@ class MainWindow(QMainWindow):
         self.selected_category_1 = ""
         self.selected_category_2 = ""
         self.output_base_path = ""
-        self.active_scraper = None
         self.initUI()
         self.load_default_categories()
 

+ 49 - 63
src/scraper.py

@@ -1,4 +1,6 @@
-# [Version: 2026-01-16 14:00 - multi-column variant sync build]
+# [Version: 2026-01-16 14:45 - all-in-one final build]
+# Core features: variant splitting, precise style & price extraction, logic aligned with req.py, lazy-load activation
+# Anti-bot strategy: very low-frequency scraping, long deep sleeps, obfuscated behavior paths
 import sys
 try:
     import distutils
@@ -25,7 +27,6 @@ class Scraper1688:
         self.headless = headless
         self.status_callback = status_callback
         self.log_callback = log_callback
-        # Use a dedicated profile directory to avoid concurrency conflicts
         self.user_data_path = os.path.abspath(os.path.join(os.getcwd(), "chrome_stable_profile"))
         self.driver = None
         self._cleanup()
@@ -36,10 +37,7 @@ class Scraper1688:
     def _find_chrome(self):
         """ 强力锁定 Chrome 安装路径 """
         import winreg
-        reg_paths = [
-            (winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe"),
-            (winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe")
-        ]
+        reg_paths = [(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe"), (winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe")]
         for hkey, subkey in reg_paths:
             try:
                 with winreg.OpenKey(hkey, subkey) as key:
@@ -49,7 +47,6 @@ class Scraper1688:
         return None
 
     def _cleanup(self):
-        """ 强制杀掉残留进程,确保环境纯净 """
         if os.name == 'nt':
             for proc in ['chrome.exe', 'chromedriver.exe']:
                 try: subprocess.call(['taskkill', '/F', '/IM', proc, '/T'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
@@ -62,9 +59,7 @@ class Scraper1688:
                         except: pass
 
     def _init_chrome(self, headless):
-        """ 强化版 Chrome 启动逻辑 """
         chrome_path = self._find_chrome()
-        
         def create_options():
             opts = uc.ChromeOptions()
             opts.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36')
@@ -76,16 +71,13 @@ class Scraper1688:
             opts.add_argument("--disable-dev-shm-usage")
             opts.add_argument("--remote-allow-origins=*")
             return opts
-
         try:
-            # Prefer subprocess mode at launch to work around Win11 connection issues
             self.driver = uc.Chrome(options=create_options(), headless=headless, browser_executable_path=chrome_path, use_subprocess=True)
         except:
-            # On failure, fall back to normal mode
             self.driver = uc.Chrome(options=create_options(), headless=headless, use_subprocess=True)
 
     def clean_url(self, url):
-        """ 鲁棒的 ID 提取 logic """
+        """ 鲁棒的 ID 提取并重组链接 """
         if not url: return ""
         url_str = str(url)
         if url_str.startswith("//"): url_str = "https:" + url_str
@@ -106,8 +98,8 @@ class Scraper1688:
             if self.status_callback: self.status_callback(True, msg)
             while is_blocked(): time.sleep(2)
             if self.status_callback: self.status_callback(False, "Verification passed")
-            # Forced cooldown after passing verification
-            time.sleep(random.randint(60, 120))
+            if self.log_callback: self.log_callback("<font color='orange'>Verification passed; entering a 120-second cooldown to evade risk control...</font>")
+            time.sleep(120)
         return True
 
     def search_products_yield(self, keyword, total_count=200, existing_links=None):
@@ -115,32 +107,25 @@ class Scraper1688:
         base_url = f"https://s.1688.com/selloffer/offer_search.htm?keywords={gbk_keyword}&n=y&netType=1%2C11%2C16"
         self.driver.get("https://www.1688.com")
         self.check_for_captcha()
-        
         all_links = existing_links if existing_links is not None else set()
         page, initial_count = 1, len(all_links)
         
         while len(all_links) < total_count + initial_count:
-            page_anotation = f"[∫] List-page collection: page {page}...";
-            print(page_anotation)
-            if self.log_callback: self.log_callback(page_anotation)
-
+            print(f"[*] Processing list page: page {page}...")
             self.driver.get(f"{base_url}&beginPage={page}&page={page}")
             self.check_for_captcha()
             
-            # --- Hardened: simulate human-like segmented scrolling to deep-trigger lazy loading ---
-            # More scroll passes plus bounce-backs, targeting the "only 1 product parsed on page 1" issue
+            # --- Key improvement: 15-step pulsed bounce-back scrolling to fully activate lazy loading ---
             for i in range(1, 16):
                 self.driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {i/15});")
-                time.sleep(random.uniform(1.5, 3.0))
-                # Key: "bounce back" upward every few steps; this irregular motion is what best triggers 1688's lazy-load hooks
-                if i % 3 == 0:
-                    self.driver.execute_script(f"window.scrollBy(0, -{random.randint(300, 600)});")
-                    time.sleep(1.2)
-            
-            time.sleep(random.uniform(4, 7)) # final wait for the data to sync into page variables
+                time.sleep(random.uniform(1.2, 2.5))
+                if i % 4 == 0:
+                    self.driver.execute_script(f"window.scrollBy(0, -400);")
+                    time.sleep(1.0)
+            time.sleep(5)
 
             page_results = self._extract_all_methods()
-            print(f"  [+] 本页解析完成:共发现 {len(page_results)} 个商品链接")
+            print(f"  [+] 本页解析完成:共发现 {len(page_results)} 个商品条目")
             
             page_batch = []
             for it in page_results:
@@ -148,11 +133,11 @@ class Scraper1688:
                 if clean_url and clean_url not in all_links:
                     all_links.add(clean_url)
                     
-                    # Protection mechanism
+                    # Cooldown mechanism: take a long break every 12 items
                     new_count = len(all_links) - initial_count
                     if new_count > 0 and new_count % 12 == 0:
                         rest_secs = random.randint(300, 600)
-                        if self.log_callback: self.log_callback(f"<font color='red'><b>Protection: sleeping for {rest_secs//60} minutes...</b></font>")
+                        if self.log_callback: self.log_callback(f"<font color='red'><b>Protection: deep sleep for {rest_secs//60} minutes...</b></font>")
                         time.sleep(rest_secs)
 
                     print(f"  [>] 详情仿真抓取: {clean_url}")
@@ -166,25 +151,31 @@ class Scraper1688:
                         yield page_batch
                         page_batch = []
                     
+                    # Random wait after each detail page
                     time.sleep(random.uniform(40, 80)) 
                     if len(all_links) >= total_count + initial_count: break
             
             if page_batch: yield page_batch
             page += 1
+            # Reset between list pages
             self.driver.get("https://www.1688.com")
             time.sleep(60)
         return list(all_links)
 
     def scrape_detail(self, url):
-        """ 极精准变体解析:针对 expand-view-list 区域,精准提取款式名称与逐条价格 """
+        """ 极精准变体解析:针对 expand-view-list 区域,成对提取款式与价格 """
         try:
             self.driver.get(url)
-            # Simulated reading: linger longer and scroll at random so the variant area fully renders
             time.sleep(random.uniform(8, 15))
-            for _ in range(random.randint(2, 4)):
-                self.driver.execute_script(f"window.scrollBy(0, {random.randint(300, 700)});")
-                time.sleep(random.uniform(2.0, 4.0))
             
+            # Auto-expand hidden variants (the '更多'/'展开' ("more"/"expand") buttons)
+            try:
+                expand_btns = self.driver.find_elements(By.XPATH, "//div[contains(@class,'expand-view-list')]//div[contains(text(),'更多') or contains(text(),'展开')]")
+                if expand_btns:
+                    self.driver.execute_script("arguments[0].click();", expand_btns[0])
+                    time.sleep(2)
+            except: pass
+
             self.check_for_captcha()
             model = self.driver.execute_script("return (window.context && window.context.result && window.context.result.global && window.context.result.global.globalData && window.context.result.global.globalData.model) || window.__INITIAL_DATA__ || window.iDetailData || window.iDetailConfig || null;")
             if not model: return None
@@ -207,9 +198,10 @@ class Scraper1688:
                 "category": (model.get("offerDetail", {}).get("leafCategoryName", "") if isinstance(model, dict) else "") or self.driver.find_element(By.CSS_SELECTOR, "div[class*=breadcrumb] a:last-child").text.strip(),
                 "brand": get_attr("品牌"),
                 "name": (model.get("offerDetail", {}).get("subject", "") if isinstance(model, dict) else "") or self.driver.title.split('-')[0],
-                "spec": "", # 待填充变体信息
-                "color": "", # 待填充款式描述
+                "spec": "", 
+                "color": "", 
                 "material": get_attr("材质") or get_attr("面料"),
+                "price": "", 
                 "moq": trade.get("beginAmount", ""),
                 "wholesale_price": range_text,
                 "link": url,
@@ -218,48 +210,40 @@ class Scraper1688:
 
             variant_results = []
             try:
-                # [Core fix] Precisely target the expand-view-list area
+                # Locate the container
                 wrappers = self.driver.find_elements(By.CSS_SELECTOR, ".expand-view-list, .expand-view-list-wrapper")
                 if wrappers:
-                    # Grab each variant sub-entry under this container
+                    # Find each sub-entry under the container
                     items = wrappers[0].find_elements(By.CSS_SELECTOR, ".expand-view-list-item, [class*='list-item'], .sku-item")
                     for item_el in items:
                         try:
-                            # 1. Extract the style description text (item-label)
-                            l_el = item_el.find_elements(By.CLASS_NAME, "item-label")
-                            # 2. Extract each entry's matching price (item-price-stock)
-                            p_el = item_el.find_elements(By.CLASS_NAME, "item-price-stock")
+                            # Description text -> color column + spec column
+                            label_el = item_el.find_elements(By.CLASS_NAME, "item-label")
+                            # Unit price -> price column
+                            price_el = item_el.find_elements(By.CLASS_NAME, "item-price-stock")
                             
-                            if l_el and p_el:
-                                label_text = l_el[0].text.strip()
-                                price_raw = p_el[0].text.strip()
-                                # Price cleanup: keep only digits and the decimal point
+                            if label_el and price_el:
+                                label_text = label_el[0].text.strip()
+                                price_raw = price_el[0].text.strip()
                                 price_clean = re.sub(r'[^\d.]', '', price_raw)
                                 
                                 if label_text:
                                     row = base_data.copy()
-                                    # Per the user's latest request:
-                                    # write the style description text into the "color" column
                                     row["color"] = label_text
-                                    # also write the description into the "spec/size" column, exactly matching the user's example
                                     row["spec"] = label_text
-                                    # write the matching price into the "unit purchase price (CNY)" column (price)
                                     row["price"] = price_clean if price_clean else price_raw
                                     variant_results.append(row)
                         except: continue
             except: pass
 
             if variant_results:
-                print(f"  [+] 成功解析到 {len(variant_results)} 个款式变体")
+                print(f"  [+] 成功解析到 {len(variant_results)} 个规格变体")
                 return variant_results
-            
-            # Plan B: fallback logic
-            base_data["price"] = trade.get("minPrice", "")
             return [base_data]
         except: return None
 
     def _extract_all_methods(self):
-        """ 强化版探测:从内存变量中抓取列表 """
+        """ 强化版:对标 req.py 深度探测所有内存数据 """
         results = []
         seen_ids = set()
         def add_item(name, link):
@@ -267,7 +251,11 @@ class Scraper1688:
             if cid and cid not in seen_ids:
                 seen_ids.add(cid); results.append({"name": name, "link": cid})
 
-        scripts = ["return JSON.stringify(window.data || window.context?.result?.data || window.__INITIAL_DATA__)", "return JSON.stringify(window.context?.result?.global?.globalData?.data || null)", "return JSON.stringify(window.pageData || null)"]
+        scripts = [
+            "return JSON.stringify(window.data || window.context?.result?.data || window.__INITIAL_DATA__)",
+            "return JSON.stringify(window.context?.result?.global?.globalData?.data || null)",
+            "return JSON.stringify(window.pageData || null)"
+        ]
         for s in scripts:
             try:
                 res = self.driver.execute_script(s)
@@ -279,10 +267,8 @@ class Scraper1688:
                         if isinstance(obj, dict):
                             for k in obj: lists.extend(find_lists(obj[k]))
                         return lists
-                    for product_list in find_lists(data):
-                        for o in product_list:
-                            link = o.get('itemUrl', o.get('url', '')) or str(o.get('offerId', ''))
-                            add_item(str(o.get('title', o.get('subject', ''))), link)
+                    for plist in find_lists(data):
+                        for o in plist: add_item(str(o.get('title', o.get('subject', ''))), o.get('itemUrl', o.get('url', '')) or str(o.get('offerId', '')))
                     if results: return results
             except: continue
         return results
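
For reference, a minimal usage sketch (not part of this commit) of how the three changed modules fit together. The signatures of get_existing_info, search_products_yield, and append_to_template match the diffs above; the output path, the keyword, and the assumption that headless is the constructor's first argument are hypothetical:

    from src.scraper import Scraper1688
    from src.excel_handler import append_to_template, get_existing_info

    output_path = "output/products.xlsx"  # hypothetical path
    existing_links, _ = get_existing_info(output_path)

    scraper = Scraper1688(headless=False)  # assumed constructor argument
    for batch in scraper.search_products_yield("keyword", total_count=24,
                                               existing_links=existing_links):
        # each yielded batch is a list of product dicts, one row per variant
        append_to_template(batch, output_path)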