LuTong hai 3 meses
pai
achega
128c8ee38c
Modificouse 1 ficheiro con 51 adicións e 63 borrados
  1. 51 63
      src/scraper.py

+ 51 - 63
src/scraper.py

@@ -27,15 +27,14 @@ class Scraper1688:
     def __init__(self, headless=True, status_callback=None, log_callback=None):
         self.headless = headless
         self.status_callback = status_callback
-        self.log_callback = log_callback # 新增:用于向 GUI 发送普通日志
+        self.log_callback = log_callback
         self.user_data_path = os.path.abspath(os.path.join(os.getcwd(), "1688_user_data"))
         self.driver = None
         
         edge_path = self._find_edge()
         if edge_path:
-            print(f"[*] 【极致稳定模式】正在启动 Edge 深度伪装环境...")
+            print(f"[*] 【降频加固模式】正在启动 Edge 持久化 Session...")
             self._cleanup_processes()
-            # 使用固定且持久的 Session 目录,确保长效免登录
             edge_user_data = os.path.join(os.getcwd(), "1688_edge_ultimate_session")
             cmd = [
                 edge_path, 
@@ -52,7 +51,7 @@ class Scraper1688:
                 opts = EdgeOptions()
                 opts.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
                 self.driver = webdriver.Edge(options=opts)
-                print("[+] Edge 极致稳定环境接管成功!")
+                print("[+] Edge 环境已安全接管!")
             except Exception as e:
                 print(f"[!] Edge 启动失败: {e}")
         
@@ -60,13 +59,11 @@ class Scraper1688:
             self._init_chrome(headless)
 
         if self.driver:
-            # 深度擦除自动化指纹
             try:
                 self.driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                     "source": """
                         Object.defineProperty(navigator, 'webdriver', { get: () => undefined });
                         Object.defineProperty(navigator, 'languages', { get: () => ['zh-CN', 'zh'] });
-                        Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5] });
                     """
                 })
             except: pass
@@ -107,76 +104,73 @@ class Scraper1688:
             if self.status_callback: self.status_callback(True, msg)
             while is_blocked(): time.sleep(3)
             if self.status_callback: self.status_callback(False, "验证通过")
-            
-            cool_msg = "[*] 监测到干预完成,进入 120 秒深度冷却期以重置风控权重..."
-            print(cool_msg)
+            cool_msg = "[*] 解封成功,进入 180 秒深度冷却期以规避风控追溯..."
             if self.log_callback: self.log_callback(f"<font color='orange'>{cool_msg}</font>")
-            time.sleep(120) 
+            time.sleep(180) 
         return True
 
-    def _human_behavior(self, duration=10):
-        """ 高级拟人化行为模拟 """
+    def _human_behavior(self, duration=15):
+        """ 深度留存仿真 """
         start_time = time.time()
         while time.time() - start_time < duration:
             try:
-                # 1. 随机滚动
-                scroll_y = random.randint(200, 600)
-                self.driver.execute_script(f"window.scrollBy(0, {scroll_y});")
-                # 2. 随机鼠标晃动
-                actions = ActionChains(self.driver)
-                actions.move_by_offset(random.randint(-5, 5), random.randint(-5, 5)).perform()
-                time.sleep(random.uniform(1.5, 4.0))
-                # 3. 概率性往回滚
-                if random.random() > 0.7:
-                    self.driver.execute_script(f"window.scrollBy(0, -{random.randint(100, 300)});")
+                self.driver.execute_script(f"window.scrollBy(0, {random.randint(300, 700)});")
+                time.sleep(random.uniform(2.0, 5.0))
+                if random.random() > 0.8:
+                    self.driver.execute_script("window.scrollBy(0, -200);")
             except: break
 
def clean_url(self, url):
    """Normalize any 1688 product link to its canonical detail-page URL.

    Pulls the numeric offer id out of detail / similar-search / ad style
    links and rebuilds the standard detail URL from it.  Returns an empty
    string when no id can be extracted, so search and landing pages are
    never visited.

    Args:
        url: Raw link harvested from a listing page (may be None/empty).

    Returns:
        ``https://detail.1688.com/offer/<id>.html`` or ``""``.
    """
    if not url:
        return ""
    # Try the path form first (.../offer/12345.html or offerId/12345.html),
    # then the query-string form (?offerId=12345 / &id=12345).
    for pattern in (r'offer(?:Id)?/(\d+)\.html', r'[?&](?:offerId|id)=(\d+)'):
        found = re.search(pattern, url)
        if found:
            return f"https://detail.1688.com/offer/{found.group(1)}.html"
    return ""
+
     def search_products_yield(self, keyword, total_count=200, existing_links=None):
         gbk_keyword = urllib.parse.quote(keyword, encoding='gbk')
         base_url = f"https://s.1688.com/selloffer/offer_search.htm?keywords={gbk_keyword}&n=y&netType=1%2C11%2C16"
         
         self.driver.get("https://www.1688.com")
-        time.sleep(random.randint(3, 6))
+        time.sleep(random.randint(5, 10))
         self.check_for_captcha()
 
         all_links = existing_links if existing_links is not None else set()
         page, initial_count = 1, len(all_links)
-        # 随机设定下一次深度冷却的阈值 (5-12条之间)
-        next_cool_threshold = random.randint(5, 12)
+        next_cool_threshold = random.randint(4, 8) # 调低冷却门槛
         
         while len(all_links) < total_count + initial_count:
-            print(f"[*] 正在模拟搜索: 第 {page} 页...")
+            print(f"[*] 正在低频搜索: 第 {page} 页...")
             self.driver.get(f"{base_url}&beginPage={page}&page={page}")
             self.check_for_captcha()
             
-            # 列表页模拟“翻找”行为
-            for _ in range(random.randint(5, 8)):
-                self.driver.execute_script(f"window.scrollBy(0, {random.randint(400, 800)});")
-                time.sleep(random.uniform(1.5, 3.5))
-                if random.random() > 0.8:
-                    self.driver.execute_script("window.scrollBy(0, -300);")
+            # 列表页极致拟人滚动
+            for _ in range(random.randint(6, 12)):
+                self.driver.execute_script(f"window.scrollBy(0, {random.randint(500, 1000)});")
+                time.sleep(random.uniform(2.0, 4.5))
 
             page_results = self._extract_all_methods()
             page_batch = []
             for it in page_results:
                 clean_url = self.clean_url(it["link"])
+                # 过滤掉无法转换的或已存在的链接
                 if clean_url and clean_url not in all_links:
                     all_links.add(clean_url)
                     
-                    # --- 核心订正:随机深度冷却 ---
                     new_processed = len(all_links) - initial_count
                     if new_processed >= next_cool_threshold:
-                        rest = random.randint(120, 300)
-                        cool_msg = f"[*] 随机触发深度保护 (已处理{new_processed}条),睡眠 {rest} 秒模拟休息..."
-                        print(cool_msg)
+                        rest = random.randint(180, 400) # 延长休眠时间
+                        cool_msg = f"[*] 账号保护启动 (已处理{new_processed}条),深度睡眠 {rest} 秒..."
                         if self.log_callback: self.log_callback(f"<font color='orange'><b>{cool_msg}</b></font>")
                         time.sleep(rest)
-                        next_cool_threshold += random.randint(5, 12) # 设定下一个随机检查点
+                        next_cool_threshold += random.randint(4, 8)
 
-                    print(f"  [>] 详情仿真采集: {clean_url}")
-                    
-                    # 访问前大幅随机停顿
-                    time.sleep(random.uniform(5, 12)) 
+                    print(f"  [>] 安全详情采集: {clean_url}")
+                    # 大跨度进入前等待
+                    time.sleep(random.uniform(8, 15)) 
                     
                     detail_results = self.scrape_detail(clean_url)
                     if detail_results: page_batch.extend(detail_results)
@@ -186,24 +180,24 @@ class Scraper1688:
                         yield page_batch
                         page_batch = []
                     
-                    # 详情页之间的大跨度等待
-                    time.sleep(random.uniform(30, 60)) 
+                    # 【关键】详情页间超长超随机等待
+                    rest_between = random.randint(40, 80)
+                    time.sleep(rest_between) 
                     
                     if len(all_links) >= total_count + initial_count: break
             
             if page_batch: yield page_batch
             page += 1
-            # 每翻 3 页随机回一次 1688 首页,消除路径单一性
-            if page % 3 == 0:
-                self.driver.get("https://www.1688.com")
-                time.sleep(random.randint(10, 20))
+            if page % 2 == 0:
+                self.driver.get("https://www.1688.com") # 频繁回首页重置路径指纹
+                time.sleep(random.randint(15, 30))
         return list(all_links)
 
     def scrape_detail(self, url):
         try:
             self.driver.get(url)
-            # --- 核心改进:详情页留存仿真 ---
-            self._human_behavior(duration=random.randint(12, 25))
+            # 详情页留存时长翻倍
+            self._human_behavior(duration=random.randint(15, 35))
             self.check_for_captcha()
             
             model = self.driver.execute_script(
@@ -223,10 +217,6 @@ class Scraper1688:
                 return ""
 
             trade = model.get("tradeModel", {}) if isinstance(model, dict) else {}
-            price_min = trade.get("minPrice", "") or ""
-            if not price_min:
-                try: price_min = model["sku"]["priceRange"][0][1]
-                except: pass
             ranges = trade.get("disPriceRanges") or trade.get("currentPrices") or []
             range_text = " / ".join([f"{r.get('beginAmount')}起 ¥{r.get('price') or r.get('discountPrice')}" for r in ranges])
 
@@ -236,7 +226,7 @@ class Scraper1688:
                 "name": (model.get("offerDetail", {}).get("subject", "") if isinstance(model, dict) else "") or self.driver.title.split('-')[0],
                 "spec": get_attr("尺码") or get_attr("规格") or get_attr("型号"),
                 "material": get_attr("材质") or get_attr("面料"),
-                "price": price_min,
+                "price": trade.get("minPrice", ""),
                 "moq": trade.get("beginAmount", ""),
                 "wholesale_price": range_text,
                 "link": url,
@@ -258,12 +248,6 @@ class Scraper1688:
             return [base_data]
         except: return None
 
-    def clean_url(self, url):
-        if not url: return ""
-        id_match = re.search(r'offer/(\d+)\.html', url)
-        if id_match: return f"https://detail.1688.com/offer/{id_match.group(1)}.html"
-        return url
-
     def _extract_all_methods(self):
         results = []
         try:
@@ -279,14 +263,18 @@ class Scraper1688:
                     return None
                 for o in (find_list(data) or []):
                     link = o.get('itemUrl', o.get('url', ''))
-                    if link: results.append({"name": str(o.get('title', '')), "link": link})
+                    # 过滤掉非商品主链接的干扰
+                    if link and "similar_search" not in link:
+                        results.append({"name": str(o.get('title', '')), "link": link})
         except: pass
         if not results:
             for s in [".search-offer-item", "[class*='offer-card']", ".offer-item"]:
                 for el in self.driver.find_elements(By.CSS_SELECTOR, s):
                     try:
-                        link = el.find_element(By.TAG_NAME, "a").get_attribute("href")
-                        if link: results.append({"name": el.text.split('\n')[0][:50], "link": link})
+                        a = el.find_element(By.TAG_NAME, "a")
+                        link = a.get_attribute("href")
+                        if link and "similar_search" not in link:
+                            results.append({"name": el.text.split('\n')[0][:50], "link": link})
                     except: continue
                 if results: break
         return results