@@ -1,6 +1,5 @@
-# [Version: 2026-01-16 14:45 - all-features-in-one ultimate build]
-# Core features: variant splitting, precise style/price extraction, parity with the req.py logic, lazy-load activation
-# Anti-bot strategy: very low-frequency fetching, long deep sleeps, obfuscated behavior paths
+# [Version: 2026-01-16 15:00 - final fix for multi-row variant scraping]
+# Core features: variant splitting, precise style/price extraction, multi-row Excel output
 import sys
 try:
     import distutils
@@ -35,7 +34,6 @@ class Scraper1688:
         stealth(self.driver, languages=["zh-CN", "zh"], vendor="Google Inc.", platform="Win32", fix_hairline=True)
 
     def _find_chrome(self):
-        """ Aggressively pin down the Chrome installation path """
         import winreg
         reg_paths = [(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe"), (winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe")]
         for hkey, subkey in reg_paths:
@@ -73,17 +71,14 @@ class Scraper1688:
             return opts
         try:
             self.driver = uc.Chrome(options=create_options(), headless=headless, browser_executable_path=chrome_path, use_subprocess=True)
-        except:
+        except Exception:
             self.driver = uc.Chrome(options=create_options(), headless=headless, use_subprocess=True)
 
     def clean_url(self, url):
-        """ Robustly extract the offer ID and rebuild the link """
         if not url: return ""
         url_str = str(url)
-        if url_str.startswith("//"): url_str = "https:" + url_str
         id_match = re.search(r'(\d{9,15})', url_str)
-        if id_match:
-            return f"https://detail.1688.com/offer/{id_match.group(1)}.html"
+        if id_match: return f"https://detail.1688.com/offer/{id_match.group(1)}.html"
         return ""
 
     def check_for_captcha(self):
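
# A minimal, standalone sketch of the offer-ID normalization used by clean_url
# above; the sample URLs below are hypothetical, and the sketch assumes any
# 9-15 digit run in a 1688 product URL is the offer ID.
import re

def normalize_offer_url(url):
    """Rebuild any 1688 product URL into its canonical detail-page form."""
    id_match = re.search(r'(\d{9,15})', str(url or ""))
    return f"https://detail.1688.com/offer/{id_match.group(1)}.html" if id_match else ""

# Both made-up inputs collapse to the same canonical link:
assert normalize_offer_url("//detail.1688.com/offer/712345678901.html?spm=a26") == "https://detail.1688.com/offer/712345678901.html"
assert normalize_offer_url("https://m.1688.com/offer/712345678901.html") == "https://detail.1688.com/offer/712345678901.html"
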
@@ -98,8 +93,7 @@ class Scraper1688:
         if self.status_callback: self.status_callback(True, msg)
         while is_blocked(): time.sleep(2)
         if self.status_callback: self.status_callback(False, "验证通过")
-        if self.log_callback: self.log_callback("<font color='orange'>验证成功,进入 120 秒冷却期以规避风控...</font>")
-        time.sleep(120)
+        time.sleep(random.randint(60, 120))
         return True
 
     def search_products_yield(self, keyword, total_count=200, existing_links=None):
@@ -115,17 +109,17 @@ class Scraper1688:
             self.driver.get(f"{base_url}&beginPage={page}&page={page}")
             self.check_for_captcha()
 
-            # --- Key improvement: 15-step pulsed bounce-back scrolling to fully trigger lazy loading ---
+            # --- Key: pulsed, segmented scrolling to force lazy loading ---
             for i in range(1, 16):
                 self.driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {i/15});")
                 time.sleep(random.uniform(1.2, 2.5))
                 if i % 4 == 0:
                     self.driver.execute_script(f"window.scrollBy(0, -400);")
                     time.sleep(1.0)
-            time.sleep(5)
+            time.sleep(random.uniform(3, 6))
 
             page_results = self._extract_all_methods()
-            print(f" [+] 本页解析完成:共发现 {len(page_results)} 个商品条目")
+            print(f" [+] 本页解析完成:共发现 {len(page_results)} 个商品链接")
 
             page_batch = []
             for it in page_results:
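
# A self-contained sketch of the pulsed-scroll idea in the hunk above: scroll
# in N segments with random pauses and a periodic upward "bounce" so that
# lazy-loaded tiles actually render. The driver is any Selenium-style driver;
# segment count and delays mirror the diff's values but are otherwise arbitrary.
import random
import time

def pulse_scroll(driver, segments=15, bounce_every=4, bounce_px=400):
    for i in range(1, segments + 1):
        # Jump to the i-th fraction of the full page height.
        driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {i / segments});")
        time.sleep(random.uniform(1.2, 2.5))
        if i % bounce_every == 0:
            # Brief upward bounce: re-fires scroll events for stubborn lazy loaders.
            driver.execute_script(f"window.scrollBy(0, -{bounce_px});")
            time.sleep(1.0)
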
@@ -133,11 +127,10 @@ class Scraper1688:
                 if clean_url and clean_url not in all_links:
                     all_links.add(clean_url)
 
-                    # Cooldown: take a long break every 12 new links
                     new_count = len(all_links) - initial_count
                     if new_count > 0 and new_count % 12 == 0:
                         rest_secs = random.randint(300, 600)
-                        if self.log_callback: self.log_callback(f"<font color='red'><b>保护机制:进入深度休眠 {rest_secs//60} 分钟...</b></font>")
+                        if self.log_callback: self.log_callback(f"<font color='red'><b>保护机制:进入休眠 {rest_secs//60} 分钟...</b></font>")
                         time.sleep(rest_secs)
 
                     print(f" [>] 详情仿真抓取: {clean_url}")
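
# The cooldown above sleeps after every 12th newly collected link. A sketch of
# that counter-driven throttle as a reusable helper; the threshold and sleep
# range are the diff's values, not anything 1688 requires.
import random
import time

class DeepSleepThrottle:
    def __init__(self, every=12, rest_range=(300, 600)):
        self.every, self.rest_range, self.count = every, rest_range, 0

    def tick(self):
        """Call once per new item; sleeps when the batch threshold is hit."""
        self.count += 1
        if self.count % self.every == 0:
            time.sleep(random.randint(*self.rest_range))
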
@@ -151,29 +144,32 @@ class Scraper1688:
                         yield page_batch
                         page_batch = []
 
-                    # Random wait after each detail page
                     time.sleep(random.uniform(40, 80))
                     if len(all_links) >= total_count + initial_count: break
 
             if page_batch: yield page_batch
             page += 1
-            # Reset between list pages
             self.driver.get("https://www.1688.com")
             time.sleep(60)
         return list(all_links)
 
     def scrape_detail(self, url):
-        """ High-precision variant parsing: extract style/price pairs from the expand-view-list area """
+        """ High-precision variant parsing: extract each style name and its per-row price from the expand-view-list area """
         try:
             self.driver.get(url)
+            # Simulate reading the detail page
             time.sleep(random.uniform(8, 15))
+            for _ in range(random.randint(2, 4)):
+                self.driver.execute_script(f"window.scrollBy(0, {random.randint(300, 700)});")
+                time.sleep(random.uniform(2.0, 4.0))
 
-            # Auto-expand hidden variants
+            # Try to expand hidden variants (key step)
             try:
-                expand_btns = self.driver.find_elements(By.XPATH, "//div[contains(@class,'expand-view-list')]//div[contains(text(),'更多') or contains(text(),'展开')]")
-                if expand_btns:
-                    self.driver.execute_script("arguments[0].click();", expand_btns[0])
-                    time.sleep(2)
+                expand_btns = self.driver.find_elements(By.CSS_SELECTOR, ".expand-view-list .more-btn, .expand-view-list [class*='more']")
+                for btn in expand_btns:
+                    if btn.is_displayed():
+                        self.driver.execute_script("arguments[0].click();", btn)
+                        time.sleep(1.5)
             except: pass
 
             self.check_for_captcha()
@@ -201,7 +197,6 @@ class Scraper1688:
                 "spec": "",
                 "color": "",
                 "material": get_attr("材质") or get_attr("面料"),
-                "price": "",
                 "moq": trade.get("beginAmount", ""),
                 "wholesale_price": range_text,
                 "link": url,
@@ -210,40 +205,45 @@ class Scraper1688:
             variant_results = []
             try:
-                # Lock onto the container
-                wrappers = self.driver.find_elements(By.CSS_SELECTOR, ".expand-view-list, .expand-view-list-wrapper")
+                # [Core fix] Precisely lock onto the variant container and pair each style with its price
+                wrappers = self.driver.find_elements(By.CSS_SELECTOR, ".expand-view-list, .expand-view-list-wrapper, .sku-wrapper")
                 if wrappers:
-                    # Find every child entry under the container
-                    items = wrappers[0].find_elements(By.CSS_SELECTOR, ".expand-view-list-item, [class*='list-item'], .sku-item")
-                    for item_el in items:
+                    # Find every variant sub-item entry
+                    labels = wrappers[0].find_elements(By.CLASS_NAME, "item-label")
+                    for l_el in labels:
                         try:
-                            # Description text -> color column + spec column
-                            label_el = item_el.find_elements(By.CLASS_NAME, "item-label")
-                            # Unit price -> price column
-                            price_el = item_el.find_elements(By.CLASS_NAME, "item-price-stock")
+                            label_text = l_el.text.strip()
+                            if not label_text: continue
 
-                            if label_el and price_el:
-                                label_text = label_el[0].text.strip()
-                                price_raw = price_el[0].text.strip()
-                                price_clean = re.sub(r'[^\d.]', '', price_raw)
-
-                                if label_text:
-                                    row = base_data.copy()
-                                    row["color"] = label_text
-                                    row["spec"] = label_text
-                                    row["price"] = price_clean if price_clean else price_raw
-                                    variant_results.append(row)
+                            # Walk up from the label to its row-level parent to find the matching price
+                            price_text = ""
+                            curr = l_el
+                            for _ in range(3):  # search up to 3 ancestor levels
+                                curr = curr.find_element(By.XPATH, "..")
+                                prices = curr.find_elements(By.CLASS_NAME, "item-price-stock")
+                                if prices:
+                                    price_text = prices[0].text.strip()
+                                    break
+
+                            if price_text:
+                                price_clean = re.sub(r'[^\d.]', '', price_text)
+                                row = base_data.copy()
+                                # Write the description into both the color and spec/size columns
+                                row["color"] = label_text
+                                row["spec"] = label_text
+                                row["price"] = price_clean
+                                variant_results.append(row)
                         except: continue
             except: pass
 
             if variant_results:
-                print(f" [+] 成功解析到 {len(variant_results)} 个规格变体")
+                print(f" [+] 成功解析到 {len(variant_results)} 个款式变体")
                 return variant_results
             return [base_data]
         except: return None
 
     def _extract_all_methods(self):
-        """ Hardened version: deep-probe all in-memory data, mirroring req.py """
+        """ Hardened probing: extract every link from JS globals and the DOM """
         results = []
         seen_ids = set()
         def add_item(name, link):
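
# The core fix above pairs each ".item-label" with a price by climbing its DOM
# ancestors. A condensed sketch of that walk; the class names come from the
# diff, and the 3-level bound is the diff's heuristic for reaching the row
# container before giving up.
from selenium.webdriver.common.by import By

def price_for_label(label_el, max_levels=3):
    """Return the price text of the row containing label_el, or ''."""
    curr = label_el
    for _ in range(max_levels):
        curr = curr.find_element(By.XPATH, "..")  # one level up the DOM
        prices = curr.find_elements(By.CLASS_NAME, "item-price-stock")
        if prices:
            return prices[0].text.strip()
    return ""
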
@@ -251,11 +251,7 @@ class Scraper1688:
             if cid and cid not in seen_ids:
                 seen_ids.add(cid); results.append({"name": name, "link": cid})
 
-        scripts = [
-            "return JSON.stringify(window.data || window.context?.result?.data || window.__INITIAL_DATA__)",
-            "return JSON.stringify(window.context?.result?.global?.globalData?.data || null)",
-            "return JSON.stringify(window.pageData || null)"
-        ]
+        scripts = ["return JSON.stringify(window.data || window.context?.result?.data || window.__INITIAL_DATA__)", "return JSON.stringify(window.context?.result?.global?.globalData?.data || null)", "return JSON.stringify(window.pageData || null)"]
         for s in scripts:
             try:
                 res = self.driver.execute_script(s)
@@ -267,8 +263,10 @@ class Scraper1688:
                     if isinstance(obj, dict):
                         for k in obj: lists.extend(find_lists(obj[k]))
                     return lists
-                for plist in find_lists(data):
-                    for o in plist: add_item(str(o.get('title', o.get('subject', ''))), o.get('itemUrl', o.get('url', '')) or str(o.get('offerId', '')))
+                for product_list in find_lists(data):
+                    for o in product_list:
+                        link = o.get('itemUrl', o.get('url', '')) or str(o.get('offerId', ''))
+                        add_item(str(o.get('title', o.get('subject', ''))), link)
                 if results: return results
             except: continue
         return results
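
# _extract_all_methods recursively probes whatever JSON blob the page left in
# a JS global. A standalone sketch of that recursive list hunt; the payload
# shape and the "offerList" key below are hypothetical.
import json

def find_product_lists(obj):
    """Collect every list whose elements look like product dicts."""
    lists = []
    if isinstance(obj, list) and obj and isinstance(obj[0], dict):
        lists.append(obj)
    if isinstance(obj, dict):
        for k in obj:
            lists.extend(find_product_lists(obj[k]))
    return lists

data = json.loads('{"result": {"offerList": [{"title": "demo", "offerId": 712345678901}]}}')
for plist in find_product_lists(data):
    for o in plist:
        print(o.get("title"), o.get("offerId"))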