|
|
@@ -20,8 +20,9 @@ from selenium.webdriver.common.action_chains import ActionChains
|
|
|
from selenium_stealth import stealth
|
|
|
|
|
|
class Scraper1688:
|
|
|
- def __init__(self, headless=True):
|
|
|
+ def __init__(self, headless=True, status_callback=None):
|
|
|
self.headless = headless
|
|
|
+ self.status_callback = status_callback # 用于回调 GUI 状态
|
|
|
self.user_data_path = os.path.abspath(os.path.join(os.getcwd(), "1688_user_data"))
|
|
|
self._cleanup()
|
|
|
options = uc.ChromeOptions()
|
|
|
@@ -67,79 +68,235 @@ class Scraper1688:
|
|
|
return url
|
|
|
|
|
|
def check_for_captcha(self):
|
|
|
+ """
|
|
|
+ 核心监控:检测登录、滑块验证、访问受限等需要人工干预的状态
|
|
|
+ """
|
|
|
def is_blocked():
|
|
|
try:
|
|
|
- src, url, title = self.driver.page_source.lower(), self.driver.current_url.lower(), self.driver.title.lower()
|
|
|
+ url = self.driver.current_url.lower()
|
|
|
+ src = self.driver.page_source.lower()
|
|
|
+ title = self.driver.title.lower()
|
|
|
+
|
|
|
+ # 1. 检测滑块验证码
|
|
|
sliders = self.driver.find_elements(By.ID, "nc_1_n1z")
|
|
|
is_slider = len(sliders) > 0 and sliders[0].is_displayed()
|
|
|
- return is_slider or "punish" in url or "哎哟喂" in src or "验证码" in title
|
|
|
- except: return False
|
|
|
+
|
|
|
+ # 2. 检测登录页面 (如果跳转到了登录页)
|
|
|
+ is_login = "login.1688.com" in url or "passport.1688.com" in url
|
|
|
+
|
|
|
+ # 3. 检测惩罚/验证提示页
|
|
|
+ is_punish = "punish" in url or "哎哟喂" in src or "验证码" in title or "验证提示" in title
|
|
|
+
|
|
|
+ # 4. 检测是否被登出 (如果页面包含登录按钮且当前是详情/搜索页)
|
|
|
+ # 这部分可以根据实际情况增强,目前主要靠 URL 判定
|
|
|
+
|
|
|
+ return is_slider or is_login or is_punish
|
|
|
+            except Exception:
|
|
|
+ return False
|
|
|
|
|
|
if is_blocked():
|
|
|
- print("\n[!] 触发拦截,请手动完成验证...")
|
|
|
- while is_blocked(): time.sleep(2)
|
|
|
- print("[+] 验证通过!")
|
|
|
+ msg = "请登录验证"
|
|
|
+ print(f"\n[!] {msg}...")
|
|
|
+ if self.status_callback:
|
|
|
+ self.status_callback(True, msg)
|
|
|
+
|
|
|
+ # 持续监控,直到上述所有拦截状态消失
|
|
|
+ while is_blocked():
|
|
|
+ time.sleep(2)
|
|
|
+
|
|
|
+ if self.status_callback:
|
|
|
+ self.status_callback(False, "验证通过")
|
|
|
+ print("\n[OK] 监测到人工干预已完成,3秒后恢复自动抓取...")
|
|
|
time.sleep(3)
|
|
|
return True
|
|
|
|
|
|
- def search_products_yield(self, keyword, total_count=200):
|
|
|
+ # def search_products_yield(self, keyword, total_count=200):
|
|
|
+ def search_products_yield(self, keyword, total_count=20):
|
|
|
gbk_keyword = urllib.parse.quote(keyword, encoding='gbk')
|
|
|
base_url = f"https://s.1688.com/selloffer/offer_search.htm?keywords={gbk_keyword}&n=y&netType=1%2C11%2C16"
|
|
|
|
|
|
+ # 初始检查:确保在开始抓取前没被拦截(比如没登录)
|
|
|
+ self.driver.get("https://www.1688.com")
|
|
|
+ self.check_for_captcha()
|
|
|
+
|
|
|
all_links = set()
|
|
|
page = 1
|
|
|
consecutive_empty_pages = 0
|
|
|
|
|
|
while len(all_links) < total_count and consecutive_empty_pages < 3:
|
|
|
- print(f"[*] 正在抓取第 {page} 页...")
|
|
|
+ print(f"[*] 正在抓取列表页: 第 {page} 页...")
|
|
|
target_url = f"{base_url}&beginPage={page}&page={page}"
|
|
|
self.driver.get(target_url)
|
|
|
|
|
|
# 关键:首屏强制等待渲染
|
|
|
- time.sleep(10)
|
|
|
+ time.sleep(5)
|
|
|
self.check_for_captcha()
|
|
|
|
|
|
- # 深度滚动
|
|
|
- for i in range(1, 6):
|
|
|
- self.driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {i/5});")
|
|
|
- time.sleep(1.2)
|
|
|
+ # 深度滚动确保加载
|
|
|
+ for i in range(1, 4):
|
|
|
+ self.driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {i/3});")
|
|
|
+ time.sleep(1)
|
|
|
|
|
|
page_results = self._extract_all_methods()
|
|
|
|
|
|
- new_batch = []
|
|
|
+ page_batch = []
|
|
|
for it in page_results:
|
|
|
- it["link"] = self.clean_url(it["link"])
|
|
|
- if it["link"] and it["link"] not in all_links:
|
|
|
- all_links.add(it["link"])
|
|
|
- new_batch.append(it)
|
|
|
+ clean_url = self.clean_url(it["link"])
|
|
|
+ if clean_url and clean_url not in all_links:
|
|
|
+ all_links.add(clean_url)
|
|
|
+
|
|
|
+ # 核心改进:进入详情页抓取精准数据
|
|
|
+ print(f" [>] 抓取详情: {clean_url}")
|
|
|
+ detail_results = self.scrape_detail(clean_url)
|
|
|
+ if detail_results:
|
|
|
+ # detail_results 现在是一个列表 (包含多个颜色分类)
|
|
|
+ page_batch.extend(detail_results)
|
|
|
+ else:
|
|
|
+ # 兜底
|
|
|
+ it["link"] = clean_url
|
|
|
+ page_batch.append({
|
|
|
+ "category": "", "brand": "", "name": it["name"],
|
|
|
+ "color": "", "spec": "", "material": "", "price": it["price"],
|
|
|
+ "moq": "", "wholesale_price": "", "link": clean_url, "supplier": ""
|
|
|
+ })
|
|
|
+
|
|
|
+ # 每满 10 条 yield 一次
|
|
|
+ if len(page_batch) >= 10:
|
|
|
+ yield page_batch
|
|
|
+ page_batch = []
|
|
|
+
|
|
|
+ # 详情页抓取后的随机等待
|
|
|
+ time.sleep(random.uniform(2, 4))
|
|
|
+
|
|
|
+ if len(all_links) >= total_count:
|
|
|
+ break
|
|
|
|
|
|
- if new_batch:
|
|
|
- consecutive_empty_pages = 0
|
|
|
- yield new_batch
|
|
|
- else:
|
|
|
- print(f"[-] 第 {page} 页未发现新数据,尝试刷新重试...")
|
|
|
- self.driver.refresh()
|
|
|
- time.sleep(8)
|
|
|
- retry_results = self._extract_all_methods()
|
|
|
- new_retry = []
|
|
|
- for it in retry_results:
|
|
|
- it["link"] = self.clean_url(it["link"])
|
|
|
- if it["link"] and it["link"] not in all_links:
|
|
|
- all_links.add(it["link"]); new_retry.append(it)
|
|
|
-
|
|
|
- if new_retry:
|
|
|
- yield new_retry
|
|
|
- else:
|
|
|
- consecutive_empty_pages += 1
|
|
|
- print(f"[!] 连续 {consecutive_empty_pages} 页无数据")
|
|
|
+ # 每页结束,将不足 10 条的余数 yield 出去
|
|
|
+ if page_batch:
|
|
|
+ yield page_batch
|
|
|
+ page_batch = []
|
|
|
|
|
|
page += 1
|
|
|
if len(all_links) < total_count:
|
|
|
- print(f"[*] 累计抓取: {len(all_links)} 条,准备翻页...")
|
|
|
- time.sleep(5)
|
|
|
+ print(f"[*] 累计已处理: {len(all_links)} 条,准备翻下一页...")
|
|
|
+ time.sleep(3)
|
|
|
|
|
|
return list(all_links)
|
|
|
|
|
|
+ def scrape_detail(self, url):
|
|
|
+ """
|
|
|
+ 根据 /refe/req.py 订正的详情页抓取逻辑
|
|
|
+ 获取极其精准的商品属性和价格数据,并支持将“颜色分类”拆分为多行
|
|
|
+ """
|
|
|
+ try:
|
|
|
+ self.driver.get(url)
|
|
|
+ time.sleep(2)
|
|
|
+ self.check_for_captcha()
|
|
|
+
|
|
|
+ # 执行 JS 获取 1688 详情页背后的完整数据模型
|
|
|
+ model = self.driver.execute_script(
|
|
|
+ "return (window.context && window.context.result && "
|
|
|
+ "window.context.result.global && window.context.result.global.globalData "
|
|
|
+ "&& window.context.result.global.globalData.model) || "
|
|
|
+ "window.__INITIAL_DATA__ || window.iDetailData || window.iDetailConfig || null;"
|
|
|
+ )
|
|
|
+
|
|
|
+ if not model:
|
|
|
+ return None
|
|
|
+
|
|
|
+ def get_attr(name):
|
|
|
+ """从 featureAttributes 里取指定属性值"""
|
|
|
+ try:
|
|
|
+ # 现代版
|
|
|
+ attrs = model.get("offerDetail", {}).get("featureAttributes", [])
|
|
|
+ for item in attrs:
|
|
|
+ if name in item.get("name", ""): return item.get("value", "")
|
|
|
+ # 老版兼容
|
|
|
+ attrs = model.get("detailData", {}).get("attributes", [])
|
|
|
+ for item in attrs:
|
|
|
+ if name in item.get("attributeName", ""): return item.get("value", "")
|
|
|
+            except Exception: pass
|
|
|
+ return ""
|
|
|
+
|
|
|
+ def safe_text(by, sel):
|
|
|
+ try:
|
|
|
+ return self.driver.find_element(by, sel).text.strip()
|
|
|
+            except Exception: return ""
|
|
|
+
|
|
|
+ # 价格处理逻辑
|
|
|
+ trade = model.get("tradeModel", {}) if isinstance(model, dict) else {}
|
|
|
+ price_min = trade.get("minPrice", "") or ""
|
|
|
+ price_max = trade.get("maxPrice", "") or ""
|
|
|
+ # 老版价格补丁
|
|
|
+ if not price_min:
|
|
|
+ try: price_min = model["sku"]["priceRange"][0][1]
|
|
|
+            except Exception: pass
|
|
|
+
|
|
|
+ begin_amount = trade.get("beginAmount", "")
|
|
|
+
|
|
|
+ # 批发价区间
|
|
|
+ ranges = trade.get("disPriceRanges") or trade.get("currentPrices") or \
|
|
|
+ trade.get("offerPriceModel", {}).get("currentPrices", [])
|
|
|
+ range_text = " / ".join(
|
|
|
+ [f"{r.get('beginAmount')}起 ¥{r.get('price') or r.get('discountPrice')}" for r in ranges]
|
|
|
+ ) if ranges else ""
|
|
|
+
|
|
|
+ # 基础数据模板
|
|
|
+ base_data = {
|
|
|
+ "category": (model.get("offerDetail", {}).get("leafCategoryName", "") if isinstance(model, dict) else "")
|
|
|
+ or safe_text(By.CSS_SELECTOR, "div[class*=breadcrumb] a:last-child"),
|
|
|
+ "brand": get_attr("品牌"),
|
|
|
+ "name": (model.get("offerDetail", {}).get("subject", "") if isinstance(model, dict) else "")
|
|
|
+ or safe_text(By.CSS_SELECTOR, "h1.d-title")
|
|
|
+ or safe_text(By.CSS_SELECTOR, "h1[class*=title]"),
|
|
|
+ "color": "", # 待填充
|
|
|
+ "spec": get_attr("尺码") or get_attr("规格") or get_attr("型号") or \
|
|
|
+ safe_text(By.XPATH, "//div[@id='productAttributes']//th[span='尺码' or span='规格']/following-sibling::td[1]//span[@class='field-value']"),
|
|
|
+ "material": get_attr("材质") or get_attr("面料") or \
|
|
|
+ safe_text(By.XPATH, "//div[@id='productAttributes']//th[span='材质']/following-sibling::td[1]//span[@class='field-value']"),
|
|
|
+ "price": f"{price_min}-{price_max}" if price_min and price_max and price_min != price_max else f"{price_min}" if price_min else "",
|
|
|
+ "moq": begin_amount or safe_text(By.XPATH, "//div[@id='productAttributes']//th[span='起订量' or span='起批量']/following-sibling::td[1]//span[@class='field-value']"),
|
|
|
+ "wholesale_price": range_text,
|
|
|
+ "link": url,
|
|
|
+ "supplier": (model.get("sellerModel", {}).get("companyName", "") if isinstance(model, dict) else "")
|
|
|
+ or safe_text(By.CSS_SELECTOR, "a.company-name")
|
|
|
+ or safe_text(By.CSS_SELECTOR, "div.company-name"),
|
|
|
+ }
|
|
|
+
|
|
|
+ # --- 核心逻辑:拆分颜色分类 ---
|
|
|
+ sku_props = []
|
|
|
+ try:
|
|
|
+ # 尝试多种路径获取 SKU 属性
|
|
|
+ sku_props = model.get("skuModel", {}).get("skuProps", []) or \
|
|
|
+ model.get("detailData", {}).get("skuProps", []) or \
|
|
|
+ model.get("sku", {}).get("skuProps", [])
|
|
|
+        except Exception: pass
|
|
|
+
|
|
|
+ # 寻找“颜色分类”或类似的属性
|
|
|
+ color_prop = next((p for p in sku_props if p.get("prop") in ["颜色", "颜色分类", "花色"]), None)
|
|
|
+
|
|
|
+ if color_prop and color_prop.get("value"):
|
|
|
+ variant_results = []
|
|
|
+ for val in color_prop["value"]:
|
|
|
+ # 只有当该分类确实有名字时才记录
|
|
|
+ c_name = val.get("name")
|
|
|
+ if c_name:
|
|
|
+ row = base_data.copy()
|
|
|
+ row["color"] = c_name
|
|
|
+ variant_results.append(row)
|
|
|
+ return variant_results
|
|
|
+ else:
|
|
|
+ # 兜底:如果没有 SKU 拆分,则尝试获取单属性颜色
|
|
|
+ base_data["color"] = get_attr("颜色") or get_attr("颜色分类") or ""
|
|
|
+ return [base_data]
|
|
|
+
|
|
|
+ except Exception as e:
|
|
|
+ print(f"[!] 详情页抓取异常 ({url}): {e}")
|
|
|
+ return None
|
|
|
+        # NOTE(review): a second, byte-identical `except Exception as e:` handler
|
|
|
+        # was duplicated here; it was unreachable dead code (the clause above
|
|
|
+        # already catches Exception), so it has been removed.
|
|
|
+
|
|
|
def _extract_all_methods(self):
|
|
|
"""三位一体提取法:JSON + DOM + 深度搜索"""
|
|
|
results = []
|