diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/.gitignore" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/.gitignore"
new file mode 100644
index 0000000000000000000000000000000000000000..26d33521af10bcc7fd8cea344038eaaeb78d0ef5
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/.gitignore"
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/inspectionProfiles/profiles_settings.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/inspectionProfiles/profiles_settings.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/inspectionProfiles/profiles_settings.xml"
@@ -0,0 +1,6 @@
+<!-- PyCharm inspection-profile settings; the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/jd_crawler_scrapy.iml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/jd_crawler_scrapy.iml"
new file mode 100644
index 0000000000000000000000000000000000000000..a193443617b9b03a23e930fa3bf830a8fd85af0c
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/jd_crawler_scrapy.iml"
@@ -0,0 +1,11 @@
+<!-- PyCharm module file (jd_crawler_scrapy.iml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/misc.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/misc.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..65531ca992813bbfedbe43dfae5a5f4337168ed8
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/misc.xml"
@@ -0,0 +1,4 @@
+<!-- PyCharm project settings (misc.xml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/modules.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/modules.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..6aa381902ba946a3dc851e989e0effc53237da20
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/modules.xml"
@@ -0,0 +1,8 @@
+<!-- PyCharm module registry (modules.xml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/vcs.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/vcs.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..4fce1d86b49521afe1cee4ed1c13b6396ebbc6f3
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/vcs.xml"
@@ -0,0 +1,6 @@
+<!-- PyCharm VCS mapping (vcs.xml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/__init__.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/__init__.py"
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/__init__.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/__init__.py"
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/items.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/items.py"
new file mode 100644
index 0000000000000000000000000000000000000000..c1d7f45ba092a3053969b7c9e5dfac903da9617e
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/items.py"
@@ -0,0 +1,19 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class JdCrawlerScrapyItem(scrapy.Item):
+ # define the fields for your item here like:
+ # name = scrapy.Field()
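+    # Field names must match the keys the spider sets in spiders/jd_search.py
+    # and the pipeline reads in pipelines.py.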
+ sku_id = scrapy.Field()
+ img = scrapy.Field()
+ price = scrapy.Field()
+ title = scrapy.Field()
+ shop = scrapy.Field()
+ icons = scrapy.Field()
+    sta_date = scrapy.Field()
+ keyword = scrapy.Field()
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/middlewares.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/middlewares.py"
new file mode 100644
index 0000000000000000000000000000000000000000..55b92f11e342a3c3b0e03bf5b1edb0a7092725ce
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/middlewares.py"
@@ -0,0 +1,173 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+from scrapy.downloadermiddlewares.retry import RetryMiddleware
+from scrapy.utils.response import response_status_message
+from scrapy.dupefilters import RFPDupeFilter
+import hashlib
+import weakref
+from w3lib.url import canonicalize_url
+from scrapy.utils.python import to_bytes
+
+_fingerprint_cache = weakref.WeakKeyDictionary()
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class JdCrawlerScrapySpiderMiddleware:
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the spider middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_spider_input(self, response, spider):
+ # Called for each response that goes through the spider
+ # middleware and into the spider.
+
+ # Should return None or raise an exception.
+ return None
+
+ def process_spider_output(self, response, result, spider):
+ # Called with the results returned from the Spider, after
+ # it has processed the response.
+
+ # Must return an iterable of Request, or item objects.
+ for i in result:
+ yield i
+
+ def process_spider_exception(self, response, exception, spider):
+ # Called when a spider or process_spider_input() method
+ # (from other spider middleware) raises an exception.
+
+ # Should return either None or an iterable of Request or item objects.
+ pass
+
+ def process_start_requests(self, start_requests, spider):
+ # Called with the start requests of the spider, and works
+ # similarly to the process_spider_output() method, except
+ # that it doesn’t have a response associated.
+
+ # Must return only requests (not items).
+ for r in start_requests:
+ yield r
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class JdCrawlerScrapyDownloaderMiddleware:
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the downloader middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_request(self, request, spider):
+ # Called for each request that goes through the downloader
+ # middleware.
+
+ # Must either:
+ # - return None: continue processing this request
+ # - or return a Response object
+ # - or return a Request object
+ # - or raise IgnoreRequest: process_exception() methods of
+ # installed downloader middleware will be called
+ return None
+
+ def process_response(self, request, response, spider):
+ # Called with the response returned from the downloader.
+
+ # Must either;
+ # - return a Response object
+ # - return a Request object
+ # - or raise IgnoreRequest
+ return response
+
+ def process_exception(self, request, exception, spider):
+ # Called when a download handler or a process_request()
+ # (from other downloader middleware) raises an exception.
+
+ # Must either:
+ # - return None: continue processing this exception
+ # - return a Response object: stops process_exception() chain
+ # - return a Request object: stops process_exception() chain
+ pass
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class CookieMiddleware:
+    def process_request(self, request, spider):
+        # Placeholder pool; popping from an empty pool raises IndexError,
+        # which is handled in process_exception below.
+        cookie_pool = []
+        request.headers['Cookie'] = cookie_pool.pop()
+
+    def process_response(self, request, response, spider):
+        """
+        Decide from the response body whether the current cookie has expired.
+        :param response:
+        :return:
+        """
+        # if "身份过期" in response.text:
+        #     raise Exception("The current cookie has expired")
+        return response
+
+    def process_exception(self, request, exception, spider):
+        if isinstance(exception, IndexError):
+            retry_times = request.meta.get('retry_times', 0)
+            request.meta['retry_times'] = retry_times - 1
+            return request
+
+class UAMiddleware:
+ def process_request(self, request, spider):
+ """
+        Add a User-Agent header before the request is actually sent.
+ :param request:
+ :param spider:
+ :return:
+ """
+ request.headers["user-agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"
+
+
+class MyRetryMiddleware(RetryMiddleware):
+ """
+    Handle the case where the server returns a normal 200 status code but the
+    page asks for per-IP captcha verification; switching IPs solves it, so such
+    responses should be retried.
+ """
+ def process_response(self, request, response, spider):
+ if request.meta.get('dont_retry', False):
+ return response
+ if "验证码" in response.text:
+ reason = response_status_message(response.status)
+ return self._retry(request, reason, spider) or response
+ return response
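+
+    # _retry() comes from scrapy's RetryMiddleware: it re-queues a copy of the
+    # request until the retry limit (RETRY_TIMES) is hit, then returns None, in
+    # which case the original response is passed through.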
+
+class MyRFPDupeFilter(RFPDupeFilter):
+ def request_fingerprint(self, request, include_headers=None, keep_fragments=False):
+ cache = _fingerprint_cache.setdefault(request, {})
+ cache_key = (include_headers, keep_fragments)
+ if cache_key not in cache:
+ fp = hashlib.sha1()
+ fp.update(to_bytes(request.method))
+ fp.update(to_bytes(canonicalize_url(request.url, keep_fragments=keep_fragments)))
+ fp.update(request.body or b'')
+ fp.update(request.meta.get("batch_no", "").encode("utf-8"))
+ cache[cache_key] = fp.hexdigest()
+ return cache[cache_key]
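+
+# Note: folding meta["batch_no"] into the fingerprint makes two otherwise
+# identical requests count as duplicates only within the same batch, so a new
+# batch can re-crawl the same URLs.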
+
+
+
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/pipelines.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/pipelines.py"
new file mode 100644
index 0000000000000000000000000000000000000000..0aa2a8955ac60105452e979538ad13e0c56f926e
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/pipelines.py"
@@ -0,0 +1,31 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+from jd_crawler_scrapy.items import JdCrawlerScrapyItem
+import pymysql
+
+class JdCrawlerScrapyPipeline:
+ def __init__(self):
+ self.mysql_con = None
+
+ def process_item(self, item, spider):
+ if not self.mysql_con:
+ self.mysql_con = pymysql.connect(**spider.settings["MYSQL_CONF"])
+
+ if isinstance(item, JdCrawlerScrapyItem):
+ cursor = self.mysql_con.cursor()
+            SQL = """INSERT INTO jd_search(sku_id, img, price, title, shop, icons, keyword, sta_date)
+                     VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
+            cursor.execute(SQL, (
+                item['sku_id'], item['img'], item['price'], item['title'],
+                item['shop'], item['icons'], item['keyword'], item['sta_date']
+            ))
+ self.mysql_con.commit()
+ cursor.close()
+
+ return item
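+
+    # Optional sketch: release the lazily-created MySQL connection when the
+    # spider closes (Scrapy calls close_spider on each pipeline that defines it).
+    def close_spider(self, spider):
+        if self.mysql_con:
+            self.mysql_con.close()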
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/run.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/run.py"
new file mode 100644
index 0000000000000000000000000000000000000000..066217194c3487124795dec380bafafd114ef6b0
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/run.py"
@@ -0,0 +1,4 @@
+from scrapy import cmdline
+
+command = "scrapy crawl jd_search".split()
+cmdline.execute(command)
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/scripts/jd_producer.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/scripts/jd_producer.py"
new file mode 100644
index 0000000000000000000000000000000000000000..dc356206d474b61a66ae3d0e6edc6f81ee4a2d75
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/scripts/jd_producer.py"
@@ -0,0 +1,27 @@
+import redis
+import time
+import json
+
+redis_con = redis.Redis(host="localhost", port=6379, db=5)
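+
+# Each task is LPUSHed onto the "jd_search:start_urls" list that the jd_search
+# RedisSpider consumes (see redis_key / make_request_from_data in
+# spiders/jd_search.py); db=5 matches REDIS_PARAMS in the scrapy settings.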
+
+def search_producer():
+ for keyword in ["键盘", "耳机", "笔记本电脑"]:
+ for page_num in range(1, 3):
+ url = f"https://search.jd.com/Search?keyword={keyword}&page={page_num}"
+ meta = {
+ "sta_date": time.strftime("%Y-%m-%d"),
+ "keyword": keyword,
+ "page_num": page_num
+ }
+
+ task = json.dumps({
+ "url": url,
+ "body": '',
+ "method": "GET",
+ "meta": meta
+ })
+ redis_con.lpush("jd_search:start_urls", task)
+
+
+if __name__ == "__main__":
+ search_producer()
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/settings.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/settings.py"
new file mode 100644
index 0000000000000000000000000000000000000000..b11634f462a6bfea7be9b0152438a061639b7177
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/settings.py"
@@ -0,0 +1,131 @@
+# Scrapy settings for jd_crawler_scrapy project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+# https://docs.scrapy.org/en/latest/topics/settings.html
+# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'jd_crawler_scrapy'
+
+SPIDER_MODULES = ['jd_crawler_scrapy.spiders']
+NEWSPIDER_MODULE = 'jd_crawler_scrapy.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+USER_AGENT = 'jd_crawler_scrapy (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Disable redirects
+REDIRECT_ENABLED = False
+
+# Retry
+RETRY_ENABLED = True  # MyRetryMiddleware subclasses RetryMiddleware and raises NotConfigured if retries are disabled
+RETRY_HTTP_CODES = [500, 502, 503, 504, 408, 429]
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+CONCURRENT_REQUESTS = 1
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+DOWNLOAD_DELAY = 2
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+# DEFAULT_REQUEST_HEADERS = {
+# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+# 'Accept-Language': 'en',
+# }
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# 'jd_crawler_scrapy.middlewares.JdCrawlerScrapySpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+DOWNLOADER_MIDDLEWARES = {
+# 'jd_crawler_scrapy.middlewares.JdCrawlerScrapyDownloaderMiddleware': 543,
+ 'jd_crawler_scrapy.middlewares.UAMiddleware': 100,
+ 'jd_crawler_scrapy.middlewares.MyRetryMiddleware': 200,
+ 'jd_crawler_scrapy.middlewares.CookieMiddleware': 150,
+}
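+# Lower numbers run closer to the engine: process_request is called in the
+# order UAMiddleware (100) -> CookieMiddleware (150) -> MyRetryMiddleware (200),
+# and process_response flows back through the chain in reverse.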
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# 'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+ 'jd_crawler_scrapy.pipelines.JdCrawlerScrapyPipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+
+#MYSQL CONF
+MYSQL_CONF = {
+ "host": "127.0.0.1",
+ "user": "root",
+ "password": "lp82nlf",
+ "db": "wong_mysql"
+}
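+# Unpacked directly into pymysql.connect(**...) by JdCrawlerScrapyPipeline, so
+# every key must be a valid pymysql.connect argument.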
+
+#LOG
+# LOG_FILE = "D:/LOG/jd_search.log"
+# LOG_LEVEL = "ERROR"
+
+# DUPEFILTER_CLASS = "jd_crawler_scrapy.middlewares.MyRFPDupeFilter"
+
+
+# Scrapy-redis settings: replace the default scheduler
+SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
+
+# SCHEDULER_QUEUE_CLASS: replace the message queue with a Redis-backed priority queue
+SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
+
+# DUPEFILTER_CLASS: swap in the dupe filter that stores request fingerprints in Redis
+DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
+
+# SCHEDULER_PERSIST: keep the message queue in Redis instead of clearing it on close
+SCHEDULER_PERSIST = True
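+# With persistence on, the Redis request queue and fingerprint set survive
+# between runs; flush redis db 5 to force a full re-crawl.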
+
+
+# Redis settings
+REDIS_HOST = 'localhost'
+REDIS_PORT = 6379
+REDIS_PARAMS = {"db": 5}
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/__init__.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/__init__.py"
new file mode 100644
index 0000000000000000000000000000000000000000..ebd689ac51d69c5e1dbbe80083c2b20a39f8bb79
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/__init__.py"
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/jd_search.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/jd_search.py"
new file mode 100644
index 0000000000000000000000000000000000000000..bd5d38d7df1d37abb45374e01b141502976fb338
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/jd_search.py"
@@ -0,0 +1,61 @@
+import scrapy
+from bs4 import BeautifulSoup
+import json
+import time
+from jd_crawler_scrapy.items import JdCrawlerScrapyItem
+from scrapy.exceptions import CloseSpider
+from scrapy_redis.spiders import RedisSpider
+
+
+class JdSearch(RedisSpider):
+ name = "jd_search"
+ redis_key = f"{name}:start_urls"
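+    # Tasks arrive on this Redis list as JSON pushed by scripts/jd_producer.py.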
+
+ def make_request_from_data(self, data):
+        task = json.loads(data.decode("utf-8"))
+ return scrapy.http.FormRequest(
+ url=task["url"],
+            formdata=json.loads(task["body"]) if task["body"] else None,
+ method=task["method"],
+ meta=task["meta"],
+ dont_filter=False,
+ callback=self.parse_search,
+ errback=self.process_error
+ )
+
+ def parse_search(self, response):
+ print(response)
+ soup = BeautifulSoup(response.text, "lxml")
+ item_array = soup.select("ul[class='gl-warp clearfix'] li[class='gl-item']")
+ for item in item_array:
+ try:
+ sku_id = item.attrs["data-sku"]
+ img = item.select("img[data-img='1']")
+ price = item.select("div[class='p-price']")
+ title = item.select("div[class='p-name p-name-type-2']")
+ shop = item.select("div[class='p-shop']")
+ icons = item.select("div[class='p-icons']")
+
+ img = img[0].attrs['data-lazy-img'] if img else ""
+ price = price[0].strong.i.text if price else ""
+ title = title[0].text.strip() if title else ""
+                shop = shop[0].a.attrs['title'] if shop and shop[0].text.strip() else ""
+ icons = json.dumps([tag_ele.text for tag_ele in icons[0].select("i")]) if icons else '[]'
+
+ item = JdCrawlerScrapyItem()
+ item["sku_id"] = sku_id
+ item["img"] = img
+ item["price"] = price
+ item["title"] = title
+ item["shop"] = shop
+ item["icons"] = icons
+ item["sta_date"] = response.meta["sta_date"]
+ item["keyword"] = response.meta["keyword"]
+ yield item
+
+ except Exception as e:
+ print(e.args)
+
+ def process_error(self, failure):
+ print(failure)
+
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/scrapy.cfg" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/scrapy.cfg"
new file mode 100644
index 0000000000000000000000000000000000000000..b6086855c9bbe4b6c4c9381fdc30da8031610cde
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/scrapy.cfg"
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = jd_crawler_scrapy.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = jd_crawler_scrapy
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/.gitignore" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/.gitignore"
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/inspectionProfiles/profiles_settings.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/inspectionProfiles/profiles_settings.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/inspectionProfiles/profiles_settings.xml"
@@ -0,0 +1,6 @@
+<!-- PyCharm inspection-profile settings; the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/misc.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/misc.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..65531ca992813bbfedbe43dfae5a5f4337168ed8
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/misc.xml"
@@ -0,0 +1,4 @@
+<!-- PyCharm project settings (misc.xml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/modules.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/modules.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..61bb8b0f2ce72743b90eec2a20359ca7f8b323cc
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/modules.xml"
@@ -0,0 +1,8 @@
+<!-- PyCharm module registry (modules.xml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/vcs.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/vcs.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..4fce1d86b49521afe1cee4ed1c13b6396ebbc6f3
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/vcs.xml"
@@ -0,0 +1,6 @@
+<!-- PyCharm VCS mapping (vcs.xml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/week10.iml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/week10.iml"
new file mode 100644
index 0000000000000000000000000000000000000000..a193443617b9b03a23e930fa3bf830a8fd85af0c
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/week10.iml"
@@ -0,0 +1,11 @@
+<!-- PyCharm module file (week10.iml); the XML body was stripped in this excerpt -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/workspace.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/workspace.xml"
new file mode 100644
index 0000000000000000000000000000000000000000..f0f7fccb8ff17ad1c1caecac3ed6fff72cfbaf4e
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/workspace.xml"
@@ -0,0 +1,167 @@
+<!-- PyCharm workspace.xml (user-local IDE state); the XML body was stripped in this excerpt. The surviving fragments record a task timestamp (1615191058673) and an entry pointing at file://$PROJECT_DIR$/jd_parser/search.py, line 5 -->
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/jd_parser/search.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/jd_parser/search.py"
new file mode 100644
index 0000000000000000000000000000000000000000..9c814c5e7946b15ca9311cb92cc1478dcf5351b1
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/jd_parser/search.py"
@@ -0,0 +1,27 @@
+from bs4 import BeautifulSoup
+import json
+
+def parse_jd_item(html):
+ result = []
+ soup = BeautifulSoup(html, "lxml")
+ item_array = soup.select("ul[class='gl-warp clearfix'] li[class='gl-item']")
+ for item in item_array:
+ sku_id = item.attrs["data-sku"]
+ img = item.select("img[data-img='1']")
+ price = item.select("div[class='p-price']")
+ title = item.select("div[class='p-name p-name-type-2']")
+ shop = item.select("div[class='p-shop']")
+ icons = item.select("div[class='p-icons']")
+
+
+ img = img[0].attrs['data-lazy-img'] if img else ""
+ price = price[0].strong.i.text if price else ""
+ title = title[0].text.strip() if title else ""
+        shop = shop[0].a.attrs['title'] if shop and shop[0].text.strip() else ""
+ icons = json.dumps([tag_ele.text for tag_ele in icons[0].select("i")]) if icons else '[]'
+
+ result.append((sku_id, img, price, title, shop, icons))
+
+ print(result)
+ return result
+
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/main.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/main.py"
new file mode 100644
index 0000000000000000000000000000000000000000..edd6b6e2259d600634648bee0917137a3925bfbd
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/main.py"
@@ -0,0 +1,58 @@
+
+import random
+import pymysql
+import requests
+import sys
+import os
+# sys.path.append(r"D:\pycharm\pythonproject\second-python-bootcamp\第二期训练营\2班\2班_Wong\week10")
+print(sys.path)
+from jd_parser.search import parse_jd_item
+from settings import MYSQL_CONF, HEADERS
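+
+# mysql_con is created in the __main__ block below and used as a module-level
+# global by saver().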
+
+def saver(item_array):
+ """
+    Persist the crawled items to MySQL.
+ :param item_array:
+ :return:
+ """
+ cursor = mysql_con.cursor()
+ SQL = """INSERT INTO jd_search(sku_id, img, price, title, shop, icons)
+ VALUES (%s, %s, %s, %s, %s, %s)"""
+ cursor.executemany(SQL, item_array)
+ mysql_con.commit()
+ cursor.close()
+
+
+def downloader(task):
+ """
+    Component that requests the target URL.
+ :param task:
+ :return:
+ """
+ url = "https://search.jd.com/Search"
+ params = {
+ "keyword": task
+ }
+ res = requests.get(url=url, params=params, headers=HEADERS)
+ return res
+
+
+
+def main(task_array):
+ """
+    Schedule the crawl tasks.
+ :param task_array:
+ :return:
+ """
+ for task in task_array:
+ result = downloader(task)
+ item_array = parse_jd_item(result.text)
+ saver(item_array)
+
+
+
+if __name__ == "__main__":
+ mysql_con = pymysql.connect(**MYSQL_CONF)
+ task_array = ["鼠标", "键盘", "耳机", "笔记本电脑"]
+ main(task_array)
+ print("done")
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/settings.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/settings.py"
new file mode 100644
index 0000000000000000000000000000000000000000..380bea6554d203e17220df645ef17f2eb3c737b2
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/settings.py"
@@ -0,0 +1,13 @@
+
+HEADERS = {
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
+ "upgrade-insecure-requests": "1"
+}
+
+
+MYSQL_CONF = {
+ "host": "127.0.0.1",
+ "user": "root",
+ "password": "lp82nlf",
+ "db": "wong_mysql"
+}
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/test/parser_test.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/test/parser_test.py"
new file mode 100644
index 0000000000000000000000000000000000000000..95018615388d04dbdd4c05aeb1e192b18ebe5cc0
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/test/parser_test.py"
@@ -0,0 +1,11 @@
+
+import sys
+import os
+print(os.getcwd())
+sys.path.append(r"D:\pycharm\pythonproject\second-python-bootcamp\第二期训练营\2班\2班_Wong\week10")
+print(sys.path)
+from jd_parser.search import parse_jd_item
+
+with open(r"shubiao.html", "r", encoding="utf-8") as f:
+ html = f.read()
+ parse_jd_item(html)
\ No newline at end of file
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/test/shubiao.html" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/test/shubiao.html"
new file mode 100644
index 0000000000000000000000000000000000000000..cc9c27b41fe23f3fe18488e3bf7d53cedb71e882
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/test/shubiao.html"
@@ -0,0 +1,6403 @@
+<!-- Saved JD search-results page for the keyword "鼠标" (page title: "鼠标 - 商品搜索 - 京东"), used as the fixture loaded by test/parser_test.py. The 6400-odd lines of HTML markup were stripped in this excerpt; only scattered text (prices such as ¥39.90 and tags like 自营 / 放心购 / 秒杀) survived -->