diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/.gitignore" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/.gitignore" new file mode 100644 index 0000000000000000000000000000000000000000..26d33521af10bcc7fd8cea344038eaaeb78d0ef5 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/.gitignore" @@ -0,0 +1,3 @@ +# Default ignored files +/shelf/ +/workspace.xml diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/inspectionProfiles/profiles_settings.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/inspectionProfiles/profiles_settings.xml" new file mode 100644 index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/inspectionProfiles/profiles_settings.xml" @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/jd_crawler_scrapy.iml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/jd_crawler_scrapy.iml" new file mode 100644 index 0000000000000000000000000000000000000000..a193443617b9b03a23e930fa3bf830a8fd85af0c --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/jd_crawler_scrapy.iml" @@ -0,0 +1,11 @@ + + + + + + + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/misc.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/misc.xml" new file mode 100644 index 0000000000000000000000000000000000000000..65531ca992813bbfedbe43dfae5a5f4337168ed8 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/misc.xml" @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/modules.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/modules.xml" new file mode 100644 index 0000000000000000000000000000000000000000..6aa381902ba946a3dc851e989e0effc53237da20 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/modules.xml" @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/vcs.xml" 
"b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/vcs.xml" new file mode 100644 index 0000000000000000000000000000000000000000..4fce1d86b49521afe1cee4ed1c13b6396ebbc6f3 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/.idea/vcs.xml" @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/__init__.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/__init__.py" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/__init__.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/__init__.py" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/items.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/items.py" new file mode 100644 index 0000000000000000000000000000000000000000..c1d7f45ba092a3053969b7c9e5dfac903da9617e --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/items.py" @@ -0,0 +1,19 @@ +# Define here the models for your scraped items +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/items.html + +import scrapy + + +class JdCrawlerScrapyItem(scrapy.Item): + # define the fields for your item here like: + # name = scrapy.Field() + sku_id = scrapy.Field() + img = scrapy.Field() + price = scrapy.Field() + title = scrapy.Field() + shop = scrapy.Field() + icons = scrapy.Field() + sta_data = scrapy.Field() + keyword = scrapy.Field() diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/middlewares.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/middlewares.py" new file mode 100644 index 0000000000000000000000000000000000000000..55b92f11e342a3c3b0e03bf5b1edb0a7092725ce --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/middlewares.py" @@ -0,0 +1,173 @@ +# Define here the models for your spider middleware +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +from scrapy import signals +from scrapy.downloadermiddlewares.retry import RetryMiddleware +from scrapy.utils.response import response_status_message +from scrapy.dupefilters import RFPDupeFilter +import hashlib +import weakref +from w3lib.url import canonicalize_url +from scrapy.utils.python import to_bytes + +_fingerprint_cache = weakref.WeakKeyDictionary() + +# 
useful for handling different item types with a single interface +from itemadapter import is_item, ItemAdapter + + +class JdCrawlerScrapySpiderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_spider_input(self, response, spider): + # Called for each response that goes through the spider + # middleware and into the spider. + + # Should return None or raise an exception. + return None + + def process_spider_output(self, response, result, spider): + # Called with the results returned from the Spider, after + # it has processed the response. + + # Must return an iterable of Request, or item objects. + for i in result: + yield i + + def process_spider_exception(self, response, exception, spider): + # Called when a spider or process_spider_input() method + # (from other spider middleware) raises an exception. + + # Should return either None or an iterable of Request or item objects. + pass + + def process_start_requests(self, start_requests, spider): + # Called with the start requests of the spider, and works + # similarly to the process_spider_output() method, except + # that it doesn’t have a response associated. + + # Must return only requests (not items). + for r in start_requests: + yield r + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + + +class JdCrawlerScrapyDownloaderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the downloader middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_request(self, request, spider): + # Called for each request that goes through the downloader + # middleware. + + # Must either: + # - return None: continue processing this request + # - or return a Response object + # - or return a Request object + # - or raise IgnoreRequest: process_exception() methods of + # installed downloader middleware will be called + return None + + def process_response(self, request, response, spider): + # Called with the response returned from the downloader. + + # Must either; + # - return a Response object + # - return a Request object + # - or raise IgnoreRequest + return response + + def process_exception(self, request, exception, spider): + # Called when a download handler or a process_request() + # (from other downloader middleware) raises an exception. 
+ + # Must either: + # - return None: continue processing this exception + # - return a Response object: stops process_exception() chain + # - return a Request object: stops process_exception() chain + pass + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + + +class CookieMiddleware: + def process_request(self, request): + cookie_pool = [] + request.headers['cookies'] = cookie_pool.pop() + def process_response(self,response): + """ + 根据response返回的内容判断当前cookie是否过期 + :param response: + :return: + """ + # if "身份过期" in response.text: + # raise Exception("当前cookie身份已过期") + + def process_exception(self, request, exception, spider): + if isinstance(exception,IndexError): + retry_times = request.meta.get('retry_times') + request.meta['retry_times'] = retry_times - 1 + + return request + +class UAMiddleware: + def process_request(self, request, spider): + """ + 正式请求前添加请求头 + :param request: + :param spider: + :return: + """ + request.headers["user-agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36" + + +class MyRetryMiddleware(RetryMiddleware): + """ + 解决对方服务器返回正常状态码200,但是需要根据IP进行验证码验证的情。 + 可以通过换IP解决,那么就应该重试。 + """ + def process_response(self, request, response, spider): + if request.meta.get('dont_retry', False): + return response + if "验证码" in response.text: + reason = response_status_message(response.status) + return self._retry(request, reason, spider) or response + return response + +class MyRFPDupeFilter(RFPDupeFilter): + def request_fingerprint(self, request, include_headers=None, keep_fragments=False): + cache = _fingerprint_cache.setdefault(request, {}) + cache_key = (include_headers, keep_fragments) + if cache_key not in cache: + fp = hashlib.sha1() + fp.update(to_bytes(request.method)) + fp.update(to_bytes(canonicalize_url(request.url, keep_fragments=keep_fragments))) + fp.update(request.body or b'') + fp.update(request.meta.get("batch_no", "").encode("utf-8")) + cache[cache_key] = fp.hexdigest() + return cache[cache_key] + + + diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/pipelines.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/pipelines.py" new file mode 100644 index 0000000000000000000000000000000000000000..0aa2a8955ac60105452e979538ad13e0c56f926e --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/pipelines.py" @@ -0,0 +1,31 @@ +# Define your item pipelines here +# +# Don't forget to add your pipeline to the ITEM_PIPELINES setting +# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html + + +# useful for handling different item types with a single interface +from itemadapter import ItemAdapter + +from jd_crawler_scrapy.items import JdCrawlerScrapyItem +import pymysql + +class JdCrawlerScrapyPipeline: + def __init__(self): + self.mysql_con = None + + def process_item(self, item, spider): + if not self.mysql_con: + self.mysql_con = pymysql.connect(**spider.settings["MYSQL_CONF"]) + + if isinstance(item, JdCrawlerScrapyItem): + cursor = self.mysql_con.cursor() + SQL = """INSERT INTO jd_search(sku_id, img, price, title, shop, icons, keyword, sta_date) + VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')""".format( + item['sku_id'], 
item['img'], item['price'], item['title'], item['shop'], item['icons'], item['keyword'], item['sta_date'] + ) + cursor.execute(SQL) + self.mysql_con.commit() + cursor.close() + + return item diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/run.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/run.py" new file mode 100644 index 0000000000000000000000000000000000000000..066217194c3487124795dec380bafafd114ef6b0 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/run.py" @@ -0,0 +1,4 @@ +from scrapy import cmdline + +command = "scrapy crawl jd_search".split() +cmdline.execute(command) \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/scripts/jd_producer.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/scripts/jd_producer.py" new file mode 100644 index 0000000000000000000000000000000000000000..dc356206d474b61a66ae3d0e6edc6f81ee4a2d75 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/scripts/jd_producer.py" @@ -0,0 +1,27 @@ +import redis +import time +import json + +redis_con = redis.Redis(host="localhost", port=6379, db=5) + +def search_producer(): + for keyword in ["键盘", "耳机", "笔记本电脑"]: + for page_num in range(1, 3): + url = f"https://search.jd.com/Search?keyword={keyword}&page={page_num}" + meta = { + "sta_date": time.strftime("%Y-%m-%d"), + "keyword": keyword, + "page_num": page_num + } + + task = json.dumps({ + "url": url, + "body": '', + "method": "GET", + "meta": meta + }) + redis_con.lpush("jd_search:start_urls", task) + + +if __name__ == "__main__": + search_producer() \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/settings.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/settings.py" new file mode 100644 index 0000000000000000000000000000000000000000..b11634f462a6bfea7be9b0152438a061639b7177 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/settings.py" @@ -0,0 +1,131 @@ +# Scrapy settings for jd_crawler_scrapy project +# +# For simplicity, this file contains only settings considered important or +# commonly used. 
You can find more settings consulting the documentation: +# +# https://docs.scrapy.org/en/latest/topics/settings.html +# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +BOT_NAME = 'jd_crawler_scrapy' + +SPIDER_MODULES = ['jd_crawler_scrapy.spiders'] +NEWSPIDER_MODULE = 'jd_crawler_scrapy.spiders' + + +# Crawl responsibly by identifying yourself (and your website) on the user-agent +USER_AGENT = 'jd_crawler_scrapy (+http://www.yourdomain.com)' + +# Obey robots.txt rules +ROBOTSTXT_OBEY = False + +# Enable redirect +REDIRECT_ENABLE = False + +# Retry +RETRY_ENABLE = False +RETRY_HTTP_CODES = [500, 502, 503, 504, 408, 429] + +# Configure maximum concurrent requests performed by Scrapy (default: 16) +CONCURRENT_REQUESTS = 1 + +# Configure a delay for requests for the same website (default: 0) +# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay +# See also autothrottle settings and docs +DOWNLOAD_DELAY = 2 +# The download delay setting will honor only one of: +#CONCURRENT_REQUESTS_PER_DOMAIN = 16 +#CONCURRENT_REQUESTS_PER_IP = 16 + +# Disable cookies (enabled by default) +#COOKIES_ENABLED = False + +# Disable Telnet Console (enabled by default) +#TELNETCONSOLE_ENABLED = False + +# Override the default request headers: +# DEFAULT_REQUEST_HEADERS = { +# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', +# 'Accept-Language': 'en', +# } + +# Enable or disable spider middlewares +# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html +#SPIDER_MIDDLEWARES = { +# 'jd_crawler_scrapy.middlewares.JdCrawlerScrapySpiderMiddleware': 543, +#} + +# Enable or disable downloader middlewares +# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html +DOWNLOADER_MIDDLEWARES = { +# 'jd_crawler_scrapy.middlewares.JdCrawlerScrapyDownloaderMiddleware': 543, + 'jd_crawler_scrapy.middlewares.UAMiddleware': 100, + 'jd_crawler_scrapy.middlewares.MyRetryMiddleware': 200, + 'jd_crawler_scrapy.middlewares.CookieMiddleware': 150, +} + +# Enable or disable extensions +# See https://docs.scrapy.org/en/latest/topics/extensions.html +#EXTENSIONS = { +# 'scrapy.extensions.telnet.TelnetConsole': None, +#} + +# Configure item pipelines +# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html +ITEM_PIPELINES = { + 'jd_crawler_scrapy.pipelines.JdCrawlerScrapyPipeline': 300, +} + +# Enable and configure the AutoThrottle extension (disabled by default) +# See https://docs.scrapy.org/en/latest/topics/autothrottle.html +#AUTOTHROTTLE_ENABLED = True +# The initial download delay +#AUTOTHROTTLE_START_DELAY = 5 +# The maximum download delay to be set in case of high latencies +#AUTOTHROTTLE_MAX_DELAY = 60 +# The average number of requests Scrapy should be sending in parallel to +# each remote server +#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 +# Enable showing throttling stats for every response received: +#AUTOTHROTTLE_DEBUG = False + +# Enable and configure HTTP caching (disabled by default) +# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings +#HTTPCACHE_ENABLED = True +#HTTPCACHE_EXPIRATION_SECS = 0 +#HTTPCACHE_DIR = 'httpcache' +#HTTPCACHE_IGNORE_HTTP_CODES = [] +#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' + +#MYSQL CONF +MYSQL_CONF = { + "host": "127.0.0.1", + "user": "root", + "password": "lp82nlf", + "db": "wong_mysql" +} + +#LOG +# LOG_FILE = "D:/LOG/jd_search.log" +# 
LOG_LEVEL = "ERROR" + +# DUPEFILTER = "jd_crawler_scrapy.middlewares.MyRFPDupeFilter" + + +# Scrapy-redis settings 更换调度器 +SCHEDULER = 'scrapy_redis.scheduler.Scheduler' + +# SCHEDULER-QUEUE-CLASS 更换消息队列 +SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue' + +# DUPEFILTER-CLASS 更换过滤器,将请求指纹保存在redis中 +DUPEFILTER_CLASS = 'scrapy.redis.dupefilter.RFPDupeFilter' + +# SCHEDULER-PERSIST 消息队列持久化,不会清空redis中的消息队列 +SCHEDULER_PERSIST = True + + +# Redis settings +REDIS_HOST = 'localhost' +REDIS_PORT = 6379 +REDIS_PARAMS = {"db": 5} \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/__init__.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/__init__.py" new file mode 100644 index 0000000000000000000000000000000000000000..ebd689ac51d69c5e1dbbe80083c2b20a39f8bb79 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/__init__.py" @@ -0,0 +1,4 @@ +# This package will contain the spiders of your Scrapy project +# +# Please refer to the documentation for information on how to create and manage +# your spiders. diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/jd_search.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/jd_search.py" new file mode 100644 index 0000000000000000000000000000000000000000..bd5d38d7df1d37abb45374e01b141502976fb338 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/jd_crawler_scrapy/spiders/jd_search.py" @@ -0,0 +1,61 @@ +import scrapy +from bs4 import BeautifulSoup +import json +import time +from jd_crawler_scrapy.items import JdCrawlerScrapyItem +from scrapy.exceptions import CloseSpider +from scrapy_redis.spiders import RedisSpider + + +class JdSearch(RedisSpider): + name = "jd_search" + redis_key = f"{name}:start_urls" + + def make_request_from_data(self, data): + task = json.loads(data.decode("urf-8")) + return scrapy.http.FormRequest( + url=task["url"], + formdata=json.loads(task["body"]) if task["body"] else '', + method=task["method"], + meta=task["meta"], + dont_filter=False, + callback=self.parse_search, + errback=self.process_error + ) + + def parse_search(self, response): + print(response) + soup = BeautifulSoup(response.text, "lxml") + item_array = soup.select("ul[class='gl-warp clearfix'] li[class='gl-item']") + for item in item_array: + try: + sku_id = item.attrs["data-sku"] + img = item.select("img[data-img='1']") + price = item.select("div[class='p-price']") + title = item.select("div[class='p-name p-name-type-2']") + shop = item.select("div[class='p-shop']") + icons = item.select("div[class='p-icons']") + + img = img[0].attrs['data-lazy-img'] if img else "" + price = price[0].strong.i.text if price else "" + title = title[0].text.strip() if title else "" + shop = shop[0].a.attrs['title'] if shop[0].text.strip() else "" + icons = json.dumps([tag_ele.text for tag_ele in icons[0].select("i")]) if icons else '[]' + + item = JdCrawlerScrapyItem() + item["sku_id"] = sku_id + item["img"] = img + 
item["price"] = price + item["title"] = title + item["shop"] = shop + item["icons"] = icons + item["sta_date"] = response.meta["sta_date"] + item["keyword"] = response.meta["keyword"] + yield item + + except Exception as e: + print(e.args) + + def process_error(self, failure): + print(failure) + diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/scrapy.cfg" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/scrapy.cfg" new file mode 100644 index 0000000000000000000000000000000000000000..b6086855c9bbe4b6c4c9381fdc30da8031610cde --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/jd_crawler_scrapy/scrapy.cfg" @@ -0,0 +1,11 @@ +# Automatically created by: scrapy startproject +# +# For more information about the [deploy] section see: +# https://scrapyd.readthedocs.io/en/latest/deploy.html + +[settings] +default = jd_crawler_scrapy.settings + +[deploy] +#url = http://localhost:6800/ +project = jd_crawler_scrapy diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/.gitignore" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/.gitignore" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/inspectionProfiles/profiles_settings.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/inspectionProfiles/profiles_settings.xml" new file mode 100644 index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/inspectionProfiles/profiles_settings.xml" @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/misc.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/misc.xml" new file mode 100644 index 0000000000000000000000000000000000000000..65531ca992813bbfedbe43dfae5a5f4337168ed8 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/misc.xml" @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/modules.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/modules.xml" new file mode 100644 index 0000000000000000000000000000000000000000..61bb8b0f2ce72743b90eec2a20359ca7f8b323cc --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/modules.xml" @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git 
"a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/vcs.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/vcs.xml" new file mode 100644 index 0000000000000000000000000000000000000000..4fce1d86b49521afe1cee4ed1c13b6396ebbc6f3 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/vcs.xml" @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/week10.iml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/week10.iml" new file mode 100644 index 0000000000000000000000000000000000000000..a193443617b9b03a23e930fa3bf830a8fd85af0c --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/week10.iml" @@ -0,0 +1,11 @@ + + + + + + + + + + \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/workspace.xml" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/workspace.xml" new file mode 100644 index 0000000000000000000000000000000000000000..f0f7fccb8ff17ad1c1caecac3ed6fff72cfbaf4e --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week10/.idea/workspace.xml" @@ -0,0 +1,167 @@ + + + + + + + + + + + + + + + + + + + + + + +
[week10/.idea/workspace.xml and an accompanying saved jd.com search-results page survive here only as stripped page text: a shopping-cart link, an A-Z brand filter, peripheral/PC category filters, connection-type and usage-scenario filters, pagination, and the standard shopping-guide/delivery/payment/after-sales/featured-services footer. The original markup is not recoverable, so the hunk body is omitted.]
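jd_producer.py above pushes each search task onto the jd_search:start_urls list in Redis db 5 as a UTF-8 JSON string with url, body, method and meta fields, and JdSearch.make_request_from_data turns every popped entry back into a request. The following is a minimal debugging sketch for peeking at that queue under the same assumptions (local Redis, db 5, the same key); the inspect_search_queue helper is illustrative and not part of the project.

import json
import redis

# Same connection parameters as jd_producer.py and REDIS_PARAMS in settings.py
redis_con = redis.Redis(host="localhost", port=6379, db=5)

def inspect_search_queue(key="jd_search:start_urls", limit=5):
    # Print the queue depth, then decode the first few tasks without removing them
    print("queued tasks:", redis_con.llen(key))
    for raw in redis_con.lrange(key, 0, limit - 1):
        task = json.loads(raw.decode("utf-8"))
        print(task["method"], task["url"], task["meta"])

if __name__ == "__main__":
    inspect_search_queue()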
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week11/redis_test.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week11/redis_test.py" new file mode 100644 index 0000000000000000000000000000000000000000..44013f4a34579ae9b95ec4ab8c3486c1300854ce --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week11/redis_test.py" @@ -0,0 +1,93 @@ +import redis + +# Connect to Redis and pick the database to use (db=0) +redis_con = redis.Redis(host="127.0.0.1", port=6379, db=0) + + +# String CRUD operations +redis_con.set('aa', 1) +redis_con.set('bb', 2) +redis_con.set('cc', 3) + +set_a = redis_con.get('bb') +print(set_a) + +redis_con.set('aa', 33) +set_b = redis_con.get('aa') +print(set_b) + +redis_con.delete('cc') +set_c = redis_con.get('cc') +print(set_c) + + +# Hash CRUD operations +redis_con.hset('hash_test', 'a', 1) +redis_con.hset('hash_test', 'b', 2) +redis_con.hset('hash_test', 'c', 3) + +hash_a = redis_con.hget('hash_test', 'a') +print(hash_a) + +redis_con.hset('hash_test', 'c', 33) +hash_b = redis_con.hget('hash_test', 'c') +print(hash_b) + +redis_con.hdel('hash_test', 'b') +hash_c = redis_con.hget('hash_test', 'b') +print(hash_c) + + +# List CRUD operations +redis_con.lpush('list_test', 10, 20, 15, 8) + +list_a = redis_con.lindex('list_test', 2) +print(list_a) + +list_b = redis_con.lrange('list_test', 0, 3) +print(list_b) + +redis_con.linsert('list_test', 'before', 20, 10) + +redis_con.rpop('list_test') +redis_con.lpop('list_test') + +redis_con.lrem('list_test', 2, 15) + + +# Set CRUD operations +redis_con.sadd('set_test', 1, 3, 9, 7, 5) + + +set_a = redis_con.smembers('set_test') +print(set_a) + +set_b = redis_con.sismember('set_test', 6) +print(set_b) + +set_c = redis_con.sismember('set_test', 3) +print(set_c) + +set_d = redis_con.srandmember('set_test', 3) +print(set_d) + +redis_con.spop('set_test', 2) + +# Zset (sorted set) CRUD operations +redis_con.zadd('zset_test', {'a': 10, 'b': 20, 'c': 60, 'd': 30, 'e': 40 }) + +zset_a = redis_con.zcard('zset_test') +print(zset_a) + +zset_b = redis_con.zcount('zset_test', 20, 40) +print(zset_b) + +zset_c = redis_con.zrangebyscore('zset_test', 20, 40) +print(zset_c) + +print(redis_con.zscore('zset_test', 'b')) +redis_con.zincrby('zset_test', 5, 'b') +print(redis_con.zscore('zset_test', 'b')) + +redis_con.zremrangebyrank('zset_test', 2, 3) +redis_con.zremrangebyscore('zset_test', 10, 30) diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week12/class1.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/2\347\217\255/2\347\217\255_Wong/week12/class1.py" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
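A note on JdCrawlerScrapyPipeline above: it builds the INSERT statement with str.format and hand-quoted values, which breaks as soon as a title or shop name contains a quote character and leaves the write open to SQL injection. Below is a minimal sketch of the same write using pymysql's parameterized execute, assuming the jd_search table and the MYSQL_CONF dict from settings.py; save_search_item is an illustrative name rather than the project's API.

import pymysql

from jd_crawler_scrapy.settings import MYSQL_CONF

def save_search_item(item):
    # Insert one parsed search result; %s placeholders let the driver handle quoting and escaping
    connection = pymysql.connect(**MYSQL_CONF)
    try:
        with connection.cursor() as cursor:
            sql = ("INSERT INTO jd_search(sku_id, img, price, title, shop, icons, keyword, sta_date) "
                   "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
            cursor.execute(sql, (item["sku_id"], item["img"], item["price"], item["title"],
                                 item["shop"], item["icons"], item["keyword"], item["sta_date"]))
        connection.commit()
    finally:
        connection.close()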