diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/.keep" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/.keep" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/items.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/items.py" new file mode 100644 index 0000000000000000000000000000000000000000..9d6efdf8b8f8d8f89ce6cf9e041154b1ca2ba69e --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/items.py" @@ -0,0 +1,16 @@ +# Define here the models for your scraped items +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/items.html + +import scrapy + + +class JdCrawlerScrapyItem(scrapy.Item): + # define the fields for your item here like: + sku_id = scrapy.Field() + img = scrapy.Field() + price = scrapy.Field() + title = scrapy.Field() + shop = scrapy.Field() + icons = scrapy.Field() diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/middlewares.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/middlewares.py" new file mode 100644 index 0000000000000000000000000000000000000000..3b6a2c4adc17a8ef5ebe27fcd44115514c3e2000 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/middlewares.py" @@ -0,0 +1,180 @@ +# Define here the models for your spider middleware +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html +import hashlib +import weakref + +from scrapy import signals + +# useful for handling different item types with a single interface +from itemadapter import is_item, ItemAdapter +from scrapy.downloadermiddlewares.retry import RetryMiddleware +from 
scrapy.utils.response import response_status_message +from scrapy.dupefilters import RFPDupeFilter +from scrapy.utils.python import to_bytes +from w3lib.url import canonicalize_url + + +class JdSearchSpiderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_spider_input(self, response, spider): + # Called for each response that goes through the spider + # middleware and into the spider. + + # Should return None or raise an exception. + return None + + def process_spider_output(self, response, result, spider): + # Called with the results returned from the Spider, after + # it has processed the response. + + # Must return an iterable of Request, or item objects. + for i in result: + yield i + + def process_spider_exception(self, response, exception, spider): + # Called when a spider or process_spider_input() method + # (from other spider middleware) raises an exception. + + # Should return either None or an iterable of Request or item objects. + pass + + def process_start_requests(self, start_requests, spider): + # Called with the start requests of the spider, and works + # similarly to the process_spider_output() method, except + # that it doesn’t have a response associated. + + # Must return only requests (not items). + for r in start_requests: + yield r + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + + +class JdSearchDownloaderMiddleware: + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the downloader middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_request(self, request, spider): + # Called for each request that goes through the downloader + # middleware. + + # Must either: + # - return None: continue processing this request + # - or return a Response object + # - or return a Request object + # - or raise IgnoreRequest: process_exception() methods of + # installed downloader middleware will be called + return None + + def process_response(self, request, response, spider): + # Called with the response returned from the downloader. + + # Must either; + # - return a Response object + # - return a Request object + # - or raise IgnoreRequest + return response + + def process_exception(self, request, exception, spider): + # Called when a download handler or a process_request() + # (from other downloader middleware) raises an exception. + + # Must either: + # - return None: continue processing this exception + # - return a Response object: stops process_exception() chain + # - return a Request object: stops process_exception() chain + pass + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + + +class JdSearchUAMiddleware: + + def process_request(self, request, spider): + # This method is used by Scrapy to add user agent headers. 
+        request.headers["user-agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36"
+
+
+# W11_L1 retry middleware
+class JdSearchRetryMiddleware(RetryMiddleware):
+
+    def process_response(self, request, response, spider):
+        if request.meta.get('dont_retry', False):
+            return response
+        # JD serves a captcha page instead of an error code, so retry on its marker text
+        if "验证码" in response.text:
+            reason = response_status_message(response.status)
+            return self._retry(request, reason, spider) or response
+        return response
+
+    def process_exception(self, request, exception, spider):
+        if isinstance(exception, IndexError):
+            # decrement the retry counter recorded in meta and reschedule the request
+            retry_times = request.meta.get('retry_times', 1)
+            request.meta['retry_times'] = retry_times - 1
+            return request
+
+
+# W11_L1 dupe filter
+_fingerprint_cache = weakref.WeakKeyDictionary()
+
+
+class JdSearchRfpDupeMiddleware(RFPDupeFilter):
+    def request_fingerprint(self, request, include_headers=None, keep_fragments=False):
+        cache = _fingerprint_cache.setdefault(request, {})
+        cache_key = (include_headers, keep_fragments)
+        if cache_key not in cache:
+            fp = hashlib.sha1()
+            fp.update(to_bytes(request.method))
+            fp.update(to_bytes(canonicalize_url(request.url, keep_fragments=keep_fragments)))
+            fp.update(request.body or b'')
+            # mix the batch number into the fingerprint so the same URL can be
+            # crawled again in a later batch without being filtered as a duplicate
+            fp.update(request.meta.get("batch_no", "").encode("utf-8"))
+            cache[cache_key] = fp.hexdigest()
+        return cache[cache_key]
+
+
+# W11_L2 exception-handling retry middleware: deals with responses whose HTTP code
+# is 403/500 (and other 4xx/5xx) and with download exceptions
+from twisted.internet import defer
+from twisted.internet.error import DNSLookupError, TimeoutError, \
+    ConnectionLost, ConnectionDone, ConnectError, TCPTimedOutError, \
+    ConnectionRefusedError
+from scrapy.http import HtmlResponse
+from twisted.web.client import ResponseFailed
+from scrapy.core.downloader.handlers.http11 import TunnelError
+
+
+class ProcessAllExceptionMiddleware(object):
+    ALL_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
+                      ConnectionRefusedError, ConnectionDone, ConnectError,
+                      ConnectionLost, TCPTimedOutError, ResponseFailed,
+                      IOError, TunnelError)
+
+    def process_response(self, request, response, spider):
+        # swap 4xx/5xx responses for an empty response so the spider is not interrupted
+        if str(response.status).startswith('4') or str(response.status).startswith('5'):
+            response = HtmlResponse(url='')
+            return response
+        return response
+
+    def process_exception(self, request, exception, spider):
+        if isinstance(exception, self.ALL_EXCEPTIONS):
+            print("get exception %s" % exception)
+            response = HtmlResponse(url='exception')
+            return response
+        print("no exception")
\ No newline at end of file
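Note on the dupe filter above: `JdSearchRfpDupeMiddleware` only changes behaviour if requests actually carry a `batch_no` value in their meta. A minimal sketch of how a spider might tag its requests; the spider name and the date-based batch format are assumptions for illustration, not part of this diff:

```python
import time

import scrapy


class BatchTaggedSpider(scrapy.Spider):
    """Hypothetical spider showing how batch_no reaches the request fingerprint."""
    name = "batch_tagged_demo"  # assumed name, not part of the project

    def start_requests(self):
        batch_no = time.strftime("%Y%m%d")  # e.g. one crawl batch per day (assumption)
        yield scrapy.Request(
            url="https://search.jd.com/Search?keyword=手机&page=1",
            # JdSearchRfpDupeMiddleware mixes this value into the fingerprint, so the
            # same URL is not treated as a duplicate when re-crawled in a later batch
            meta={"batch_no": batch_no},
            callback=self.parse,
        )

    def parse(self, response):
        pass
```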
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/pipelines.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/pipelines.py"
new file mode 100644
index 0000000000000000000000000000000000000000..b4394ab0d543e7de925abf03e34d1a2851eaefaa
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/pipelines.py"
@@ -0,0 +1,30 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+from jd_crawler_scrapy.items import JdCrawlerScrapyItem
+import pymysql
+
+
+class JdCrawlerScrapyPipeline:
+    def __init__(self):
+        self.mysql_con = None
+
+    def process_item(self, item, spider):
+        # connect lazily on the first item, using MYSQL_CONF from settings.py
+        if not self.mysql_con:
+            self.mysql_con = pymysql.connect(**spider.settings["MYSQL_CONF"])
+
+        if isinstance(item, JdCrawlerScrapyItem):
+            cursor = self.mysql_con.cursor()
+            # parameterized query, so quotes inside titles or shop names cannot break the SQL
+            SQL = """INSERT INTO jd_search(sku_id, img, price, title, shop, icons)
+                     VALUES (%s, %s, %s, %s, %s, %s)"""
+            cursor.execute(SQL, (item['sku_id'], item['img'], item['price'],
+                                 item['title'], item['shop'], item['icons']))
+            self.mysql_con.commit()
+            cursor.close()
+        return item
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/run.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/run.py"
new file mode 100644
index 0000000000000000000000000000000000000000..d65b88a0b2aeb9dfeeb123844ab46fd82295043f
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/run.py"
@@ -0,0 +1,4 @@
+from scrapy import cmdline
+
+command = "scrapy crawl jd_search".split()
+cmdline.execute(command)
\ No newline at end of file
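The pipeline above assumes a `jd_search` table already exists in the `tunan_class` database. A one-off sketch for creating it with pymysql; the column types and lengths are guesses, only the column names and the connection values (from `MYSQL_CONF` in settings.py) come from this diff:

```python
import pymysql

# connection values copied from MYSQL_CONF in settings.py
con = pymysql.connect(host="127.0.0.1", user="root", password="qwe369", db="tunan_class")

DDL = """
CREATE TABLE IF NOT EXISTS jd_search (
    sku_id VARCHAR(32),
    img    VARCHAR(512),
    price  VARCHAR(32),
    title  VARCHAR(512),
    shop   VARCHAR(256),
    icons  VARCHAR(512)
)
"""

with con.cursor() as cursor:
    cursor.execute(DDL)
con.commit()
con.close()
```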
diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/settings.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/settings.py"
new file mode 100644
index 0000000000000000000000000000000000000000..8e4415b059f15d1921568c2be4c3cbb52d80819b
--- /dev/null
+++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/settings.py"
@@ -0,0 +1,113 @@
+# Scrapy settings for jd_crawler_scrapy project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'jd_crawler_scrapy'
+
+SPIDER_MODULES = ['jd_crawler_scrapy.spiders']
+NEWSPIDER_MODULE = 'jd_crawler_scrapy.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'jd_crawler_scrapy (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Disable redirects (the setting Scrapy reads is REDIRECT_ENABLED)
+REDIRECT_ENABLED = False
+
+# Retry: RETRY_ENABLED must stay True, otherwise the RetryMiddleware subclass in
+# middlewares.py raises NotConfigured and is skipped
+RETRY_ENABLED = True
+RETRY_HTTP_CODES = [500, 502, 503, 504, 408, 429]
+
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+CONCURRENT_REQUESTS = 1
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'jd_crawler_scrapy.middlewares.JdCrawlerScrapySpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+DOWNLOADER_MIDDLEWARES = {
+    # 'jd_crawler_scrapy.middlewares.JdCrawlerScrapyDownloaderMiddleware': 543,
+    # class paths must match the classes actually defined in middlewares.py
+    'jd_crawler_scrapy.middlewares.JdSearchUAMiddleware': 100,
+    'jd_crawler_scrapy.middlewares.JdSearchRetryMiddleware': 200,
+}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'jd_crawler_scrapy.pipelines.JdCrawlerScrapyPipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+
+# MYSQL CONF
+MYSQL_CONF = {
+    "host": "127.0.0.1",
+    "user": "root",
+    "password": "qwe369",
+    "db": "tunan_class"
+}
+
+# LOG
+LOG_FILE = "H:/log/jd_search.log"
+LOG_LEVEL = "ERROR"
+
+# DUP
+DUPEFILTER_CLASS = "jd_crawler_scrapy.middlewares.JdSearchRfpDupeMiddleware"
\ No newline at end of file
"jd_crawler_scrapy.middlewares.MyRFPDupeFilter" \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/spiders/jd_search.py" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/spiders/jd_search.py" new file mode 100644 index 0000000000000000000000000000000000000000..8fa2525c049d9630ae92115cfb0c63211723d633 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\200\344\272\214\350\212\202/jd_crawler_scrapy/spiders/jd_search.py" @@ -0,0 +1,64 @@ +import scrapy +from bs4 import BeautifulSoup +import json +from items import JdCrawlerScrapyItem +from twisted.internet.error import TimeoutError +from scrapy.spidermiddlewares.httperror import HttpError + + +class JdSearchSpider(scrapy.Spider): + name = "jd_search" + + def start_requests(self): + search_array = ["手机", "电脑", "显卡", "内存"] + for keyword in search_array: + for page in range(1, 4): + url = f'https://search.jd.com/Search?keyword={keyword}&page={page}' + + yield scrapy.FormRequest( + dont_filter=False, + url=url, + method='GET', + errback=self.err_back, + callback=self.parse_search + ) + + + def parse_search(self, response): + html = response.text + soup = BeautifulSoup(html, 'lxml') + content = soup.select("ul[class='gl-warp clearfix'] li[class='gl-item']") + for item in content: + try: + sku_id = item.attrs["data-sku"] + img = item.select("img[data-img='1']") + price = item.select("div[class='p-price']") + title = item.select("div[class='p-name p-name-type-2'] em") + shop = item.select("div[class='p-shop']") + icons = item.select("div[class='p-icons']") + + img = img[0].attrs['data-lazy-img'] if img else "" + price = price[0].strong.i.text.strip() if price else "" + title = title[0].text.strip() if title else "" + shop = shop[0].text.strip() if shop else "" + icons = json.dumps([ele.text.strip() for ele in icons[0].select('i')]) if icons else '[]' + + items = JdCrawlerScrapyItem() + items["sku_id"] = sku_id + items["img"] = img + items["price"] = price + items["title"] = title + items["shop"] = shop + items["icons"] = icons + yield items + + except Exception as e: + print(e.args) + + + def err_back(self, failure): + if failure.check(TimeoutError): + print('Timeout error on %s website', failure.request.url) + elif failure.check(HttpError): + response = failure.value.response + print('HttpError on %s' % response.url) \ No newline at end of file diff --git "a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\211\350\212\202/.keep" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\211\350\212\202/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
"a/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\211\350\212\202/radis\345\237\272\347\241\200.md" "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\211\350\212\202/radis\345\237\272\347\241\200.md" new file mode 100644 index 0000000000000000000000000000000000000000..cdc2b991a04a439cae21172129e8f5d5d999e968 --- /dev/null +++ "b/\347\254\254\344\272\214\346\234\237\350\256\255\347\273\203\350\220\245/4\347\217\255/4\347\217\255_LiPing/\347\254\254\345\215\201\344\270\200\345\221\250/\347\254\254\344\270\211\350\212\202/radis\345\237\272\347\241\200.md" @@ -0,0 +1,306 @@ +# 第十一周-第三节课 + +## redis简单介绍 + +``` +https://redis.io/ +``` + +- 什么是redis? + + `remote dictionary server`, 远程字典服务. 顾名思义, redis的基础存储方式是键对值, 是一种常用的非关系型数据库. + +- redis的特点 + + - 内存操作, 相对于传统数据库从硬盘上读取数据速度, 快了一个数量级. + + - redis服务是**单线程工作模式**, 不会产生一般的并发问题. + + 在我们使用多线程或者多进程去进行一般IO操作会发生读写冲突. 如果我们的多线程和多进程程序去读取redis消息队列当中的值时, 由于redis是单线程工作模式, 读事件和写事件都会按顺序执行, 避免了并发问题. + +## redis的安装 + +- 服务端 + + - windows + + ``` + https://github.com/microsoftarchive/redis + ``` + + - ubuntu + + ``` + apt-get install redis-server + ``` + + - mac + + ``` + brew install redis + ``` + +- 客户端 + + - windows + + - redis-desktop + + - ubuntu + + ``` + # 没有图形界面 + apt-get install redis-cli + ``` + + - mac + + ``` + medis + ``` + + - python连接 + + ``` + pip install redis + ``` + + ``` + import redis + + # 指定使用的database + redis_con = redis.Redis(host='localhost', port=6379, db=2) + ``` + +## redis的数据类型和CRUD + +``` +通过redis-cli命名进行连接 + +redis-cli -h localhost -p 6379 + +# 选择使用的数据库 +C:\Users\TuNan>redis-cli +127.0.0.1:6379> select 2 +OK +127.0.0.1:6379[2]> + +# 查看当前库下所有的key +127.0.0.1:6379[2]> keys * +``` + +- String + + - Create + + ``` + set key value + ``` + + - Retrieve + + ``` + get key + ``` + + - Update + + ``` + set key value + ``` + + - Delete + + ``` + del key + ``` + +- Hash + + - Create + + ``` + hset 变量名 key value + ``` + + - Retrieve + + ``` + hget 变量名 key + ``` + + - Update + + ``` + hset 变量名 key value + ``` + + - Delete + + ``` + hdel 变量名 key + ``` + +- List + + - Create + + ``` + lpush 变量名 value + ``` + + - Retrieve + + - 通过索引取值 + + ``` + lindex 变量名 索引值 + + lindex 'list_test' 1 + ``` + + - 通过索引取一定范围的值 + + ``` + lrange 变量名 start end + + lrange 'list_test' 0 2 + ``` + + - Update + + ``` + linsert 变量名 before[after] pivot value + + linsert 'list_test' before 'a' 'b' + ``` + + - Delete + + - rpop + + 从右侧, 也就是list**的末尾移除一个元素并返回** + + ``` + rpop 'list_test' + ``` + + - lpop + + 从左侧, 也就是头部移除一个元素并返回 + + - lrem + + 删除一定数目的元素 + + ``` + lrem 变量名 删除个数, 删除元素 + ``` + +- Set + + - Create + + ``` + sadd 变量名 value + ``` + + - Retrieve + + - 获取集合中的所有成员 + + ``` + smembers 变量名 + ``` + + - 判断当前元素是否在集合当中 + + ``` + sismember 变量名 value + sismember 'set_test' 2 + ``` + + - 返回随机数 + + ``` + srandmember 变量名 随机返回的数量 + srandmember 'set_test' 2 + ``` + + - Update + + 无 + + - Delete + + ``` + spop 变量名 + ``` + +- Zset + + 有序集合 + + > 可以通过有序集合完成一个复杂的优先级队列 + + - create + + ``` + zadd 变量名 分数 value + ``` + + - Retrieve + + - 返回当前成员数 + + ``` + zcard 'zset_test' + ``` + + - 获取一定分数区间的成员数 + + ``` + zcount 变量名 min_score max_score + ``` + + - 通过一定分数区间获取值 + + ``` + zrangebyscore 变量名 min_score max_score + + zrangebyscore 'zset_test' 2 1 + ``` + + - Update + + - 加分 + + ``` + zincrby 变量 加分值 value + + zincrby 'zset_test' 1 'b' + ``` + + - 
+
+## Homework
+
+- Set up the Redis server and a client
+- Practice CRUD operations on String, List, Hash and Set (a practice sketch follows at the end)
+- Extension: practice CRUD operations on Zset
\ No newline at end of file
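As a companion to the homework above, a small practice sketch using the `redis` Python package (db 2 as in the connection example earlier; all key names are arbitrary):

```python
import redis

r = redis.Redis(host="localhost", port=6379, db=2)

# String
r.set("str_test", "hello")
print(r.get("str_test"))             # b'hello'

# Hash
r.hset("hash_test", "field", "value")
print(r.hget("hash_test", "field"))  # b'value'

# List
r.lpush("list_test", "a", "b", "c")
print(r.lrange("list_test", 0, 2))   # [b'c', b'b', b'a']

# Set
r.sadd("set_test", 1, 2, 3)
print(r.sismember("set_test", 2))    # True

# clean up the practice keys
r.delete("str_test", "hash_test", "list_test", "set_test")
```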