From 667930cb4556643f3eb2c0905125db28ae64bc19 Mon Sep 17 00:00:00 2001 From: kunkun Date: Sun, 19 Apr 2020 02:30:49 +0800 Subject: [PATCH 01/13] kun --- config/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/__init__.py b/config/__init__.py index 776ce2f..418fa21 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -88,7 +88,7 @@ email['recNick']='' #默认收件人昵称 kcweb={} kcweb['name']='kcweb' #项目的名称 -kcweb['version']='2.40.2' #项目版本 +kcweb['version']='2.40.3' #项目版本 kcweb['description']='基于python后端开发框架' #项目的简单描述 kcweb['long_description']='' #项目详细描述 kcweb['license']='MIT' #开源协议 mit开源 -- Gitee From 3ba03eae870b0c5282d27204cbf0f2daefb76d09 Mon Sep 17 00:00:00 2001 From: kunkun Date: Tue, 5 May 2020 19:27:39 +0800 Subject: [PATCH 02/13] kun --- app.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index ac8f9ae..70a6f1e 100644 --- a/app.py +++ b/app.py @@ -149,7 +149,8 @@ class web: 'BODY_DATA':'' } status,resheader,body=self.__routes(reqheader) - print(body) + if 'body' not in body and 'html' not in body and '<' not in body and '>' not in body: + print(body) exit() def __impl(self,host,port,filename): "运行测试服务器" @@ -355,7 +356,7 @@ class web: resheader={"Content-Type":"text/html; charset=utf-8"} web.__set_globals(self,header) PATH_INFO=header['PATH_INFO'].split('/') - if PATH_INFO[0]==' ': + if PATH_INFO[0]==' ' or PATH_INFO[0]=='': del PATH_INFO[0] methods,modular,edition,files,funct,param=web.defaultroute(self,header,PATH_INFO) if header['REQUEST_METHOD'] in methods: -- Gitee From d14ad1368f1caff5aa3a5917e3a2750f832a42bf Mon Sep 17 00:00:00 2001 From: kunkun Date: Mon, 11 May 2020 17:55:56 +0800 Subject: [PATCH 03/13] kun --- app.py | 47 +++++++++++------- application/api/common/__init__.py | 4 +- application/api/common/autoload.py | 2 + application/common/__init__.py | 4 +- application/common/autoload.py | 5 ++ common/autoload.py | 77 +++++++++++++++++------------- common/request.py | 11 
+++++ config/__init__.py | 2 +- create.py | 4 ++ utill/http.py | 6 +++ 10 files changed, 104 insertions(+), 58 deletions(-) create mode 100644 application/api/common/autoload.py create mode 100644 application/common/autoload.py diff --git a/app.py b/app.py index 70a6f1e..53cb075 100644 --- a/app.py +++ b/app.py @@ -53,7 +53,10 @@ class web: ] else: status,resheader,body=self.__routes(self,env) - body=bytes(body, encoding='utf-8') + if type(body) is bytes: + pass + else: + body=bytes(body, encoding='utf-8') # print(env['bodydata']) # print("\n\nwsgi.input",env['wsgi.input']) # print("\n\ndir(env['wsgi.input'])",dir(env['wsgi.input'])) @@ -295,21 +298,22 @@ class web: i+=1 else: #完全默认 routedefault,PATH_INFO=web.__getconfigroute(self,PATH_INFO,header) - for path in PATH_INFO: - if path: - if i==0: - modular=path - elif i==1: - edition=path - elif i==2: - files=path - urls=urls+"/"+str(path) - elif i==3: - funct=path - urls=urls+"/"+str(path) - else: - param.append(urllib.parse.unquote(path)) - i+=1 + if routedefault: #使用默认路由 + for path in PATH_INFO: + if path: + if i==0: + modular=path + elif i==1: + edition=path + elif i==2: + files=path + urls=urls+"/"+str(path) + elif i==3: + funct=path + urls=urls+"/"+str(path) + else: + param.append(urllib.parse.unquote(path)) + i+=1 #默认路由end ############################################################ return methods,modular,edition,files,funct,tuple(param) def __tran(self,data,status,resheader): @@ -575,7 +579,11 @@ class web: if isinstance(resheaders,list): if not body: body='' - return str(status),resheaders,str(body) + if type(body) is bytes: + pass + else: + body=str(body) + return str(status),resheaders,body else: raise Exception() def __tpl(**context): @@ -680,7 +688,10 @@ class web: header+="Content-Length:%d\n" % len(body) else: status,resheader,body=self.__routes(reqheader) - body=body.encode() + if type(body) is bytes: + pass + else: + body=body.encode() header="HTTP/1.1 %s \n" % status header+="Content-Length:%d\n" % 
len(body) diff --git a/application/api/common/__init__.py b/application/api/common/__init__.py index 33d1e0d..0917cdf 100644 --- a/application/api/common/__init__.py +++ b/application/api/common/__init__.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -from ${appname}.common import * -#下面的方法在当前模块中有效 -G=globals.G +from .autoload import * #下面的方法在当前模块中有效 def before_request(): G.userinfo=get_session("userinfo") diff --git a/application/api/common/autoload.py b/application/api/common/autoload.py new file mode 100644 index 0000000..05965c2 --- /dev/null +++ b/application/api/common/autoload.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from ${appname}.common import * \ No newline at end of file diff --git a/application/common/__init__.py b/application/common/__init__.py index 39b5f60..05e1c79 100644 --- a/application/common/__init__.py +++ b/application/common/__init__.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -from kcweb.common import * -from ${appname} import config -import math +from .autoload import * def return_list(lists,count,pagenow,pagesize): """返回分页列表 diff --git a/application/common/autoload.py b/application/common/autoload.py new file mode 100644 index 0000000..99a5719 --- /dev/null +++ b/application/common/autoload.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from kcweb.common import * +from ${appname} import config +import math,random +G=globals.G \ No newline at end of file diff --git a/common/autoload.py b/common/autoload.py index 6512367..904761a 100644 --- a/common/autoload.py +++ b/common/autoload.py @@ -18,7 +18,18 @@ from email.utils import formataddr from . 
import globals redis=kcwredis() def send_mail(user,text="邮件内容",theme="邮件主题",recNick="收件人昵称"): - "发送邮件" + """发送邮件 + + 参数 user:接收邮件的邮箱地址 + + 参数 text:邮件内容 + + 参数 theme:邮件主题 + + 参数 recNick:收件人昵称 + + return Boolean类型 + """ ret=True if not theme: theme=config.email['theme'] @@ -39,8 +50,11 @@ def send_mail(user,text="邮件内容",theme="邮件主题",recNick="收件人 return ret get_sysinfodesffafew=None def get_sysinfo(): + """获取系统信息 + + return dict类型 + """ global get_sysinfodesffafew - "获取系统信息" if get_sysinfodesffafew: sysinfo=get_sysinfodesffafew else: @@ -94,7 +108,7 @@ def sqlite(table=None,configss=None): 参数 table:表名 - 参数 config 数据库配置 可以传数据库名字符串 + 参数 configss 数据库配置 可以传数据库名字符串 """ dbs=kcwsqlite.sqlite() if table is None: @@ -108,7 +122,7 @@ def M(table=None,confi=None): 参数 table:表名 - 参数 config 数据库配置 可以传数据库名字符串 + 参数 confi 数据库配置 可以传数据库名字符串 """ if confi: if confi['type']=='sqlite': @@ -136,9 +150,12 @@ def mongo(table=None,configss=None): return mObj.connect(config.mongo).table(table) def is_index(params,index): """判断列表或字典里的索引是否存在 + params 列表或字典 + index 索引值 - return True/False + + return Boolean类型 """ try: params[index] @@ -151,35 +168,47 @@ def is_index(params,index): def set_cache(name,values,expire="no"): """设置缓存 - 参数 name:缓存名 + 参数 name:缓存名 - 参数 values:缓存值 + 参数 values:缓存值 - 参数 expire:缓存有效期 0表示永久 单位 秒 - - return Boolean类型 - """ + 参数 expire:缓存有效期 0表示永久 单位 秒 + + return Boolean类型 + """ return kcwcache.cache().set_cache(name,values,expire) def get_cache(name): """获取缓存 + 参数 name:缓存名 + return 或者的值 """ return kcwcache.cache().get_cache(name) def del_cache(name): """删除缓存 + 参数 name:缓存名 + return Boolean类型 """ return kcwcache.cache().del_cache(name) def md5(strs): - """md5加密""" + """md5加密 + + 参数 strs:要加密的字符串 + + return String类型 + """ m = hashlib.md5() b = strs.encode(encoding='utf-8') m.update(b) return m.hexdigest() def times(): - """时间戳 精确到秒""" + """生成时间戳整数 精确到秒(10位数字) + + return int类型 + """ return int(time.time()) def json_decode(strs): """json字符串转python类型""" @@ -188,7 +217,7 @@ def 
json_decode(strs): except Exception: return {} def json_encode(strs): - """转成字符串""" + """python列表或字典转成字符串""" try: return json.dumps(strs,ensure_ascii=False) except Exception: @@ -290,22 +319,4 @@ def list_to_tree(data, pk = 'id', pid = 'pid', child = 'lowerlist', root=0,child if kkkk: v[child]=kkkk arr.append(v) - return arr -def get_url(url=''): - "获取版本下的url" - retstr='/' - HTTP_HOST=globals.HEADER.HTTP_HOST.split(".")[0] - route=config.route - modular=None - if route['modular']: - for mk in route['modular']: - if HTTP_HOST in mk: - modular=mk[HTTP_HOST] - if not modular: - retstr=retstr+globals.HEADER.PATH_INFO.split("/")[1]+"/" - if not route['edition']: - retstr=retstr+globals.HEADER.PATH_INFO.split("/")[2]+"/" - if not route['edition']: - retstr=retstr+globals.HEADER.PATH_INFO.split("/")[1]+"/" - retstr=retstr+url - return retstr \ No newline at end of file + return arr \ No newline at end of file diff --git a/common/request.py b/common/request.py index c2cbb47..3b2582a 100644 --- a/common/request.py +++ b/common/request.py @@ -22,6 +22,17 @@ class froms: except: k=None return k +class HEADER: + def Method(): + return kcwglobals.HEADER.Method + def URL(): + return kcwglobals.HEADER.URL + def PATH_INFO(): + return kcwglobals.HEADER.PATH_INFO + def SERVER_PROTOCOL(): + return kcwglobals.HEADER.SERVER_PROTOCOL + def HTTP_HOST(): + return kcwglobals.HEADER.HTTP_HOST def get_data(): "获取请求参数体" return kcwglobals.HEADER.BODY_DATA diff --git a/config/__init__.py b/config/__init__.py index 418fa21..554752a 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -88,7 +88,7 @@ email['recNick']='' #默认收件人昵称 kcweb={} kcweb['name']='kcweb' #项目的名称 -kcweb['version']='2.40.3' #项目版本 +kcweb['version']='2.40.7' #项目版本 kcweb['description']='基于python后端开发框架' #项目的简单描述 kcweb['long_description']='' #项目详细描述 kcweb['license']='MIT' #开源协议 mit开源 diff --git a/create.py b/create.py index 00686ed..f099bef 100644 --- a/create.py +++ b/create.py @@ -23,6 +23,10 @@ class create: 
content=Template(self.path+"/application/common/__init__.py",appname=appname,modular=modular) f.write(content) f.close() + f=open(self.appname+"/common/autoload.py","w+",encoding='utf-8') + content=Template(self.path+"/application/common/autoload.py",appname=appname,modular=modular) + f.write(content) + f.close() if not os.path.exists(self.appname+"/config"): os.makedirs(self.appname+"/config") f=open(self.appname+"/config/__init__.py","w+",encoding='utf-8') diff --git a/utill/http.py b/utill/http.py index ad04124..de2bc6a 100644 --- a/utill/http.py +++ b/utill/http.py @@ -15,6 +15,7 @@ class Http: get_header={} #响应头 get_cookies={} #得到最后的响应cookie + get_cookie_str='' #得到最后的响应cookie 字符串 get_text='' #得到body响应内容 get_content='' #得到二进制内容 get_response='' #得到响应对象 @@ -53,6 +54,11 @@ class Http: self.get_cookies=cookie if self.set_cookies: self.get_cookies=self.__merge(self.set_cookies,self.get_cookies) + if self.get_cookies: + cookies='' + for key in self.get_cookies: + cookies=cookies+key+"="+self.get_cookies[key]+";" + self.get_cookie_str=cookies self.get_text=response.text self.get_content=response.content self.get_response=response -- Gitee From be269c6176e1cd0631c652801a77ac8dbd7035a6 Mon Sep 17 00:00:00 2001 From: kunkun Date: Mon, 11 May 2020 18:52:01 +0800 Subject: [PATCH 04/13] kun --- README.md | 5 ----- common/autoload.py | 1 + utill/http.py | 14 +++++++------- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index d9f599c..7dee6dd 100644 --- a/README.md +++ b/README.md @@ -34,11 +34,6 @@ create("app","api") # 创建项目 │ │ │ │ ├─index │ │ │ │ │ ├─index.html 模块文件 │ │ ├─__init__.py 控制器初始化文件 -│ ├─script 命令行脚本 -│ │ ├─common 该模块的公共函数目录 -│ │ │ ├─__init__.py 函数文件 -│ │ │ ├─win.py 类文件 -│ │ ├─test.py 脚本文件 │ ├─static 静态资源目录 │ ├─runtime 缓存目录 │ ├─__init__.py 自动导入模块文件 diff --git a/common/autoload.py b/common/autoload.py index 904761a..e3fc4a3 100644 --- a/common/autoload.py +++ b/common/autoload.py @@ -16,6 +16,7 @@ import smtplib from 
email.mime.text import MIMEText from email.utils import formataddr from . import globals + redis=kcwredis() def send_mail(user,text="邮件内容",theme="邮件主题",recNick="收件人昵称"): """发送邮件 diff --git a/utill/http.py b/utill/http.py index de2bc6a..f8f5c61 100644 --- a/utill/http.py +++ b/utill/http.py @@ -13,13 +13,13 @@ class Http: set_encoding="utf-8" #设置text输出编码 set_session=True #是否启用会话 - get_header={} #响应头 - get_cookies={} #得到最后的响应cookie - get_cookie_str='' #得到最后的响应cookie 字符串 - get_text='' #得到body响应内容 - get_content='' #得到二进制内容 - get_response='' #得到响应对象 - get_status_code=None #响应状态码 + get_header={} #获取响应头 + get_cookies={} #获取最后的响应cookie + get_cookie_str='' #获取最后的响应cookie 字符串 + get_text='' #获取body响应内容 + get_content='' #获取二进制内容 + get_response='' #获取响应对象 + get_status_code=None #获取响应状态码 req=None def gettext(self): -- Gitee From 7c2acea7c4e371b8d77d2663fcf3eafec658ee67 Mon Sep 17 00:00:00 2001 From: kunkun Date: Mon, 11 May 2020 21:19:27 +0800 Subject: [PATCH 05/13] kun --- application/common/__init__.py | 1 + common/autoload.py | 2 +- utill/Queues | Bin 0 -> 12288 bytes utill/db/sqlitedata/kcwlicuxweb | 0 utill/queues.py | 101 ++++++++++++++++++++++++++++++++ 5 files changed, 103 insertions(+), 1 deletion(-) create mode 100644 utill/Queues create mode 100644 utill/db/sqlitedata/kcwlicuxweb create mode 100644 utill/queues.py diff --git a/application/common/__init__.py b/application/common/__init__.py index 05e1c79..e9df085 100644 --- a/application/common/__init__.py +++ b/application/common/__init__.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from .autoload import * +Queues.start() #开启队列监听 def return_list(lists,count,pagenow,pagesize): """返回分页列表 diff --git a/common/autoload.py b/common/autoload.py index e3fc4a3..4b917d1 100644 --- a/common/autoload.py +++ b/common/autoload.py @@ -9,9 +9,9 @@ from kcweb.utill.db import sqlite as kcwsqlite from kcweb.utill.cache import cache as kcwcache from kcweb.utill.redis import redis as kcwredis from kcweb.utill.http import Http +from 
kcweb.utill.queues import Queues from kcweb.utill.db import model from mako.template import Template as kcwTemplate - import smtplib from email.mime.text import MIMEText from email.utils import formataddr diff --git a/utill/Queues b/utill/Queues new file mode 100644 index 0000000000000000000000000000000000000000..0050cc420f96bc135d0aebe31526475db965bf56 GIT binary patch literal 12288 zcmeI2O>9(E6vt;M=@*@1OblY8JUeZCm}|ymg1;-0{Qs z=y_-^9zPL}`qigBIw1PQ(*& zZsl`L8)4;Iv2`XCyC+-Cmzz_?k7@KbG+dYZ5?rl!C zMvHkn_cq@0+SpY2%GlIa&PIxtn&V^B6Rl#)mQ28LGxLXI9!3ch9R`Lt=VxbJoXdk5 z5vIdne=Nj2-CMuQ?)cdwb8i)`=IgkQf7UWu#M#+nv#T%K5awr&+-BVxKC=95jt~8{ z_1lL|t-m~cWBAMAL&Fa>er=p@>}m84EexF+66^k0_szPvt{D7n@RPv z(f?QfxBW-^NBXYzecty*-vhnB^nTnseh-+qT4JR@r9h>?U7*1D{R6eVn;Z4bjkSh> zGBOAyh*6OU#;x#7X`n#{8QqO5pI*Q8<<%=!mOi?$_S%`Ru3tF2bm{E156{2(-M;yf zuW&Xs)>7t^HNc#w(ghu@ktUevSn`|*DemN~`TgRVrSCtudif$k-YZ##7^#>}j)~|Z zk;-`Bpfy4zPATzj5Sk&tg10e=;3zO}l~P7=sS-(!b45d!nUD;jQaYWyx7-s&y>Ng~ z zByC+pL8Re(?Ekjf^oG9m+Gf`-7wXw=`PpwlvA*KuDsSL5q5UOYdpCCA|lMNVgba4`1 zaNpS{C(DrxK{Z2&5v(_fglIWK<9lf+L>m?D9G#Qp)(lCC-~@QAd@;n#^nkGTMrgCEMiW85nAh@sI^$@J*ef{z*C!J zSGS&pa%Y}Q5*pC@1e-uc zVvJA3I!d8Slow0L5YX8F!D=2IWz6F)!!l)rVU%+01)Xznu{5_KMoLR@@s8onA|{S@ z3l{UyYZhD==P#5xLvRCNm<>W=Jwu~vhtZPS3KJn=`R*K@g>kV0LDNd>wN}{qsPcwM zN^yg6bF1M2b2_$l(tnv2w~o7(_g%PO`akBsBS7OS&xG^kqKtW)z$@%NAu$q?Fd})C z0<`t7o@*DsSo-YimHiuTx)zq7|Lg1jtl_IVDg`P9Dg`P9Dg`P9Dg`P9Dg`P9Dg`P9 VDg`P9?ji*S>pdImEB_C%^KaQRln(#^ literal 0 HcmV?d00001 diff --git a/utill/db/sqlitedata/kcwlicuxweb b/utill/db/sqlitedata/kcwlicuxweb new file mode 100644 index 0000000..e69de29 diff --git a/utill/queues.py b/utill/queues.py new file mode 100644 index 0000000..5dd9d70 --- /dev/null +++ b/utill/queues.py @@ -0,0 +1,101 @@ +from queue import Queue +from .db import model +from .db import sqlite as kcwsqlite +import threading,time,os,hashlib,random +queuesdbpath=os.path.split(os.path.realpath(__file__))[0]+"/Queues" +class model_task(model.model): + "任务" + 
config={'type':'sqlite','db':queuesdbpath} + model.dbtype.conf=config + table="Queues" + fields={ + "id":model.dbtype.int(LEN=11,PRI=True,A_L=True), #设置id为自增主键 + "taskid":model.dbtype.varchar(LEN=32,DEFAULT=''), #设置id为自增主键 + "title":model.dbtype.varchar(LEN=1024,DEFAULT=''), #名称 + "describes":model.dbtype.varchar(LEN=2048,DEFAULT=''), #描述 + "code":model.dbtype.int(LEN=11,DEFAULT=2), #状态码 0成功 1失败 2等待中 3正在执行 4完成 + "msg":model.dbtype.text(), #状态描述 + "error":model.dbtype.text(), #异常信息 + "addtime":model.dbtype.int(LEN=11,DEFAULT=0) #添加时间 + } +class Queues(): + __globalqueue=None + def start(): + Queues.__globalqueue=Queue() + t=threading.Thread(target=Queues.__messagequeue) + t.daemon=True + t.start() + def __messagequeue(): + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where(True).delete() + while True: + if not Queues.__globalqueue.empty(): + value=Queues.__globalqueue.get() + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":3,"msg":"正在执行","error":""}) + if value['args']: + try: + value['target'](*value['args']) + except Exception as e: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":1,"msg":"失败","error":str(e)}) + else: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":4,"msg":"执行完成"}) + else: + try: + value['target']() + except Exception as e: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":1,"msg":"失败","error":str(e)}) + else: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":4,"msg":"执行完成"}) + else: + time.sleep(1) + def 
insert(target,args=None,title="默认任务",describes="",msg='等待中'): #add_queue + """添加队列 + + target 方法名 必须 + + args 方法参数 非必须 如 + + title 任务名称 + + describes 任务描述 + + return taskid + """ + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + ttt=int(time.time()) + print(ttt) + m = hashlib.md5() + m.update((str(ttt)+str(random.randint(100000,999999))).encode(encoding='utf-8')) + taskid=m.hexdigest() + task={"taskid":taskid,"title":title,"describes":describes,"code":2,"msg":msg,"error":"","addtime":ttt} + key={"target":target,"args":args,"task":task} + Queues.__globalqueue.put(key) + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").insert(task) + return taskid + def getall(code=''): + """获取全部队列 + + code 1获取失败的任务 2获取等待中的任务 3获取正在执行中的任务 4获取执行完成的任务 + """ + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + where=False + if code: + where="code="+code + # else: + # where="code!=4" + return kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").field("taskid,title,describes,code,msg,error,addtime").where(where).select() + def status(taskid): + """获取任务状态 + + taskid 任务id + """ + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + return kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").field("taskid,title,describes,code,msg,error,addtime").where("taskid",taskid).find() \ No newline at end of file -- Gitee From 8a2aa1135518a33b76ea7e4caf95a45029a08080 Mon Sep 17 00:00:00 2001 From: kunkun Date: Mon, 11 May 2020 21:38:26 +0800 Subject: [PATCH 06/13] kun --- .gitignore | 3 ++- utill/Queues | Bin 12288 -> 12288 bytes 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 9d3fd53..c438f3b 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,5 @@ __pycache__ /.vscode /dist /kcweb.egg-info -/file \ No newline at end of file +/file +/utill/db/Queues \ No newline at end of file diff --git a/utill/Queues b/utill/Queues index 
0050cc420f96bc135d0aebe31526475db965bf56..57253097fec53a481f347c5b7dc146eab4170b20 100644 GIT binary patch delta 17 YcmZojXh@hK%{Y0Yj5FipjR_0%0Xeh=WB>pF delta 17 YcmZojXh@hK&DcLt#+k8yW5NP`06BODTmS$7 -- Gitee From 66c2d1ea043d5bf95429cb5b84e78bdb0f104bf2 Mon Sep 17 00:00:00 2001 From: kunkun Date: Mon, 11 May 2020 21:38:55 +0800 Subject: [PATCH 07/13] kun --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index c438f3b..003bfaa 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,4 @@ __pycache__ /dist /kcweb.egg-info /file -/utill/db/Queues \ No newline at end of file +#/utill/db/Queues \ No newline at end of file -- Gitee From 75b1f456f2665e49a216336d646e7e9449366575 Mon Sep 17 00:00:00 2001 From: kunkun Date: Mon, 11 May 2020 21:54:51 +0800 Subject: [PATCH 08/13] kun --- .gitignore | 3 +-- application/common/__init__.py | 1 - create.py | 2 ++ utill/Queues | Bin 12288 -> 12288 bytes 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 003bfaa..9d3fd53 100644 --- a/.gitignore +++ b/.gitignore @@ -10,5 +10,4 @@ __pycache__ /.vscode /dist /kcweb.egg-info -/file -#/utill/db/Queues \ No newline at end of file +/file \ No newline at end of file diff --git a/application/common/__init__.py b/application/common/__init__.py index e9df085..05e1c79 100644 --- a/application/common/__init__.py +++ b/application/common/__init__.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- from .autoload import * -Queues.start() #开启队列监听 def return_list(lists,count,pagenow,pagesize): """返回分页列表 diff --git a/create.py b/create.py index f099bef..036c060 100644 --- a/create.py +++ b/create.py @@ -60,6 +60,8 @@ class create: content=('# #gunicorn -b 0.0.0.0:39010 '+self.appname+':app\n'+ 'from kcweb import web\n'+ 'import '+self.appname+' as application\n'+ + 'from '+self.appname+'.common import *\n'+ + 'Queues.start() #开启队列监听\n'+ 'app=web(__name__,application)\n'+ 'if __name__ == "__main__":\n'+ ' #host监听ip 
port端口 name python解释器名字 (windows一般是python linux一般是python3)\n'+ diff --git a/utill/Queues b/utill/Queues index 57253097fec53a481f347c5b7dc146eab4170b20..0050cc420f96bc135d0aebe31526475db965bf56 100644 GIT binary patch delta 17 YcmZojXh@hK&DcLt#+k8yW5NP`06BODTmS$7 delta 17 YcmZojXh@hK%{Y0Yj5FipjR_0%0Xeh=WB>pF -- Gitee From 24a7d740326671745c6e1345cc843e3493ed0559 Mon Sep 17 00:00:00 2001 From: kunkun Date: Tue, 12 May 2020 01:33:25 +0800 Subject: [PATCH 09/13] kun --- .gitignore | 4 +++- utill/Queues | Bin 12288 -> 12288 bytes utill/http.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 9d3fd53..7afaf31 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,6 @@ __pycache__ /.vscode /dist /kcweb.egg-info -/file \ No newline at end of file +/file +/utill/db/Queues +utill/db/sqlitedata/kcwlicuxweb \ No newline at end of file diff --git a/utill/Queues b/utill/Queues index 0050cc420f96bc135d0aebe31526475db965bf56..59a2485c5ea3ecbb2fa749a1825af7000325d298 100644 GIT binary patch literal 12288 zcmeI2O>9(E6o6-FLrY;=V`7k%Z)1c=%>Dma5UtM{K}w-@s)7pMpL4O1g0y9!OQT8g zCvjz5m>8mo#soHQBqYWdL_jdcts57BuyW(VrDrU?wl%NDExea@+L_$<&AHz_=iEDK zpPm?-o`)hGo1Jm<1#jHbXf_*<6-A@b9BDKfefYg}HSw4Jr6>5a_xwLDWIS^J#TJfQ zH+-}){0%kK31qSx5Tfh0?hnxH7oj(F|$B*Dg z&qH%?`~(~gtDp9jlkL%NyXcNSKGrT)joo_G&A@iNbK4s`ngg4+Y-zqZ^*3|k1e}1m z#m99k2#c>3Tc=~OYrNZjvOQUh@9h@jQ)6SrzR6vCMkn_dyW9JV(W&m1FgxvGE)Qlz zn2N#SScDwz()+SyZ#y%0xahW@!+G4E%k~0hXOGRUe$mA^KRxpgYianl*6^)H|M02C z$XCOsMqVA+I{fSKhr|0?Keyg%?QHdJ`g+rAo5aT7H-51ZHWov_41F*(F|=vJ_Z!~W zuw(Gg!7m4o4sIK`Ht_Mlfr0zh|Fr&{^_{yQiR&rW25JMffjhuJ=iUvCJrA~;54QRZ zv&zUQlpsa{5sW(#n9`O;8725z=RUl7=F@BE&Ru`|RR4{WpWQt5?)5Y8-dK45;Mu+N zC12z`(CViwBxfx1fl41WI3rCo;8_Zs2q~6x*8O(+TNv#idFldCnD$6_Iq>*$F`<5VRLoad06; z&J0sjGhwKXiYzCra2o9zv~$66qa}}2#ALWe6c>soEx;}3EO)Z-W9c%4Y7&yP zy?8V+1|p5tr2tYVW*MQCkgMfO8KSiITuCaqP8bW2$f?u}1WVE>R^{aE!qxInhKLX_ zaw)A6l%dO*AVemZ)q)u%_!ta6DLu9Pok2tdM< 
zmNwonrxleZMM8u0GEoWwcx5D4$`>+38by+z1T;gE4M7Hnel2BMP!+3J=Sul}hUg%I zVG{86knk}%Cp7Vb0L8p@D^g{}5??6~WQg~;ERiLTDF)!EkCAwyl2^<}%>K&h{7~-C z5G^0BzO zG9+nZ9A+zE{Zf?-#mbfh3=k`+qoFdAOJz4hA{Wv-pR^)M0Y(KhVYeg(%_bkJlkH;p zbcSHYYMeghSoX$5qLndB;{+{Q0a=}F7t6^E2`*u8LkKGH9we4BE-j6oAj*pB+PPRx zWC%+N`;kO|37k@936~_LV2l^ex@srim-{lLSG&$Bhz^^hBHm)3lpI1J3>c-=)45RY z&5$UJRxu_UsA7_O;&FYwO1)x^T2+g^P>yE^=eTe6s+X~7h?9Z|&Xf`yt0)o2Dwp`X za!-Z`f=NP+N=FcMj(;{?F~J!>(Jck)fTN75) z&c8V^b8wdmo=`y|#U@KU1m`hvP2{*#WM%oR=Dghd+kA86j|P6#S8bp+P#dTX)COt; gwSn3|ZJ;(#8>kJ`25JL$kb$9Q-9(E6vt;M=@*@1OblY8JUeZCm}|ymg1;-0{Qs z=y_-^9zPL}`qigBIw1PQ(*& zZsl`L8)4;Iv2`XCyC+-Cmzz_?k7@KbG+dYZ5?rl!C zMvHkn_cq@0+SpY2%GlIa&PIxtn&V^B6Rl#)mQ28LGxLXI9!3ch9R`Lt=VxbJoXdk5 z5vIdne=Nj2-CMuQ?)cdwb8i)`=IgkQf7UWu#M#+nv#T%K5awr&+-BVxKC=95jt~8{ z_1lL|t-m~cWBAMAL&Fa>er=p@>}m84EexF+66^k0_szPvt{D7n@RPv z(f?QfxBW-^NBXYzecty*-vhnB^nTnseh-+qT4JR@r9h>?U7*1D{R6eVn;Z4bjkSh> zGBOAyh*6OU#;x#7X`n#{8QqO5pI*Q8<<%=!mOi?$_S%`Ru3tF2bm{E156{2(-M;yf zuW&Xs)>7t^HNc#w(ghu@ktUevSn`|*DemN~`TgRVrSCtudif$k-YZ##7^#>}j)~|Z zk;-`Bpfy4zPATzj5Sk&tg10e=;3zO}l~P7=sS-(!b45d!nUD;jQaYWyx7-s&y>Ng~ z zByC+pL8Re(?Ekjf^oG9m+Gf`-7wXw=`PpwlvA*KuDsSL5q5UOYdpCCA|lMNVgba4`1 zaNpS{C(DrxK{Z2&5v(_fglIWK<9lf+L>m?D9G#Qp)(lCC-~@QAd@;n#^nkGTMrgCEMiW85nAh@sI^$@J*ef{z*C!J zSGS&pa%Y}Q5*pC@1e-uc zVvJA3I!d8Slow0L5YX8F!D=2IWz6F)!!l)rVU%+01)Xznu{5_KMoLR@@s8onA|{S@ z3l{UyYZhD==P#5xLvRCNm<>W=Jwu~vhtZPS3KJn=`R*K@g>kV0LDNd>wN}{qsPcwM zN^yg6bF1M2b2_$l(tnv2w~o7(_g%PO`akBsBS7OS&xG^kqKtW)z$@%NAu$q?Fd})C z0<`t7o@*DsSo-YimHiuTx)zq7|Lg1jtl_IVDg`P9Dg`P9Dg`P9Dg`P9Dg`P9Dg`P9 VDg`P9?ji*S>pdImEB_C%^KaQRln(#^ diff --git a/utill/http.py b/utill/http.py index f8f5c61..7973038 100644 --- a/utill/http.py +++ b/utill/http.py @@ -17,7 +17,7 @@ class Http: get_cookies={} #获取最后的响应cookie get_cookie_str='' #获取最后的响应cookie 字符串 get_text='' #获取body响应内容 - get_content='' #获取二进制内容 + get_content='' #获取body响应二进制内容 get_response='' #获取响应对象 get_status_code=None #获取响应状态码 -- Gitee From b2117845fd2921de367124060938406920687733 Mon Sep 
17 00:00:00 2001 From: kunkun Date: Tue, 12 May 2020 01:34:12 +0800 Subject: [PATCH 10/13] kun --- utill/db/sqlitedata/kcwlicuxweb | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 utill/db/sqlitedata/kcwlicuxweb diff --git a/utill/db/sqlitedata/kcwlicuxweb b/utill/db/sqlitedata/kcwlicuxweb deleted file mode 100644 index e69de29..0000000 -- Gitee From ff515c1060cc240f0404093cc2a0b46c8111869a Mon Sep 17 00:00:00 2001 From: kunkun Date: Tue, 12 May 2020 01:34:34 +0800 Subject: [PATCH 11/13] kun --- .gitignore | 15 - Events.py | 57 - LICENSE | 21 - README.md | 66 - __init__.py | 2 - app.py | 824 ---- application/__init__.py | 5 - application/api/__init__.py | 2 - application/api/common/__init__.py | 19 - application/api/common/autoload.py | 2 - application/api/controller/__init__.py | 5 - application/api/controller/v1/__init__.py | 4 - application/api/controller/v1/index.py | 16 - application/api/controller/v2/__init__.py | 4 - application/api/controller/v2/index.py | 10 - application/api/tpl/v1/index/index.html | 29 - application/api/tpl/v2/index/index.html | 29 - application/common/__init__.py | 75 - application/common/autoload.py | 5 - application/config/__init__.py | 40 - application/config/other.py | 15 - common/__init__.py | 5 - common/autoload.py | 323 -- common/globals.py | 9 - common/request.py | 44 - common/session.py | 62 - config/__init__.py | 103 - create.py | 95 - tpl/error.html | 27 - utill/Queues | Bin 12288 -> 0 bytes utill/app.py | 35 - utill/cache/cache.py | 261 -- utill/dateutil/__init__.py | 8 - utill/dateutil/_common.py | 43 - utill/dateutil/_version.py | 4 - utill/dateutil/easter.py | 89 - utill/dateutil/parser/__init__.py | 60 - utill/dateutil/parser/_parser.py | 1580 -------- utill/dateutil/parser/isoparser.py | 411 -- utill/dateutil/relativedelta.py | 599 --- utill/dateutil/rrule.py | 1736 --------- utill/dateutil/tz/__init__.py | 17 - utill/dateutil/tz/_common.py | 419 -- utill/dateutil/tz/_factories.py | 73 - 
utill/dateutil/tz/tz.py | 1836 --------- utill/dateutil/tz/win.py | 370 -- utill/dateutil/tzwin.py | 2 - utill/dateutil/utils.py | 71 - utill/dateutil/zoneinfo/__init__.py | 167 - utill/dateutil/zoneinfo/rebuild.py | 53 - utill/db/model.py | 220 -- utill/db/mongodb.py | 332 -- utill/db/mysql.py | 1020 ----- utill/db/pymysql/__init__.py | 141 - utill/db/pymysql/_auth.py | 265 -- utill/db/pymysql/_compat.py | 21 - utill/db/pymysql/_socketio.py | 134 - utill/db/pymysql/charset.py | 212 - utill/db/pymysql/connections.py | 1279 ------ utill/db/pymysql/constants/CLIENT.py | 31 - utill/db/pymysql/constants/COMMAND.py | 33 - utill/db/pymysql/constants/CR.py | 68 - utill/db/pymysql/constants/ER.py | 475 --- utill/db/pymysql/constants/FIELD_TYPE.py | 33 - utill/db/pymysql/constants/FLAG.py | 15 - utill/db/pymysql/constants/SERVER_STATUS.py | 11 - utill/db/pymysql/constants/__init__.py | 0 utill/db/pymysql/converters.py | 411 -- utill/db/pymysql/cursors.py | 536 --- utill/db/pymysql/err.py | 109 - utill/db/pymysql/optionfile.py | 23 - utill/db/pymysql/protocol.py | 341 -- utill/db/pymysql/times.py | 20 - utill/db/pymysql/util.py | 13 - utill/db/sqlite.py | 666 ---- utill/db/sqlitedata/kcwdb | 0 utill/filetype/__init__.py | 10 - utill/filetype/filetype.py | 98 - utill/filetype/helpers.py | 122 - utill/filetype/match.py | 119 - utill/filetype/types/__init__.py | 83 - utill/filetype/types/archive.py | 515 --- utill/filetype/types/audio.py | 166 - utill/filetype/types/base.py | 31 - utill/filetype/types/font.py | 99 - utill/filetype/types/image.py | 279 -- utill/filetype/types/isobmff.py | 33 - utill/filetype/types/video.py | 216 -- utill/filetype/utils.py | 72 - utill/http.py | 83 - utill/queues.py | 101 - utill/redis.py | 210 - utill/rediss/__init__.py | 41 - utill/rediss/_compat.py | 138 - utill/rediss/client.py | 3865 ------------------- utill/rediss/connection.py | 1261 ------ utill/rediss/exceptions.py | 65 - utill/rediss/lock.py | 274 -- utill/rediss/sentinel.py | 286 -- 
utill/rediss/utils.py | 33 - 100 files changed, 24356 deletions(-) delete mode 100644 .gitignore delete mode 100644 Events.py delete mode 100644 LICENSE delete mode 100644 README.md delete mode 100644 __init__.py delete mode 100644 app.py delete mode 100644 application/__init__.py delete mode 100644 application/api/__init__.py delete mode 100644 application/api/common/__init__.py delete mode 100644 application/api/common/autoload.py delete mode 100644 application/api/controller/__init__.py delete mode 100644 application/api/controller/v1/__init__.py delete mode 100644 application/api/controller/v1/index.py delete mode 100644 application/api/controller/v2/__init__.py delete mode 100644 application/api/controller/v2/index.py delete mode 100644 application/api/tpl/v1/index/index.html delete mode 100644 application/api/tpl/v2/index/index.html delete mode 100644 application/common/__init__.py delete mode 100644 application/common/autoload.py delete mode 100644 application/config/__init__.py delete mode 100644 application/config/other.py delete mode 100644 common/__init__.py delete mode 100644 common/autoload.py delete mode 100644 common/globals.py delete mode 100644 common/request.py delete mode 100644 common/session.py delete mode 100644 config/__init__.py delete mode 100644 create.py delete mode 100644 tpl/error.html delete mode 100644 utill/Queues delete mode 100644 utill/app.py delete mode 100644 utill/cache/cache.py delete mode 100644 utill/dateutil/__init__.py delete mode 100644 utill/dateutil/_common.py delete mode 100644 utill/dateutil/_version.py delete mode 100644 utill/dateutil/easter.py delete mode 100644 utill/dateutil/parser/__init__.py delete mode 100644 utill/dateutil/parser/_parser.py delete mode 100644 utill/dateutil/parser/isoparser.py delete mode 100644 utill/dateutil/relativedelta.py delete mode 100644 utill/dateutil/rrule.py delete mode 100644 utill/dateutil/tz/__init__.py delete mode 100644 utill/dateutil/tz/_common.py delete mode 100644 
utill/dateutil/tz/_factories.py delete mode 100644 utill/dateutil/tz/tz.py delete mode 100644 utill/dateutil/tz/win.py delete mode 100644 utill/dateutil/tzwin.py delete mode 100644 utill/dateutil/utils.py delete mode 100644 utill/dateutil/zoneinfo/__init__.py delete mode 100644 utill/dateutil/zoneinfo/rebuild.py delete mode 100644 utill/db/model.py delete mode 100644 utill/db/mongodb.py delete mode 100644 utill/db/mysql.py delete mode 100644 utill/db/pymysql/__init__.py delete mode 100644 utill/db/pymysql/_auth.py delete mode 100644 utill/db/pymysql/_compat.py delete mode 100644 utill/db/pymysql/_socketio.py delete mode 100644 utill/db/pymysql/charset.py delete mode 100644 utill/db/pymysql/connections.py delete mode 100644 utill/db/pymysql/constants/CLIENT.py delete mode 100644 utill/db/pymysql/constants/COMMAND.py delete mode 100644 utill/db/pymysql/constants/CR.py delete mode 100644 utill/db/pymysql/constants/ER.py delete mode 100644 utill/db/pymysql/constants/FIELD_TYPE.py delete mode 100644 utill/db/pymysql/constants/FLAG.py delete mode 100644 utill/db/pymysql/constants/SERVER_STATUS.py delete mode 100644 utill/db/pymysql/constants/__init__.py delete mode 100644 utill/db/pymysql/converters.py delete mode 100644 utill/db/pymysql/cursors.py delete mode 100644 utill/db/pymysql/err.py delete mode 100644 utill/db/pymysql/optionfile.py delete mode 100644 utill/db/pymysql/protocol.py delete mode 100644 utill/db/pymysql/times.py delete mode 100644 utill/db/pymysql/util.py delete mode 100644 utill/db/sqlite.py delete mode 100644 utill/db/sqlitedata/kcwdb delete mode 100644 utill/filetype/__init__.py delete mode 100644 utill/filetype/filetype.py delete mode 100644 utill/filetype/helpers.py delete mode 100644 utill/filetype/match.py delete mode 100644 utill/filetype/types/__init__.py delete mode 100644 utill/filetype/types/archive.py delete mode 100644 utill/filetype/types/audio.py delete mode 100644 utill/filetype/types/base.py delete mode 100644 
utill/filetype/types/font.py delete mode 100644 utill/filetype/types/image.py delete mode 100644 utill/filetype/types/isobmff.py delete mode 100644 utill/filetype/types/video.py delete mode 100644 utill/filetype/utils.py delete mode 100644 utill/http.py delete mode 100644 utill/queues.py delete mode 100644 utill/redis.py delete mode 100644 utill/rediss/__init__.py delete mode 100644 utill/rediss/_compat.py delete mode 100644 utill/rediss/client.py delete mode 100644 utill/rediss/connection.py delete mode 100644 utill/rediss/exceptions.py delete mode 100644 utill/rediss/lock.py delete mode 100644 utill/rediss/sentinel.py delete mode 100644 utill/rediss/utils.py diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 7afaf31..0000000 --- a/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -#以下文件不允许提交到git -__pycache__ -.settings -.buildpath -.project -*.log -*.pyc -*log/ -/.idea -/.vscode -/dist -/kcweb.egg-info -/file -/utill/db/Queues -utill/db/sqlitedata/kcwlicuxweb \ No newline at end of file diff --git a/Events.py b/Events.py deleted file mode 100644 index 574d83f..0000000 --- a/Events.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -import os, sys, time, subprocess -from watchdog.observers import Observer -from watchdog.events import FileSystemEventHandler -class MyFileSystemEventHander(FileSystemEventHandler): - def __init__(self, fn): - super(MyFileSystemEventHander, self).__init__() - self.restart = fn - - def on_any_event(self, event): - if event.src_path.endswith('.py'): - print('* 更新文件:%s' % event.src_path) - self.restart() -class Events: - command = ['echo', 'ok'] - process = None - def __init__(self,argv): - argv=argv - # print(argv) - if 'python' not in argv[0]: - argv.insert(0, 'python3') - self.command = argv - # print(self.command) - paths = os.path.abspath('.') - self.start_watch(paths, None) - - def kill_process(self): - "关闭" - if self.process: - self.process.kill() - self.process.wait() - self.process = None - - def 
start_process(self): - "启动" - self.process = subprocess.Popen(self.command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr) - - def restart_process(self): - "重启" - self.kill_process() - self.start_process() - - def start_watch(self,path, callback): - "执行" - observer = Observer() - observer.schedule(MyFileSystemEventHander(self.restart_process), path, recursive=True) - observer.start() - self.start_process() - try: - while True: - time.sleep(0.5) - except KeyboardInterrupt: - self.kill_process() - # observer.stop() - # observer.join() - -# Events(['server.py']) #执行server.py文件 \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 6cebfc2..0000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 坤坤 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/README.md b/README.md deleted file mode 100644 index 7dee6dd..0000000 --- a/README.md +++ /dev/null @@ -1,66 +0,0 @@ -完整文档请参考:http://intapp.kwebapp.cn/index/index/doc/docde/1 -#### 创建应用 -- 新建一个server.py文件,内容如下,执行python3 server.py创建应用 -- 如下面的代码创建了一个app应用,同时在app应用下创建了一个api模块 -```server.py -from kcweb.create import create -create("app","api") # 创建项目 -``` - -- 您的目录结构应该是这样,如下: -``` -├─./ 框架目录 -├─app 公共方法目录 -│ ├─common 公共函数目录 -│ │ ├─__init__.py 函数文件 -│ ├─config 配置目录 -│ │ ├─__init__.py 配置文件 -│ ├─api 模块目录 -│ │ ├─common 该模块的公共函数目录 -│ │ │ ├─__init__.py 函数文件 -│ │ ├─controller 控制器目录 -│ │ │ ├─__init__.py 版本初始化文件 -│ │ │ ├─v1 -│ │ │ │ ├─__init__.py 函数初始化文件 -│ │ │ │ ├─index.py 控制器文件 -│ │ │ ├─v2 -│ │ │ │ ├─__init__.py 函数初始化文件 -│ │ │ │ ├─index.py 控制器文件 -│ │ ├─tpl 模板文件目录 -│ │ │ ├─v1 -│ │ │ │ ├─index -│ │ │ │ │ ├─index.html 模块文件 -│ │ │ ├─v1 -│ │ │ │ ├─index -│ │ │ │ │ ├─index.html 模块文件 -│ │ ├─__init__.py 控制器初始化文件 -│ ├─static 静态资源目录 -│ ├─runtime 缓存目录 -│ ├─__init__.py 自动导入模块文件 -├─server.py 应用创建后生成的运行文件(应用创建时自动创建) -``` -- 其中server.py文件内容将被修改如下 -``` -# #gunicorn -b 0.0.0.0:39001 server:app -from kcweb import web -import app as application -app=web(__name__,application) -if __name__ == "__main__": - #app 是当前文件名 host监听ip port端口 name python解释器名字 (windows一般是python linux一般是python3) - app.run("server",host="0.0.0.0",port="39001",name="python") -``` -- 如果您当前系统的python解释器名字是python3,你应该是在当前目录下执行python3 server.py。 然后访问127.0.0.1:39001 - - -- 如果您当前系统的python解释器名字是python,您应该修改server.py代码如下 -``` -# #gunicorn -b 0.0.0.0:39001 server:app -from kcweb import web -import app as application -app=web(__name__,application) -if __name__ == "__main__": - #app 是当前文件名 host监听ip port端口 - app.run("server",host="0.0.0.0",port="39001",name="python") -``` -然后访问127.0.0.1:39001 - diff --git a/__init__.py b/__init__.py deleted file mode 100644 index 72fc2be..0000000 --- a/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -from .app import web \ No newline at end of file diff --git a/app.py 
b/app.py deleted file mode 100644 index 53cb075..0000000 --- a/app.py +++ /dev/null @@ -1,824 +0,0 @@ -# -*- coding: utf-8 -*- -import socket,time,re,os,sys,traceback,threading,urllib -from . Events import Events -from . common import * -from . import config -from mako.template import Template -from datetime import datetime -from threading import local -from .utill import filetype -from kcweb.utill.cache import cache as kcwcache -class web: - __name=None - __appname=None - __config=config - def __new__(self,name,appname=None): - self.__name=name - self.__appname=appname - if self.__name != '__main__': - def apps(env, start_response): - # REQUEST_METHOD=env['REQUEST_METHOD'] #GET - # QUERY_STRING=env['QUERY_STRING'] #a=1&b=1 - # RAW_URI=env['RAW_URI'] #/aa/bb/cc?a=1&b=1 - # SERVER_PROTOCOL=env['SERVER_PROTOCOL'] #HTTP/1.1 - # HTTP_HOST=env['HTTP_HOST'] #212.129.149.238:39010 - # HTTP_COOKIE=env['HTTP_COOKIE'] #cookie - # REMOTE_ADDR=env['REMOTE_ADDR'] #27.156.27.201 - # PATH_INFO=env['PATH_INFO'] #/aa/bb/cc - # HTTP_USER_AGENT=env['HTTP_USER_AGENT'] #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0 - try: - env['BODY_DATA']=str(env['wsgi.input'].next(), encoding = "utf-8") - except: - env['BODY_DATA']="" - p=(config.app['staticpath']+env['RAW_URI'].replace(' ','')) - status='200 ok' - if os.path.isfile(p): - kind = filetype.guess(p) - if kind is None: - f=open(p,"rb") - body=f.read() - f.close() - resheader=[ - ("Cache-Control","public, max-age=43200"), - ] - else: - f=open(p,"rb") - body=f.read() - f.close() - resheader=[ - ("Content-Type",kind.mime), - ("Cache-Control","public, max-age=43200"), - ("Accept-Ranges","bytes"), - # ("Content-Length",len(body)) - ] - else: - status,resheader,body=self.__routes(self,env) - if type(body) is bytes: - pass - else: - body=bytes(body, encoding='utf-8') - # print(env['bodydata']) - # print("\n\nwsgi.input",env['wsgi.input']) - # 
print("\n\ndir(env['wsgi.input'])",dir(env['wsgi.input'])) - # print("\n\nenv['wsgi.input'].__dict__",env['wsgi.input'].__dict__) - # try: - # print("\n\nwsgi.input.buf()",env['wsgi.input'].buf()) - # except Exception as e: - # print("\n\nwsgi.input.buf() error:",e) - # try: - # print("\n\nwsgi.input.next()",env['wsgi.input'].next()) - # except Exception as e: - # print("\n\nwsgi.input.next() error:",e) - # try: - # print("\n\nwsgi.input.read()",env['wsgi.input'].read()) - # except Exception as e: - # print("\n\nwsgi.input.read() error:",e) - # try: - # print("\n\nwsgi.input.reader()",env['wsgi.input'].reader()) - # except Exception as e: - # print("\n\nwsgi.input.reader() error:",e) - # try: - # print("\n\nwsgi.input.readline()",env['wsgi.input'].readline()) - # except Exception as e: - # print("\n\nwsgi.input.readline() error:",e) - # try: - # print("\n\nwsgi.input.readlines()",env['wsgi.input'].readlines()) - # except Exception as e: - # print("\n\nwsgi.input.readlines() error:",e) - # try: - # print("wsgi.input.aa",env['wsgi.input'].get("SCRIPT_NAME", "")) - # except Exception as e: - # print("wsgi.input.get('aa') error:",e) - # try: - # print("wsgi.input.aa",env['wsgi.input']['aa']) - # except Exception as e: - # print("wsgi.input['aa'] error:",e) - # print(dir(env['wsgi.input']).getsize) - # from io import StringIO - # stdout = StringIO() - # print("Hello world!", file=stdout) - # print(file=stdout) - # h = sorted(env.items()) - # for k,v in h: - # print(k,'=',repr(v), file=stdout) - # print(stdout.getvalue().encode("utf-8")) - start_response(status,resheader) - return [body] - return apps - else: - return super().__new__(self) - def run(self,host="127.0.0.1",port="39001",name='python'): - """运行开发环境 - - host: 监听地址 - - port: 端口 - - name: python命令行解释机名字 默认python - """ - if len(sys.argv)==1 or (len(sys.argv)==2 and sys.argv[1]=='eventlog'): - filename=sys.argv[0][:-3] - if self.__config.app['app_debug']: - arg=sys.argv - if len(arg)==2 and arg[1]=='eventlog': - 
self.__impl(host=host,port=port,filename=filename) - else: - Events([name,str(filename)+'.py','eventlog']) - else: - self.__impl( - host=host, - port=port, - filename=filename - ) - else: - try: - RAW_URI=sys.argv[1] - except:pass - else: - PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc - if PATH_INFO[0]=='/': - PATH_INFO=PATH_INFO[1:] - QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1 - reqheader={ - 'REQUEST_METHOD':'GET', - 'RAW_URI':RAW_URI, - 'PATH_INFO':PATH_INFO, - 'QUERY_STRING':QUERY_STRING, - 'SERVER_PROTOCOL':'', - 'HTTP_HOST':'', - 'HTTP_COOKIE':'', - 'REMOTE_ADDR':'', - 'HTTP_USER_AGENT':'', - 'BODY_DATA':'' - } - status,resheader,body=self.__routes(reqheader) - if 'body' not in body and 'html' not in body and '<' not in body and '>' not in body: - print(body) - exit() - def __impl(self,host,port,filename): - "运行测试服务器" - try: - self.__http_server( - host=host, - port=port, - filename=filename - ) - except KeyboardInterrupt: - pass - def __get_modular(self,header): - "获取模块" - modular='' - route=self.__config.route - if route['modular']: - if isinstance(route['modular'],str): - modular=route['modular'] - else: - HTTP_HOST=header['HTTP_HOST'].split(".")[0] - for mk in route['modular']: - if HTTP_HOST in mk: - modular=mk[HTTP_HOST] - return modular - def __getconfigroute(self,PATH_INFO,header): - "使用配置路由" - route=self.__config.route - routedefault=route['default'] - methods=route['methods'] - paths='' - for path in PATH_INFO: - paths+="/"+path - try: - for item in route['children']: - if ':' in item['path']: - path=item['path'].split(':') - if(len(path)==len(PATH_INFO)): - is_pp=False - try: - item['methods'] - except:pass - else: - methods=item['methods'] - for k in methods: #匹配请求方式 - if header['REQUEST_METHOD'] in k: - is_pp=True - break - if path[0]==paths[:len(path[0])] and is_pp: - del PATH_INFO[0] - cs=PATH_INFO - PATH_INFO=item['component'].split('/') - for v in cs: - PATH_INFO.append(v) - routedefault=True - break - elif 
item['path']==paths or item['path']+'/'==paths: - PATH_INFO=item['component'].split('/') - routedefault=True - break - except:pass - return routedefault,PATH_INFO - def defaultroute(self,header,PATH_INFO): - "路由匹配" - route=self.__config.route - modular=web.__get_modular(self,header) - routedefault=route['default'] - methods=route['methods'] - if routedefault: - edition='index' - files=route['files'] - funct=route['funct'] - else: - edition='' - files='' - funct='' - param=[] - urls='' - i=0 - HTTP_HOST=header['HTTP_HOST'].split(".")[0] - ##默认路由start ################################################################################# - - if modular: - if route['edition']: #匹配模块并且匹配了版本 - edition=route['edition'] - routedefault,PATH_INFO=web.__getconfigroute( - self, - PATH_INFO, - header - ) - if routedefault: #使用路由 - for path in PATH_INFO: - if path: - if i==0: - files=path - urls=urls+"/"+str(path) - elif i==1: - funct=path - urls=urls+"/"+str(path) - else: - param.append(urllib.parse.unquote(path)) - i+=1 - else: #配置模块没有配置版本 - routedefault,PATH_INFO=web.__getconfigroute( - self, - PATH_INFO, - header - ) - if routedefault: #使用默认路由 - for path in PATH_INFO: - if path: - if i==0: - edition=path - elif i==1: - files=path - urls=urls+"/"+str(path) - elif i==2: - funct=path - urls=urls+"/"+str(path) - else: - param.append(urllib.parse.unquote(path)) - i+=1 - elif route['edition']: #配置版本的但没有匹配模块 - edition=route['edition'] - routedefault,PATH_INFO=web.__getconfigroute( - self, - PATH_INFO, - header - ) - if routedefault: #使用默认路由 - for path in PATH_INFO: - if path: - if i==0: - modular=path - elif i==1: - files=path - urls=urls+"/"+str(path) - elif i==2: - funct=path - urls=urls+"/"+str(path) - else: - param.append(urllib.parse.unquote(path)) - i+=1 - else: #完全默认 - routedefault,PATH_INFO=web.__getconfigroute(self,PATH_INFO,header) - if routedefault: #使用默认路由 - for path in PATH_INFO: - if path: - if i==0: - modular=path - elif i==1: - edition=path - elif i==2: - files=path - 
urls=urls+"/"+str(path) - elif i==3: - funct=path - urls=urls+"/"+str(path) - else: - param.append(urllib.parse.unquote(path)) - i+=1 - #默认路由end ############################################################ - return methods,modular,edition,files,funct,tuple(param) - def __tran(self,data,status,resheader): - "转换控制器返回的内容" - if isinstance(data,tuple): - i=0 - for item in data: - if i==0: - body=item - elif i==1: - status=item - elif i==2: - if isinstance(item,dict): - for key in item: - resheader[key]=item[key] - else: - raise Exception('错误!这个不是一个字典') - else: - break - i+=1 - else: - body=data - return body,status,resheader - def __set_globals(self,header): - globals.HEADER.Method=header['REQUEST_METHOD'] - globals.HEADER.URL=header['RAW_URI'] - globals.HEADER.PATH_INFO=header['PATH_INFO'] - globals.HEADER.QUERY_STRING=header['QUERY_STRING'] - globals.HEADER.SERVER_PROTOCOL=header['SERVER_PROTOCOL'] - globals.HEADER.HTTP_HOST=header['HTTP_HOST'] - globals.HEADER.BODY_DATA=header['BODY_DATA'] - try: - globals.HEADER.HTTP_COOKIE=header['HTTP_COOKIE'] - except: - globals.HEADER.HTTP_COOKIE=None - globals.HEADER.HTTP_USER_AGENT=header['HTTP_USER_AGENT'] - def __del_globals(): - globals.VAR = local() - globals.HEADER = local() - globals.G = local() - def __routes(self,header): - body="这是一个http测试服务器" - status="200 ok" - resheader={"Content-Type":"text/html; charset=utf-8"} - web.__set_globals(self,header) - PATH_INFO=header['PATH_INFO'].split('/') - if PATH_INFO[0]==' ' or PATH_INFO[0]=='': - del PATH_INFO[0] - methods,modular,edition,files,funct,param=web.defaultroute(self,header,PATH_INFO) - if header['REQUEST_METHOD'] in methods: - try: - obj=getattr(web.__appname,modular) - except (AttributeError,UnboundLocalError): - status="500 Internal Server Error" - body=web.__tpl( - title = status, - e=status, - data="无法找到目录:"+str(modular)+"/" - ) - else: - try: - obj=getattr(obj,"controller") - except (AttributeError,UnboundLocalError): - status="404 Not Found" - body=web.__tpl( - 
title = status, - e=status, - data="无法找到目录:"+str(modular)+"/controller/" - ) - else: - try: - obj=getattr(obj,edition) - except (AttributeError,UnboundLocalError) as e: - con="无法找到目录:"+str(modular)+"/controller/"+str(edition)+"/" - try: - data=getattr(obj,"error")(e,con) - body,status,resheader=web.__tran( - self, - data, - status, - resheader - ) - except (AttributeError,UnboundLocalError): - status="404 Not Found" - body=web.__tpl( - title = status, - e=status,data=con - ) - except Exception as e: - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms,e=e - ) - else: - try: - obj=getattr(obj,files) - except (AttributeError,UnboundLocalError) as e: - con="无法找到文件:"+str(modular)+"/controller/"+str(edition)+"/"+str(files)+".py" - try: - data=getattr(obj,"error")(e,con) - body,status,resheader=web.__tran( - self - ,data - ,status - ,resheader - ) - except (AttributeError,UnboundLocalError): - status="404 Not Found" - body=web.__tpl( - title = status - ,data=con - ,e=status) - except Exception as e: - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms, - e=e - ) - else: - try: - data=None - if self.__config.app['before_request']: #请求前执行的函数 - try: - data=getattr(obj,self.__config.app['before_request'])() - if data: - body,status,resheader=web.__tran( - self,data, - status, - resheader - ) - except (AttributeError): - print(traceback.format_exc()) - pass - except Exception as e: - try: - data=getattr(obj,"error")(e,traceback.format_exc().split("\n")) - body,status,resheader=web.__tran( - self,data, - status, - resheader - ) - except (AttributeError): - data=True - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - # 
print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms,e=e - ) - except Exception as e: - data=True - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms,e=e - ) - if not data: - data=getattr(obj,funct)(*param) - body,status,resheader=web.__tran( - self,data, - status, - resheader - ) - except Exception as e: - try: - data=getattr(obj,"error")(e,traceback.format_exc().split("\n")) - body,status,resheader=web.__tran( - self,data, - status, - resheader - ) - except (AttributeError): - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms, - e=e - ) - except Exception as e: - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms, - e=e - ) - else: - status="405 Method Not Allowed" - body=web.__tpl( - title = status, - data='405 Method Not Allowed', - e='' - ) - try: - resheader['set-cookie']=globals.set_cookie - del globals.set_cookie - except:pass - - if self.__config.app['after_request']: #请求后执行的函数 - try: - data=getattr(obj,self.__config.app['after_request'])() - if data: - body,status,resheader=web.__tran(self,data,status,resheader) - except (AttributeError,UnboundLocalError):pass - except Exception as e: - try: - data=getattr(obj,"error")(e,traceback.format_exc().split("\n")) - body,status,resheader=web.__tran( - self,data, - status, - resheader - ) - except AttributeError as e: - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - 
errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status - ,data=errms, - e=e - ) - except Exception as e: - status="500 Internal Server Error" - errms=status - if self.__config.app['app_debug']: - print(traceback.format_exc()) - errms=traceback.format_exc().split("\n") - body=web.__tpl( - title = status, - data=errms, - e="" - ) - resheaders=[] - for key in resheader: - resheaders.append((key,resheader[key])) - web.__del_globals() - if isinstance(resheaders,list): - if not body: - body='' - if type(body) is bytes: - pass - else: - body=str(body) - return str(status),resheaders,body - else: - raise Exception() - def __tpl(**context): - path=os.path.split(os.path.realpath(__file__))[0] - body='' - with open(path+'/tpl/error.html', 'r',encoding='utf-8') as f: - content=f.read() - t=Template(content) - body=t.render(**context) - return body - - - def __http_server(self,host,port,filename): - tcp_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM) - try: - tcp_socket.bind((host,int(port))) - except OSError: - print("通常每个套接字地址(协议/网络地址/端口)只允许使用一次(按CTRL+C退出)") - else: - tcp_socket.listen(1024) - print('! 警告:这是开发服务器。不要在生产环境中部署使用它') - print('* 生产环境中建议使用gunicorn,gunicorn运行命令如:gunicorn -b '+host+':'+str(port)+' '+str(filename)+':app') - if self.__config.app['app_debug']: - print('* 调试器:开启') - else: - print('* 调试器:已关闭') - print("* 运行在http://"+host+":"+str(port)+"/ (按CTRL+C退出)") - while True: - new_tcp_socket,client_info=tcp_socket.accept() - t=threading.Thread(target=self.__server_client,args=(new_tcp_socket,)) - t.daemon=True - t.start() - tcp_socket.close() - def __server_client(self,new_socket): - # 处理http的的请求 - data=new_socket.recv(1047576).decode() - if data: - datas=data.split("\r\n") - data1=datas[0] - #reqsest - REQUEST_METHOD=data1.split("/")[0].replace(' ','') ##GET - RAW_URI=re.findall(REQUEST_METHOD+"(.+?) 
HTTP", data1) #/aa/bb/cc?a=1&b=1 - if RAW_URI: - RAW_URI=RAW_URI[0] - else: - RAW_URI='' - PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc - QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1 - SERVER_PROTOCOL=data1.split(" ")[-1] #HTTP/1.1 - HTTP_HOST=re.findall("Host: (.+?)\r\n", data)#212.129.149.238:39010 - if HTTP_HOST: - HTTP_HOST=HTTP_HOST[0] - else: - HTTP_HOST='' - HTTP_COOKIE=re.findall("Cookie: (.+?)\r\n", data)#cookie - if HTTP_COOKIE: - HTTP_COOKIE=HTTP_COOKIE[0] - else: - HTTP_COOKIE='' - REMOTE_ADDR='' - HTTP_USER_AGENT=re.findall("User-Agent: (.+?)\r\n", data) #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0 - if HTTP_USER_AGENT: - HTTP_USER_AGENT=HTTP_USER_AGENT[0] - else: - HTTP_USER_AGENT='' - BODY_DATA=datas[len(datas)-1] - # print(data) - #reqsest - reqheader={ - 'REQUEST_METHOD':REQUEST_METHOD, - 'RAW_URI':RAW_URI, - 'PATH_INFO':PATH_INFO, - 'QUERY_STRING':QUERY_STRING, - 'SERVER_PROTOCOL':SERVER_PROTOCOL, - 'HTTP_HOST':HTTP_HOST, - 'HTTP_COOKIE':HTTP_COOKIE, - 'REMOTE_ADDR':REMOTE_ADDR, - 'HTTP_USER_AGENT':HTTP_USER_AGENT, - 'BODY_DATA':BODY_DATA - } - p=(config.app['staticpath']+RAW_URI.replace(' ','')) - # print("目录",p) - status='200 ok' - if os.path.isfile(p): - # print('静态文件',p) - kind = filetype.guess(p) - if kind is None: - - f=open(p,"rb") - body=f.read() - f.close() - resheader=[("Cache-Control","public, max-age=43200"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] - - header="HTTP/1.1 %s \n" % status - header+="Content-Length:%d\n" % len(body) - else: - f=open(p,"rb") - body=f.read() - f.close() - resheader=[("Content-Type",kind.mime),("Cache-Control","public, max-age=43200"),("Accept-Ranges","bytes"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] - header="HTTP/1.1 %s \n" % status - header+="Content-Length:%d\n" % len(body) - else: - status,resheader,body=self.__routes(reqheader) - if type(body) is bytes: - pass - else: - 
body=body.encode() - header="HTTP/1.1 %s \n" % status - header+="Content-Length:%d\n" % len(body) - - print(HTTP_HOST+' -- ['+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))+'] "'+REQUEST_METHOD+" "+RAW_URI +" "+SERVER_PROTOCOL + '" '+status+"-") - t=time.time() - header+="Server:kcweb\n" - header+="Date:%s\n" % datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') - for t in resheader: - header+="%s:%s\n" % (t[0],t[1]) - header+="\n" - try: - new_socket.send(header.encode()) - new_socket.send(body) - except Exception as e: - pass - new_socket.close() - def __http_sever(self,host,port): - #http测试服务器 - if self.__config.app['app_debug']: - print('* 调试器:开启') - else: - print('* 调试器:已关闭') - print("* 运行在http://"+host+":"+str(port)+"/ (按CTRL+C退出)") - tcp_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM) - tcp_socket.bind((host,int(port))) - tcp_socket.listen(1024) - pack_length=1024 - tcp_socket.setblocking(False) - tcp_socket_list=list() - - - while True: - try: - new_tcp_socket,client_info=tcp_socket.accept() - except: - pass - else: - new_tcp_socket.setblocking(False) - tcp_socket_list.append(new_tcp_socket) - for cli_soc in tcp_socket_list: - try: - data=cli_soc.recv(pack_length).decode() - except Exception as e: - pass - else: - if data: - datas=data.split("\r\n") - data1=datas[0] - #reqsest - REQUEST_METHOD=data1.split("/")[0].replace(' ','') ##GET - RAW_URI=re.findall(REQUEST_METHOD+"(.+?) 
HTTP", data1) #/aa/bb/cc?a=1&b=1 - if RAW_URI: - RAW_URI=RAW_URI[0] - else: - RAW_URI='' - PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc - QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1 - SERVER_PROTOCOL=data1.split(" ")[-1] #HTTP/1.1 - HTTP_HOST=re.findall("Host: (.+?)\r\n", data)#212.129.149.238:39010 - if HTTP_HOST: - HTTP_HOST=HTTP_HOST[0] - else: - HTTP_HOST='' - HTTP_COOKIE=re.findall("Cookie: (.+?)\r\n", data)#cookie - if HTTP_COOKIE: - HTTP_COOKIE=HTTP_COOKIE[0] - else: - HTTP_COOKIE='' - REMOTE_ADDR='' - HTTP_USER_AGENT=re.findall("User-Agent: (.+?)\r\n", data) #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0 - if HTTP_USER_AGENT: - HTTP_USER_AGENT=HTTP_USER_AGENT[0] - else: - HTTP_USER_AGENT='' - BODY_DATA=datas[len(datas)-1] - #reqsest - reqheader={ - 'REQUEST_METHOD':REQUEST_METHOD, - 'RAW_URI':RAW_URI, - 'PATH_INFO':PATH_INFO, - 'QUERY_STRING':QUERY_STRING, - 'SERVER_PROTOCOL':SERVER_PROTOCOL, - 'HTTP_HOST':HTTP_HOST, - 'HTTP_COOKIE':HTTP_COOKIE, - 'REMOTE_ADDR':REMOTE_ADDR, - 'HTTP_USER_AGENT':HTTP_USER_AGENT, - 'BODY_DATA':BODY_DATA - } - p=(config.app['staticpath']+RAW_URI.replace(' ','')) - - status='200 ok' - if os.path.isfile(p): - kind = filetype.guess(p) - if kind is None: - f=open(p,"rb") - body=f.read() - f.close() - resheader=[("Cache-Control","public, max-age=43200"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] - - header="HTTP/1.1 %s \n" % status - header+="Content-Length:%d\n" % len(body) - else: - f=open(p,"rb") - body=f.read() - f.close() - resheader=[("Content-Type",kind.mime),("Cache-Control","public, max-age=43200"),("Accept-Ranges","bytes"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] - header="HTTP/1.1 %s \n" % status - header+="Content-Length:%d\n" % len(body) - else: - status,resheader,body=self.__routes(reqheader) - body=body.encode() - header="HTTP/1.1 %s \n" % status - header+="Content-Length:%d\n" % len(body) - - 
print(HTTP_HOST+' -- ['+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))+'] "'+REQUEST_METHOD+" "+RAW_URI +" "+SERVER_PROTOCOL + '" '+status+"-") - t=time.time() - header+="Server:kcweb\n" - header+="Date:%s\n" % datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') - for t in resheader: - header+="%s:%s\n" % (t[0],t[1]) - header+="\n" - try: - cli_soc.send(header.encode()) - cli_soc.send(body) - except Exception as e: - cli_soc.close() - else: - cli_soc.close() - tcp_socket_list.remove(cli_soc) - tcp_socket.close() diff --git a/application/__init__.py b/application/__init__.py deleted file mode 100644 index 915fbee..0000000 --- a/application/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -#导入模块 -% for i in tuple_modular: -from . import ${i} -% endfor diff --git a/application/api/__init__.py b/application/api/__init__.py deleted file mode 100644 index 0f9c434..0000000 --- a/application/api/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -from . 
import controller \ No newline at end of file diff --git a/application/api/common/__init__.py b/application/api/common/__init__.py deleted file mode 100644 index 0917cdf..0000000 --- a/application/api/common/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -from .autoload import * -#下面的方法在当前模块中有效 -def before_request(): - G.userinfo=get_session("userinfo") - print('${modular}模块在请求前执行,我是要在配置文件配置后才能生效哦!',G.userinfo) -def after_request(): - print('${modular}模块在请求后执行,我是要在配置文件配置后才能生效哦!') -def set_session(name,value,expire=None): - "设置session" - return session.set("${appname}${modular}"+str(name),value,expire) -def get_session(name): - "获取session" - return session.get("${appname}${modular}"+str(name)) -def del_session(name): - "删除session" - return session.rm("${appname}${modular}"+str(name)) -def tpl(path,**context): - return Template("/${modular}/tpl/"+str(path),**context) diff --git a/application/api/common/autoload.py b/application/api/common/autoload.py deleted file mode 100644 index 05965c2..0000000 --- a/application/api/common/autoload.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -from ${appname}.common import * \ No newline at end of file diff --git a/application/api/controller/__init__.py b/application/api/controller/__init__.py deleted file mode 100644 index 5ab9848..0000000 --- a/application/api/controller/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from . import v1,v2 -# def error(err,data): -# "该函数在当前目录下无法匹配时被调用" -# return data,"200",{"Content-Type":"text/json; charset=utf-8"} \ No newline at end of file diff --git a/application/api/controller/v1/__init__.py b/application/api/controller/v1/__init__.py deleted file mode 100644 index ab68b08..0000000 --- a/application/api/controller/v1/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from . 
import index -# def error(err,data): -# "该函数在找不到模块时执行函数时和框架报错时被调用" -# return str(err) \ No newline at end of file diff --git a/application/api/controller/v1/index.py b/application/api/controller/v1/index.py deleted file mode 100644 index 80b47d4..0000000 --- a/application/api/controller/v1/index.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -from ${appname}.${modular}.common import * -def index(): - return tpl("/v1/index/index.html",title="欢迎使用kcweb框架",data=['这是${appname}应用${modular}模块下v1版本的一个模板渲染测试效果']) -def inter(id='',title=""): - data={ - 'title':title, - 'id':id - } - return successjson(data) -def home(id='',title=""): - data={ - "title":"标题是"+title, - "id":"id是"+id - } - return successjson(data) \ No newline at end of file diff --git a/application/api/controller/v2/__init__.py b/application/api/controller/v2/__init__.py deleted file mode 100644 index ab68b08..0000000 --- a/application/api/controller/v2/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from . import index -# def error(err,data): -# "该函数在找不到模块时执行函数时和框架报错时被调用" -# return str(err) \ No newline at end of file diff --git a/application/api/controller/v2/index.py b/application/api/controller/v2/index.py deleted file mode 100644 index 25d4d39..0000000 --- a/application/api/controller/v2/index.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8 -*- -from ${appname}.${modular}.common import * -def index(): - return tpl("/v2/index/index.html",title="欢迎使用kcweb框架",data=['这是${appname}应用${modular}模块下v2版本的一个模板渲染测试效果']) -def inter(): - data={ - 'title':'欢迎使用kcweb框架', - 'desc':'这是${appname}应用${modular}模块下v2版本的json输出效果' - } - return successjson(data) \ No newline at end of file diff --git a/application/api/tpl/v1/index/index.html b/application/api/tpl/v1/index/index.html deleted file mode 100644 index f4818f5..0000000 --- a/application/api/tpl/v1/index/index.html +++ /dev/null @@ -1,29 +0,0 @@ - - - -${title} - - - - - - - -
-

这是v1模板文件

- % if title: -

${title}

- % endif - % if isinstance(data,str): - ${data} - % elif isinstance(data,list): -
    - % for i in data: -
  • ${i}
  • - % endfor -
- % endif -
- - - \ No newline at end of file diff --git a/application/api/tpl/v2/index/index.html b/application/api/tpl/v2/index/index.html deleted file mode 100644 index f331dab..0000000 --- a/application/api/tpl/v2/index/index.html +++ /dev/null @@ -1,29 +0,0 @@ - - - -${title} - - - - - - - -
-

这是v2模板文件

- % if title: -

${title}

- % endif - % if isinstance(data,str): - ${data} - % elif isinstance(data,list): -
    - % for i in data: -
  • ${i}
  • - % endfor -
- % endif -
- - - \ No newline at end of file diff --git a/application/common/__init__.py b/application/common/__init__.py deleted file mode 100644 index 05e1c79..0000000 --- a/application/common/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -from .autoload import * -def return_list(lists,count,pagenow,pagesize): - """返回分页列表 - - lists 数据库列表数据 - - count 数据库总数量 - - pagenow 页码 - - pagesize 每页数量 - """ - data={ - 'count':count, - 'pagenow':pagenow, - 'pagesize':pagesize, - 'pagecount':math.ceil(int(count)/int(pagesize)), - 'lists':lists - } - return data -def successjson(data=[],code=0,msg="成功",status='200 ok'): - """成功说在浏览器输出包装过的json - - 参数 data 结果 默认[] - - 参数 code body状态码 默认0 - - 参数 msg body状态描述 默认 成功 - - 参数 status http状态码 默认 200 - - 返回 json字符串结果集 - """ - res={ - "code":code, - "msg":msg, - "time":times(), - "data":data - } - return json_encode(res),status,{"Content-Type":"application/json; charset=utf-8","Access-Control-Allow-Origin":"*"} -def errorjson(data=[],code=1,msg="失败",status='500 error'): - """错误时在浏览器输出包装过的json - - 参数 data 结果 默认[] - - 参数 code body状态码 默认0 - - 参数 msg body状态描述 默认 成功 - - 参数 status http状态码 默认 200 - - 返回 json字符串结果集 - """ - return successjson(data=data,code=code,msg=msg,status=status) -def randoms(lens=6,types=1): - """生成随机字符串 - - lens 长度 - - types 1数字 2字母 3字母加数字 - """ - strs="0123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM,!@#$%^&*()_+=-;',./:<>?" 
- if types==1: - strs="0123456789" - elif types==2: - strs="qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM" - elif types==3: - strs="0123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM" - k='' - i=0 - while i < lens: - k+=random.choice(strs) - i+=1 - return k \ No newline at end of file diff --git a/application/common/autoload.py b/application/common/autoload.py deleted file mode 100644 index 99a5719..0000000 --- a/application/common/autoload.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from kcweb.common import * -from ${appname} import config -import math,random -G=globals.G \ No newline at end of file diff --git a/application/config/__init__.py b/application/config/__init__.py deleted file mode 100644 index 3257846..0000000 --- a/application/config/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -from .other import * -#下面的配置都是全局的 -# 应用配置 -app['app_debug']=True #是否开启调试模式 -app['tpl_folder']='./${appname}' #设置模板文件目录名 注意:所有的配置目录都是以您的运行文件所在目录开始 -app['before_request']='before_request' #设置请求前要执行的函数名 -app['after_request']='after_request' #设置请求后要执行的函数名 -app['staticpath']='${appname}/static' #静态主要目录 -# redis配置 -redis['host']='127.0.0.1' #服务器地址 -redis['port']=6379 #端口 -redis['password']='' #密码 -redis['db']=0 #Redis数据库 注:Redis用0或1或2等表示 -redis['pattern']=True # True连接池链接 False非连接池链接 -redis['ex']=0 #过期时间 (秒) -#缓存配置 -cache['type']='File' #驱动方式 支持 File Redis -cache['path']='./${appname}/runtime/cachepath' #缓存保存目录 -cache['expire']=120 #缓存有效期 0表示永久缓存 -cache['host']=redis['host'] #Redis服务器地址 -cache['port']=redis['port'] #Redis 端口 -cache['password']=redis['password'] #Redis登录密码 -cache['db']=1 #Redis数据库 注:Redis用1或2或3等表示 -# session配置 -session['type']='File' #session 存储类型 支持 file、Redis -session['path']='./${appname}/runtime/session/temp' #session缓存目录 -session['expire']=86400 #session默认有效期 该时间是指session在服务的保留时间,通常情况下浏览器上会保留该值的10倍 -session['prefix']="KCW" # SESSION 前缀 -session['host']=redis['host'] #Redis服务器地址 
-session['port']=redis['port'] #Redis 端口 -session['password']=redis['password'] #Redis登录密码 -session['db']=2 #Redis数据库 注:Redis用1或2或3等表示 - -#email配置 -email['sender']='' #发件人邮箱账号 -email['pwd']='' #发件人邮箱密码(如申请的smtp给的口令) -email['sendNick']='' #发件人昵称 -email['theme']='' #默认主题 -email['recNick']='' #默认收件人昵称 \ No newline at end of file diff --git a/application/config/other.py b/application/config/other.py deleted file mode 100644 index 1e4e433..0000000 --- a/application/config/other.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -from kcweb.config import * -#路由配置 -route['default']=True #是否开启默认路由 默认路由开启后面不影响以下配置的路由,模块名/版本名/控制器文件名/方法名 作为路由地址 如:http://www.kcw.com/api/v1/index/index/ -route['modular']='${modular}' -route['edition']='v1' -route['files']='index' #默认路由文件(控制器) -route['funct']='index' #默认路由函数 (操作方法) -route['methods']=['POST','GET'] #默认请求方式 -route['children']=[ - {'title':'首页','path':'','component':'index/home','methods':['POST','GET']}, - {'title':'接口','path':'/inter/:id','component':'index/inter','methods':['POST','GET']} -] -#sqlite配置 -sqlite['db']='kcwlicuxweb' #sqlite数据库文件 diff --git a/common/__init__.py b/common/__init__.py deleted file mode 100644 index 11ffe10..0000000 --- a/common/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from . autoload import * -from . import globals -from . import session -from . 
import request diff --git a/common/autoload.py b/common/autoload.py deleted file mode 100644 index 4b917d1..0000000 --- a/common/autoload.py +++ /dev/null @@ -1,323 +0,0 @@ -# -*- coding: utf-8 -*- -import time,hashlib,json,re,os,platform -import datetime as core_datetime -from kcweb import config -from kcweb.utill.dateutil.relativedelta import relativedelta as core_relativedelta -from kcweb.utill.db import mysql as kcwmysql -from kcweb.utill.db import mongodb as kcwmongodb -from kcweb.utill.db import sqlite as kcwsqlite -from kcweb.utill.cache import cache as kcwcache -from kcweb.utill.redis import redis as kcwredis -from kcweb.utill.http import Http -from kcweb.utill.queues import Queues -from kcweb.utill.db import model -from mako.template import Template as kcwTemplate -import smtplib -from email.mime.text import MIMEText -from email.utils import formataddr -from . import globals - -redis=kcwredis() -def send_mail(user,text="邮件内容",theme="邮件主题",recNick="收件人昵称"): - """发送邮件 - - 参数 user:接收邮件的邮箱地址 - - 参数 text:邮件内容 - - 参数 theme:邮件主题 - - 参数 recNick:收件人昵称 - - return Boolean类型 - """ - ret=True - if not theme: - theme=config.email['theme'] - if not recNick: - recNick=config.email['recNick'] - try: - msg=MIMEText(text,'plain','utf-8') - msg['From']=formataddr([config.email['sendNick'],config.email['sender']]) - msg['To']=formataddr([recNick,user]) - msg['Subject']=theme - - server=smtplib.SMTP_SSL("smtp.qq.com", 465) - server.login(config.email['sender'], config.email['pwd']) - server.sendmail(config.email['sender'],[user,],msg.as_string()) - server.quit() - except Exception: - ret=False - return ret -get_sysinfodesffafew=None -def get_sysinfo(): - """获取系统信息 - - return dict类型 - """ - global get_sysinfodesffafew - if get_sysinfodesffafew: - sysinfo=get_sysinfodesffafew - else: - sysinfo={} - sysinfo['platform']=platform.platform() #获取操作系统名称及版本号,'Linux-3.13.0-46-generic-i686-with-Deepin-2014.2-trusty' - sysinfo['version']=platform.version() #获取操作系统版本号,'#76-Ubuntu SMP Thu 
Feb 26 18:52:49 UTC 2015' - sysinfo['architecture']=platform.architecture() #获取操作系统的位数,('32bit', 'ELF') - sysinfo['machine']=platform.machine() #计算机类型,'i686' - sysinfo['node']=platform.node() #计算机的网络名称,'XF654' - sysinfo['processor']=platform.processor() #计算机处理器信息,''i686' - sysinfo['uname']=platform.uname() #包含上面所有的信息汇总,('Linux', 'XF654', '3.13.0-46-generic', '#76-Ubuntu SMP Thu Feb 26 18:52:49 UTC 2015', 'i686', 'i686') - sysinfo['start_time']=times() - get_sysinfodesffafew=sysinfo - # 还可以获得计算机中python的一些信息: - # import platform - # platform.python_build() - # platform.python_compiler() - # platform.python_branch() - # platform.python_implementation() - # platform.python_revision() - # platform.python_version() - # platform.python_version_tuple() - return sysinfo -def Template(path,**context): - "模板渲染引擎函数,使用配置的模板路径" - return Templates(str(config.app['tpl_folder'])+str(path),**context) -def Templates(path,**context): - "模板渲染引擎函数,需要完整的模板目录文件" - body='' - with open(path, 'r',encoding='utf-8') as f: - content=f.read() - t=kcwTemplate(content) - body=t.render(**context) - return body -def mysql(table=None,configss=None): - """mysql数据库操作实例 - - 参数 table:表名 - - 参数 configss 数据库配置 可以传数据库名字符串 - """ - dbs=kcwmysql.mysql() - if table is None: - return dbs - elif configss: - return dbs.connect(configss).table(table) - else: - return dbs.connect(config.database).table(table) -def sqlite(table=None,configss=None): - """sqlite数据库操作实例 - - 参数 table:表名 - - 参数 configss 数据库配置 可以传数据库名字符串 - """ - dbs=kcwsqlite.sqlite() - if table is None: - return dbs - elif configss: - return dbs.connect(configss).table(table) - else: - return dbs.connect(config.sqlite).table(table) -def M(table=None,confi=None): - """数据库操作实例 - - 参数 table:表名 - - 参数 confi 数据库配置 可以传数据库名字符串 - """ - if confi: - if confi['type']=='sqlite': - return sqlite(table,confi) - else: - return mysql(table,confi) - else: - if config.database['type']=='sqlite': - return sqlite(table) - else: - return mysql(table) -def 
mongo(table=None,configss=None): - """mongodb数据库操作实例 - - 参数 table:表名(mongodb数据库集合名) - - 参数 configss mongodb数据库配置 可以传数据库名字符串 - """ - mObj=kcwmongodb.mongo() - if table is None: - return mObj - elif configss: - return mObj.connect(configss).table(table) - else: - return mObj.connect(config.mongo).table(table) -def is_index(params,index): - """判断列表或字典里的索引是否存在 - - params 列表或字典 - - index 索引值 - - return Boolean类型 - """ - try: - params[index] - except KeyError: - return False - except IndexError: - return False - else: - return True -def set_cache(name,values,expire="no"): - """设置缓存 - - 参数 name:缓存名 - - 参数 values:缓存值 - - 参数 expire:缓存有效期 0表示永久 单位 秒 - - return Boolean类型 - """ - return kcwcache.cache().set_cache(name,values,expire) -def get_cache(name): - """获取缓存 - - 参数 name:缓存名 - - return 或者的值 - """ - return kcwcache.cache().get_cache(name) -def del_cache(name): - """删除缓存 - - 参数 name:缓存名 - - return Boolean类型 - """ - return kcwcache.cache().del_cache(name) -def md5(strs): - """md5加密 - - 参数 strs:要加密的字符串 - - return String类型 - """ - m = hashlib.md5() - b = strs.encode(encoding='utf-8') - m.update(b) - return m.hexdigest() -def times(): - """生成时间戳整数 精确到秒(10位数字) - - return int类型 - """ - return int(time.time()) -def json_decode(strs): - """json字符串转python类型""" - try: - return json.loads(strs) - except Exception: - return {} -def json_encode(strs): - """python列表或字典转成字符串""" - try: - return json.dumps(strs,ensure_ascii=False) - except Exception: - return "" -def dateoperator(date,years=0,formats='%Y%m%d%H%M%S',months=0, days=0, hours=0, minutes=0,seconds=0, - leapdays=0, weeks=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - """日期相加减计算 - date 2019-10-10 - formats 设置需要返回的时间格式 默认%Y%m%d%H%M%S - - years 大于0表示加年 反之减年 - months 大于0表示加月 反之减月 - days 大于0表示加日 反之减日 - - return %Y%m%d%H%M%S - """ - formatss='%Y%m%d%H%M%S' - date=re.sub('[-年/月::日 时分秒]','',date) - if len(date) < 8: - return 
None - if len(date) < 14: - s=14-len(date) - i=0 - while i < s: - date=date+"0" - i=i+1 - d = core_datetime.datetime.strptime(date, formatss) - strs=(d + core_relativedelta(years=years,months=months, days=days, hours=hours, minutes=minutes,seconds=seconds, - leapdays=leapdays, weeks=weeks, microseconds=microseconds, - year=year, month=month, day=day, weekday=weekday, - yearday=yearday, nlyearday=nlyearday, - hour=hour, minute=minute, second=second, microsecond=microsecond)) - strs=strs.strftime(formats) - return strs -def get_folder(): - '获取当前框架所在目录' - path=os.path.split(os.path.realpath(__file__))[0] #当前文件目录 - framepath=path.split('\\') ##框架主目录 - s='' - for k in framepath: - s=s+'/'+k - framepath=s[1:] - return re.sub('/kcw/common','',framepath) #包所在目录 -# aa=[] -def get_file(folder='./',is_folder=True,suffix="*",lists=[],append=False): - """获取文件夹下所有文件夹和文件 - - folder 要获取的文件夹路径 - - is_folder 是否返回列表中包含文件夹 - - suffix 获取指定后缀名的文件 默认全部 - """ - if not append: - lists=[] - lis=os.listdir(folder) - for files in lis: - if not os.path.isfile(folder+"/"+files): - if is_folder: - zd={"type":"folder","path":folder+"/"+files,'name':files} - lists.append(zd) - get_file(folder+"/"+files,is_folder,suffix,lists,append=True) - else: - if suffix=='*': - zd={"type":"file","path":folder+"/"+files,'name':files} - lists.append(zd) - else: - if files[-(len(suffix)+1):]=='.'+str(suffix): - zd={"type":"file","path":folder+"/"+files,'name':files} - lists.append(zd) - return lists - -def list_to_tree(data, pk = 'id', pid = 'pid', child = 'lowerlist', root=0,childstatus=True): - """列表转换tree - - data 要转换的列表 - - pk 关联节点字段 - - pid 父节点字段 - - lowerlist 子节点列表 - - root 主节点值 - - childstatus 当子节点列表为空时是否需要显示子节点字段 - """ - arr = [] - for v in data: - if v[pid] == root: - kkkk=list_to_tree(data,pk,pid,child,v[pk],childstatus) - if childstatus: - # print(kkkk) - v[child]=kkkk - else: - if kkkk: - v[child]=kkkk - arr.append(v) - return arr \ No newline at end of file diff --git a/common/globals.py 
b/common/globals.py deleted file mode 100644 index 64ee2a0..0000000 --- a/common/globals.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -from threading import local -##普通全局变量 请求结束后面删除 -VAR = local() -HEADER = local() -G = local() - - - diff --git a/common/request.py b/common/request.py deleted file mode 100644 index 3b2582a..0000000 --- a/common/request.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -from kcweb.common import globals as kcwglobals -from urllib import parse -import json -class args: - "获取url" - def get(name): - params = parse.parse_qs(parse.urlparse(kcwglobals.HEADER.URL).query) - try: - k=params[name][0] - except: - k=None - return k -class froms: - "获取from" - def get(name): - data=kcwglobals.HEADER.BODY_DATA - params = parse.parse_qs(parse.urlparse("?"+str(data)).query) - # print(params) - try: - k=parse.unquote(params[name][0]) - except: - k=None - return k -class HEADER: - def Method(): - return kcwglobals.HEADER.Method - def URL(): - return kcwglobals.HEADER.URL - def PATH_INFO(): - return kcwglobals.HEADER.PATH_INFO - def SERVER_PROTOCOL(): - return kcwglobals.HEADER.SERVER_PROTOCOL - def HTTP_HOST(): - return kcwglobals.HEADER.HTTP_HOST -def get_data(): - "获取请求参数体" - return kcwglobals.HEADER.BODY_DATA -def get_json(): - "获取请求参数体json" - try: - return json.loads(kcwglobals.HEADER.BODY_DATA) - except: - return None diff --git a/common/session.py b/common/session.py deleted file mode 100644 index 373ca2f..0000000 --- a/common/session.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -from kcweb.config import session as kcwsession -from kcweb.common import globals as kcwglobals -import time,random,hashlib -from kcweb.utill.cache import cache as kcwcache -from datetime import datetime -def __md5(strs): - m = hashlib.md5() - m.update(strs.encode()) - return m.hexdigest() -def set(name,value,expire=None): - "设置session" - if not expire: - expire=kcwsession['expire'] - HTTP_COOKIE=kcwglobals.HEADER.HTTP_COOKIE - 
SESSIONID="SESSIONID"+__md5(str(name)+str(kcwsession['prefix']))[0:8] ####### - try: - HTTP_COOKIE=HTTP_COOKIE.split(";") - except: - token=None - else: - token=None - for k in HTTP_COOKIE: - if SESSIONID in k: - token=k.split("=")[1] - if not token: - strs="kcw"+str(time.time())+str(random.randint(0,9)) - token=__md5(strs) - # print(token) - kcwglobals.set_cookie=SESSIONID+"="+token+";expires="+datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')+"; Max-Age=%d;Path=/" % (int(expire)*10) - kcwcache.cache().set_config(kcwsession).set_cache(token,value,expire) - return True -def get(name): - "获取session" - HTTP_COOKIE=kcwglobals.HEADER.HTTP_COOKIE - try: - HTTP_COOKIE=HTTP_COOKIE.split(";") - except: - return None - SESSIONID="SESSIONID"+__md5(str(name)+str(kcwsession['prefix']))[0:8] ######### - token='' - for k in HTTP_COOKIE: - if SESSIONID in k: - token=k.split("=")[1] - v=kcwcache.cache().set_config(kcwsession).get_cache(token) - return v -def rm(name): - "删除session" - HTTP_COOKIE=kcwglobals.HEADER.HTTP_COOKIE - try: - HTTP_COOKIE=HTTP_COOKIE.split(";") - except: - return None - SESSIONID="SESSIONID"+__md5(str(name)+str(kcwsession['prefix']))[0:8] ####### - token='' - for k in HTTP_COOKIE: - if SESSIONID in k: - token=k.split("=")[1] - kcwcache.cache().set_config(kcwsession).del_cache(token) - kcwglobals.set_cookie=SESSIONID+"="+token+";expires="+datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')+"; Max-Age=2" - return True - diff --git a/config/__init__.py b/config/__init__.py deleted file mode 100644 index 554752a..0000000 --- a/config/__init__.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -# 应用配置 -app={} -app['app_debug']=True #是否开启调试模式 -app['tpl_folder']='./tpl' #设置模板文件目录名 注意:不能配置目录路径 -app['before_request']='' #设置请求前执行的函数 -app['after_request']='' #设置请求后执行的函数 -app['staticpath']='static' -# redis配置 -redis={} -redis['host']='127.0.0.1' #服务器地址 -redis['port']=6379 #端口 -redis['password']='' #密码 -redis['db']=0 #Redis数据库 注:Redis用0或1或2等表示 
-redis['pattern']=True # True连接池链接 False非连接池链接 -redis['ex']=0 #过期时间 (秒) - -#缓存配置 -cache={} -cache['type']='File' #驱动方式 支持 File Redis -cache['path']='runtime/cachepath' #缓存保存目录 -cache['expire']=120 #缓存有效期 0表示永久缓存 -cache['host']=redis['host'] #Redis服务器地址 -cache['port']=redis['port'] #Redis 端口 -cache['password']=redis['password'] #Redis登录密码 -cache['db']=1 #Redis数据库 注:Redis用1或2或3等表示 - -# session配置 -session={} -session['type']='File' #session 存储类型 支持 file、Redis -session['path']='runtime/session' #session缓存目录 -session['expire']=86400 #session默认有效期 该时间是指session在服务的保留时间,通常情况下浏览器上会保留该值的10倍 -session['prefix']="KCW" # SESSION 前缀 -session['host']=redis['host'] #Redis服务器地址 -session['port']=redis['port'] #Redis 端口 -session['password']=redis['password'] #Redis登录密码 -session['db']=2 #Redis数据库 注:Redis用1或2或3等表示 - - -# 默认数据库配置 -database={} -database['type']='mysql' # 数据库类型 目前支持mysql和sqlite -database['host']=['127.0.0.1']#服务器地址 [地址1,地址2,地址3...] 多个地址分布式(主从服务器)下有效 -database['port']=[3306] #端口 [端口1,端口2,端口3...] -database['user']=['root'] #用户名 [用户名1,用户名2,用户名3...] -database['password']=['root'] #密码 [密码1,密码2,密码3...] -database['db']=['test'] #数据库名 [数据库名1,数据库名2,数据库名3...] 
-database['charset']='utf8' #数据库编码默认采用utf8 -database['pattern']=False # True数据库长连接模式 False数据库短连接模式 注:建议web应用使用短连接,cli应用使用长连接 -database['cli']=False # 是否以cli方式运行 -database['dbObjcount']=1 # 连接池数量(单个数据库地址链接数量),数据库链接实例数量 mysql长链接模式下有效 -database['deploy']=0 # 数据库部署方式:0 集中式(单一服务器),1 分布式(主从服务器) mysql数据库有效 -database['master_num']=1 #主服务器数量 不能超过host服务器数量 (等于服务器数量表示读写不分离:主主复制。 小于服务器表示读写分离:主从复制。) mysql数据库有效 -database['master_dql']=False #主服务器是否可以执行dql语句 是否可以执行select语句 主服务器数量大于等于host服务器数量时必须设置True -database['break']=0 #断线重连次数,0表示不重连。 注:cli模式下 10秒进行一次重连并且连接次数是当前配置的300倍 - -#sqlite配置 -sqlite={} -sqlite['db']='kcwdb' # 数据库文件存放地址 - -#mongodb配置 -mongo={} -mongo['host']='127.0.0.1' -mongo['port']='27017' -mongo['user']='' -mongo['password']='' -mongo['db']='test' -mongo['retryWrites']=False #是否支持重新写入 - - - -#路由配置 -route={} -route['default']=True -route['modular']='' -route['edition']='' -route['files']='index' #默认路由文件 -route['funct']='index' #默认路由函数 -route['methods']=['POST','GET'] #默认请求方式 -route['children']=[] -#email配置 -email={} -email['sender']='' #发件人邮箱账号 -email['pwd']='' #发件人邮箱密码(如申请的smtp给的口令) -email['sendNick']='' #发件人昵称 -email['theme']='' #默认主题 -email['recNick']='' #默认收件人昵称 - -kcweb={} -kcweb['name']='kcweb' #项目的名称 -kcweb['version']='2.40.7' #项目版本 -kcweb['description']='基于python后端开发框架' #项目的简单描述 -kcweb['long_description']='' #项目详细描述 -kcweb['license']='MIT' #开源协议 mit开源 -kcweb['url']='http://intapp.kwebapp.cn/index/index/doc/docde/1' -kcweb['author']='禄可集团-坤坤' #名字 -kcweb['author_email']='fk1402936534@qq.com' #邮件地址 -kcweb['maintainer']='坤坤' #维护人员的名字 -kcweb['maintainer_email']='fk1402936534@qq.com' #维护人员的邮件地址 - -#其他配置 -other={} - diff --git a/create.py b/create.py deleted file mode 100644 index 036c060..0000000 --- a/create.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -import os,re,traceback,shutil,platform,sys -from mako.template import Template as kcwTemplate -def Template(path,**context): - body='' - with open(str(path), 'r',encoding='utf-8') as f: - 
content=f.read() - t=kcwTemplate(content) - body=t.render(**context) - return body -class create: - appname=None - modular=None - path=os.path.split(os.path.realpath(__file__))[0] #当前文件目录 - def __init__(self,appname="application",modular="api"): - self.appname=appname - self.modular=modular - if not os.path.exists(self.appname): - os.makedirs(self.appname) - if not os.path.exists(self.appname+"/common"): - os.makedirs(self.appname+"/common") - f=open(self.appname+"/common/__init__.py","w+",encoding='utf-8') - content=Template(self.path+"/application/common/__init__.py",appname=appname,modular=modular) - f.write(content) - f.close() - f=open(self.appname+"/common/autoload.py","w+",encoding='utf-8') - content=Template(self.path+"/application/common/autoload.py",appname=appname,modular=modular) - f.write(content) - f.close() - if not os.path.exists(self.appname+"/config"): - os.makedirs(self.appname+"/config") - f=open(self.appname+"/config/__init__.py","w+",encoding='utf-8') - content=Template(self.path+"/application/config/__init__.py",appname=appname,modular=modular) - f.write(content) - f.close() - f=open(self.appname+"/config/other.py","w+",encoding='utf-8') - content=Template(self.path+"/application/config/other.py",appname=appname,modular=modular) - f.write(content) - f.close() - if not os.path.exists(self.appname+"/"+self.modular): #创建模块 - os.makedirs(self.appname+"/"+self.modular) - self.zxmodular("") - #在应用目录下创建初始化文件 - lists=os.listdir(self.appname) - modulars=[] - filters=['__init__','__pycache__','common','config','runtime','log'] - for files in lists: - if not os.path.isfile(self.appname+"/"+files): - if files not in filters: - modulars.append(files) - f=open(self.appname+"/__init__.py","w+",encoding='utf-8') - content=Template(self.path+"/application/__init__.py",appname=appname,tuple_modular=modulars) - f.write(content) - f.close() - if "Windows" in platform.platform(): - pythonname="python" - else: - pythonname="python3" - 
sys.argv[0]=re.sub('.py','',sys.argv[0]) - content=('# #gunicorn -b 0.0.0.0:39010 '+self.appname+':app\n'+ - 'from kcweb import web\n'+ - 'import '+self.appname+' as application\n'+ - 'from '+self.appname+'.common import *\n'+ - 'Queues.start() #开启队列监听\n'+ - 'app=web(__name__,application)\n'+ - 'if __name__ == "__main__":\n'+ - ' #host监听ip port端口 name python解释器名字 (windows一般是python linux一般是python3)\n'+ - ' app.run(host="0.0.0.0",port="39001",name="'+pythonname+'")') - f=open("./"+sys.argv[0]+".py","w+",encoding='utf-8') - f.write(content) - f.close() - def zxmodular(self,sourcep): - "处理模块文件" - path1=self.path+"/application/api"+sourcep - path2=self.appname+"/"+self.modular+sourcep - lists=os.listdir(path1) - for files in lists: - if os.path.isfile(path1+"/"+files): - if ".py" in files: - content=Template(path1+"/"+files,appname=self.appname,modular=self.modular) - f=open(path2+"/"+files,"w+",encoding='utf-8') - f.write(content) - f.close() - else: - f=open(path1+"/"+files,"r",encoding='utf-8') - content=f.read() - f.close() - f=open(path2+"/"+files,"w+",encoding='utf-8') - f.write(content) - f.close() - elif files != '__pycache__': - if not os.path.exists(path2+"/"+files): - os.makedirs(path2+"/"+files) - self.zxmodular(sourcep+"/"+files) - diff --git a/tpl/error.html b/tpl/error.html deleted file mode 100644 index e23605b..0000000 --- a/tpl/error.html +++ /dev/null @@ -1,27 +0,0 @@ - - - -${title} - - - - - - -
- % if e: -

${e}

- % endif - % if isinstance(data,str): - ${data} - % elif isinstance(data,list): -
    - % for i in data: -
  • ${i}
  • - % endfor -
- % endif - -
- - \ No newline at end of file diff --git a/utill/Queues b/utill/Queues deleted file mode 100644 index 59a2485c5ea3ecbb2fa749a1825af7000325d298..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2O>9(E6o6-FLrY;=V`7k%Z)1c=%>Dma5UtM{K}w-@s)7pMpL4O1g0y9!OQT8g zCvjz5m>8mo#soHQBqYWdL_jdcts57BuyW(VrDrU?wl%NDExea@+L_$<&AHz_=iEDK zpPm?-o`)hGo1Jm<1#jHbXf_*<6-A@b9BDKfefYg}HSw4Jr6>5a_xwLDWIS^J#TJfQ zH+-}){0%kK31qSx5Tfh0?hnxH7oj(F|$B*Dg z&qH%?`~(~gtDp9jlkL%NyXcNSKGrT)joo_G&A@iNbK4s`ngg4+Y-zqZ^*3|k1e}1m z#m99k2#c>3Tc=~OYrNZjvOQUh@9h@jQ)6SrzR6vCMkn_dyW9JV(W&m1FgxvGE)Qlz zn2N#SScDwz()+SyZ#y%0xahW@!+G4E%k~0hXOGRUe$mA^KRxpgYianl*6^)H|M02C z$XCOsMqVA+I{fSKhr|0?Keyg%?QHdJ`g+rAo5aT7H-51ZHWov_41F*(F|=vJ_Z!~W zuw(Gg!7m4o4sIK`Ht_Mlfr0zh|Fr&{^_{yQiR&rW25JMffjhuJ=iUvCJrA~;54QRZ zv&zUQlpsa{5sW(#n9`O;8725z=RUl7=F@BE&Ru`|RR4{WpWQt5?)5Y8-dK45;Mu+N zC12z`(CViwBxfx1fl41WI3rCo;8_Zs2q~6x*8O(+TNv#idFldCnD$6_Iq>*$F`<5VRLoad06; z&J0sjGhwKXiYzCra2o9zv~$66qa}}2#ALWe6c>soEx;}3EO)Z-W9c%4Y7&yP zy?8V+1|p5tr2tYVW*MQCkgMfO8KSiITuCaqP8bW2$f?u}1WVE>R^{aE!qxInhKLX_ zaw)A6l%dO*AVemZ)q)u%_!ta6DLu9Pok2tdM< zmNwonrxleZMM8u0GEoWwcx5D4$`>+38by+z1T;gE4M7Hnel2BMP!+3J=Sul}hUg%I zVG{86knk}%Cp7Vb0L8p@D^g{}5??6~WQg~;ERiLTDF)!EkCAwyl2^<}%>K&h{7~-C z5G^0BzO zG9+nZ9A+zE{Zf?-#mbfh3=k`+qoFdAOJz4hA{Wv-pR^)M0Y(KhVYeg(%_bkJlkH;p zbcSHYYMeghSoX$5qLndB;{+{Q0a=}F7t6^E2`*u8LkKGH9we4BE-j6oAj*pB+PPRx zWC%+N`;kO|37k@936~_LV2l^ex@srim-{lLSG&$Bhz^^hBHm)3lpI1J3>c-=)45RY z&5$UJRxu_UsA7_O;&FYwO1)x^T2+g^P>yE^=eTe6s+X~7h?9Z|&Xf`yt0)o2Dwp`X za!-Z`f=NP+N=FcMj(;{?F~J!>(Jck)fTN75) z&c8V^b8wdmo=`y|#U@KU1m`hvP2{*#WM%oR=Dghd+kA86j|P6#S8bp+P#dTX)COt; gwSn3|ZJ;(#8>kJ`25JL$kb$9Q-1): - s=re.sub('/','',kk[0]) - if s==requeststr[0:len(s)]: - data=True - break - else: - routestr= re.sub('/','',k) - if routestr==requeststr: - data=True - break - for k in config.oauth['unwanted']: - if k == '*': - data=False - kk=k.split("*") - if(len(kk)>1): - s=re.sub('/','',kk[0]) - if s==requeststr[0:len(s)]: - data=False - 
break - else: - routestr= re.sub('/','',k) - if routestr==requeststr: - data=False - break - return data \ No newline at end of file diff --git a/utill/cache/cache.py b/utill/cache/cache.py deleted file mode 100644 index 17c87f1..0000000 --- a/utill/cache/cache.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -import os,sys,time,hashlib,json -from kcweb import config -from kcweb.utill import redis -from kcweb.utill.db.mysql import mysql - -import time,hashlib -def md5(strs): - """md5加密""" - if not strs: - return strs - m = hashlib.md5() - b = strs.encode(encoding='utf-8') - m.update(b) - return m.hexdigest() -def times(): - """时间戳 精确到秒""" - return int(time.time()) -def json_decode(jsonstr): - """json字符串转python类型""" - try: - return eval(jsonstr) - except Exception: - return {} -# def json_decode(strs): -# """json字符串转python类型""" -# try: -# return json.loads(strs) -# except Exception: -# return {} -class cache: - "开发完善中..." - __name=None - __values=None - __cachepath='' #os.path.split(os.path.realpath(__file__))[0]+'/../../../' - __config=config.cache - __redisobj=None - __mysqlobj=None - def __setmysqlonj(self): - conf=config.database - if 'host' in self.__config and self.__config['host']: - conf['host']=[self.__config['host']] - if 'port' in self.__config and self.__config['port']: - conf['port']=[self.__config['port']] - if 'user' in self.__config and self.__config['user']: - conf['user']=[self.__config['user']] - if 'password' in self.__config and self.__config['password']: - conf['password']=[self.__config['password']] - if 'db' in self.__config and self.__config['db']: - conf['db']=[self.__config['db']] - db=mysql() - self.__mysqlobj=db.connect(conf) - def __setredisobj(self): - "设置redis链接实例" - conf=config.redis - if 'host' in self.__config and self.__config['host']: - conf['host']=self.__config['host'] - if 'port' in self.__config and self.__config['port']: - conf['port']=self.__config['port'] - if 'password' in self.__config and 
self.__config['password']: - conf['password']=self.__config['password'] - if 'db' in self.__config and self.__config['db']: - conf['db']=self.__config['db'] - if conf['pattern']: - if conf['password']: - redis_pool=redis.ConnectionPool(host=conf['host'],password=conf['password'],port=conf['port'],db=conf['db']) - else: - redis_pool=redis.ConnectionPool(host=conf['host'],port=conf['port'],db=conf['db']) - self.__redisobj=redis.Redis(connection_pool=redis_pool) - else: - if conf['password']: - self.__redisobj=redis.Redis(host=conf['host'],password=conf['password'],port=conf['port'],db=conf['db']) - else: - self.__redisobj=redis.Redis(host=conf['host'],port=conf['port'],db=conf['db']) - def set_cache(self,name,values,expire = 'no'): - """设置缓存 - - 参数 name:缓存名 - - 参数 values:缓存值 - - 参数 expire:缓存有效期 0表示永久 单位 秒 - - return Boolean类型 - """ - # print(name) - # exit() - self.__name=name - self.__values=values - if expire != 'no': - self.__config['expire']=int(expire) - return self.__seltype('set') - def get_cache(self,name): - """获取缓存 - - return 或者的值 - """ - self.__name=name - return self.__seltype('get') - def del_cache(self,name): - """删除缓存 - - return Boolean类型 - """ - self.__name=name - return self.__seltype('del') - def set_config(self,congig): - """设置缓存配置 - """ - self.__config=congig - return self - - - def __seltype(self,types): - """选择缓存""" - # m = hashlib.md5() - # b = self.__name.encode(encoding='utf-8') - # m.update(b) - self.__name=md5(self.__name) - if self.__config['type'] == 'File': - if types == 'set': - return self.__setfilecache() - elif types=='get': - return self.__getfilecache() - elif types=='del': - return self.__delfilecache() - elif self.__config['type'] == 'Redis': - self.__setredisobj() - if types == 'set': - return self.__setrediscache() - elif types=='get': - return self.__getrediscache() - elif types=='del': - return self.__delrediscache() - elif self.__config['type'] == 'MySql': - self.__setmysqlonj() - if types == 'set': - return 
self.__setmysqlcache() - elif types == 'get': - return self.__getmysqlcache() - elif types == 'del': - return self.__delmysqlcache() - def __setmysqlcache(self): ######################################################################################## - """设置mysql缓存 - - return Boolean类型 - """ - data=[str(self.__values)] - strs="[" - for k in data: - strs=strs+k - strs=strs+"]" - k=self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).count('id') - self.__setmysqlonj() - if k: - return self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).update({"val":strs,"expire":self.__config['expire'],"time":times()}) - else: - return self.__mysqlobj.table('fanshukeji_core_cache').insert({"name":self.__name,"val":strs,"expire":self.__config['expire'],"time":times()}) - def __getmysqlcache(self): - """获取mysql缓存 - - return 缓存的值 - """ - data=self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).find() - if data : - if data['expire']>0 and times()-data['time']>data['expire']: - self.__setmysqlonj() - self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).delete() - return False - else: - return eval(data['val'])[0] - else: - return False - def __delmysqlcache(self): - """删除mysql缓存 - - return Boolean类型 - """ - return self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).delete() - def __setrediscache(self): - """设置redis缓存 - - return Boolean类型 - """ - # print(self.__redisobj) - data=[self.__values] - try: - if self.__config['expire']: - self.__redisobj.set(self.__name,str(data),self.__config['expire']) - else: - self.__redisobj.set(self.__name,str(data)) - except: - return False - return True - def __getrediscache(self): - """获取redis缓存 - - return 缓存的值 - """ - lists=self.__redisobj.get(self.__name) - if lists: - data=eval(lists) - return data[0] - else: - return False - def __delrediscache(self): - """删除redis缓存 - - return int类型 - """ - return self.__redisobj.delete(self.__name) - def 
__setfilecache(self): - """设置文件缓存 - - return Boolean类型 - """ - data={ - 'expire':self.__config['expire'], - 'time':times(), - 'values':self.__values - } - if not os.path.exists(self.__config['path']): - os.makedirs(self.__config['path']) #多层创建目录 - f=open(self.__config['path']+"/"+self.__name,"w") - f.write(str(data)) - f.close() - return True - def __getfilecache(self): - """获取文件缓存 - - return 缓存的值 - """ - try: - f=open(self.__config['path']+"/"+self.__name,"r") - except Exception: - return None - json_str=f.read() - f.close() - ar=json_decode(json_str) - - if ar['expire'] > 0: - if (times()-ar['time']) > ar['expire']: - - self.__delfilecache() - return None - else: - return ar['values'] - else: - return ar['values'] - def __delfilecache(self): - """删除文件缓存 - - return Boolean类型 - """ - if not os.path.exists(self.__config['path']+"/"+self.__name): - return True - try: - os.remove(self.__config['path']+"/"+self.__name) - except: - return False - return True \ No newline at end of file diff --git a/utill/dateutil/__init__.py b/utill/dateutil/__init__.py deleted file mode 100644 index 0defb82..0000000 --- a/utill/dateutil/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -try: - from ._version import version as __version__ -except ImportError: - __version__ = 'unknown' - -__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', - 'utils', 'zoneinfo'] diff --git a/utill/dateutil/_common.py b/utill/dateutil/_common.py deleted file mode 100644 index 4eb2659..0000000 --- a/utill/dateutil/_common.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Common code used in multiple modules. 
-""" - - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __hash__(self): - return hash(( - self.weekday, - self.n, - )) - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -# vim:ts=4:sw=4:et diff --git a/utill/dateutil/_version.py b/utill/dateutil/_version.py deleted file mode 100644 index 670d7ab..0000000 --- a/utill/dateutil/_version.py +++ /dev/null @@ -1,4 +0,0 @@ -# coding: utf-8 -# file generated by setuptools_scm -# don't change, don't track in version control -version = '2.8.0' diff --git a/utill/dateutil/easter.py b/utill/dateutil/easter.py deleted file mode 100644 index 53b7c78..0000000 --- a/utill/dateutil/easter.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a generic easter computing method for any given year, using -Western, Orthodox or Julian algorithms. -""" - -import datetime - -__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] - -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 - - -def easter(year, method=EASTER_WESTERN): - """ - This method was ported from the work done by GM Arts, - on top of the algorithm by Claus Tondering, which was - based in part on the algorithm of Ouding (1940), as - quoted in "Explanatory Supplement to the Astronomical - Almanac", P. Kenneth Seidelmann, editor. 
- - This algorithm implements three different easter - calculation methods: - - 1 - Original calculation in Julian calendar, valid in - dates after 326 AD - 2 - Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 3 - Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - - These methods are represented by the constants: - - * ``EASTER_JULIAN = 1`` - * ``EASTER_ORTHODOX = 2`` - * ``EASTER_WESTERN = 3`` - - The default method is method 3. - - More about the algorithm may be found at: - - `GM Arts: Easter Algorithms `_ - - and - - `The Calendar FAQ: Easter `_ - - """ - - if not (1 <= method <= 3): - raise ValueError("invalid method") - - # g - Golden year - 1 - # c - Century - # h - (23 - Epact) mod 30 - # i - Number of days from March 21 to Paschal Full Moon - # j - Weekday for PFM (0=Sunday, etc) - # p - Number of days from March 21 to Sunday on or before PFM - # (-6 to 28 methods 1 & 3, to 56 for method 2) - # e - Extra days to add for method 2 (converting Julian - # date to Gregorian date) - - y = year - g = y % 19 - e = 0 - if method < 3: - # Old method - i = (19*g + 15) % 30 - j = (y + y//4 + i) % 7 - if method == 2: - # Extra dates to convert Julian to Gregorian date - e = 10 - if y > 1600: - e = e + y//100 - 16 - (y//100 - 16)//4 - else: - # New method - c = y//100 - h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 - i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) - j = (y + y//4 + i + 2 - c + c//4) % 7 - - # p can be from -6 to 56 corresponding to dates 22 March to 23 May - # (later dates apply to method 2, although 23 May never actually occurs) - p = i - j + e - d = 1 + (p + 27 + (p + 6)//40) % 31 - m = 3 + (p + 26)//30 - return datetime.date(int(y), int(m), int(d)) diff --git a/utill/dateutil/parser/__init__.py b/utill/dateutil/parser/__init__.py deleted file mode 100644 index 216762c..0000000 --- a/utill/dateutil/parser/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 
-*- -from ._parser import parse, parser, parserinfo -from ._parser import DEFAULTPARSER, DEFAULTTZPARSER -from ._parser import UnknownTimezoneWarning - -from ._parser import __doc__ - -from .isoparser import isoparser, isoparse - -__all__ = ['parse', 'parser', 'parserinfo', - 'isoparse', 'isoparser', - 'UnknownTimezoneWarning'] - - -### -# Deprecate portions of the private interface so that downstream code that -# is improperly relying on it is given *some* notice. - - -def __deprecated_private_func(f): - from functools import wraps - import warnings - - msg = ('{name} is a private function and may break without warning, ' - 'it will be moved and or renamed in future versions.') - msg = msg.format(name=f.__name__) - - @wraps(f) - def deprecated_func(*args, **kwargs): - warnings.warn(msg, DeprecationWarning) - return f(*args, **kwargs) - - return deprecated_func - -def __deprecate_private_class(c): - import warnings - - msg = ('{name} is a private class and may break without warning, ' - 'it will be moved and or renamed in future versions.') - msg = msg.format(name=c.__name__) - - class private_class(c): - __doc__ = c.__doc__ - - def __init__(self, *args, **kwargs): - warnings.warn(msg, DeprecationWarning) - super(private_class, self).__init__(*args, **kwargs) - - private_class.__name__ = c.__name__ - - return private_class - - -from ._parser import _timelex, _resultbase -from ._parser import _tzparser, _parsetz - -_timelex = __deprecate_private_class(_timelex) -_tzparser = __deprecate_private_class(_tzparser) -_resultbase = __deprecate_private_class(_resultbase) -_parsetz = __deprecated_private_func(_parsetz) diff --git a/utill/dateutil/parser/_parser.py b/utill/dateutil/parser/_parser.py deleted file mode 100644 index 0da0f3e..0000000 --- a/utill/dateutil/parser/_parser.py +++ /dev/null @@ -1,1580 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a generic date/time string parser which is able to parse -most known formats to represent a date and/or time. 
- -This module attempts to be forgiving with regards to unlikely input formats, -returning a datetime object even for dates which are ambiguous. If an element -of a date/time stamp is omitted, the following rules are applied: - -- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour - on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is - specified. -- If a time zone is omitted, a timezone-naive datetime is returned. - -If any other elements are missing, they are taken from the -:class:`datetime.datetime` object passed to the parameter ``default``. If this -results in a day number exceeding the valid number of days per month, the -value falls back to the end of the month. - -Additional resources about date/time string formats can be found below: - -- `A summary of the international standard date and time notation - `_ -- `W3C Date and Time Formats `_ -- `Time Formats (Planetary Rings Node) `_ -- `CPAN ParseDate module - `_ -- `Java SimpleDateFormat Class - `_ -""" -from __future__ import unicode_literals - -import datetime -import re -import string -import time -import warnings - -from calendar import monthrange -from io import StringIO - -import six -from six import integer_types, text_type - -from decimal import Decimal - -from warnings import warn - -from .. import relativedelta -from .. import tz - -__all__ = ["parse", "parserinfo"] - - -# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth -# making public and/or figuring out if there is something we can -# take off their plate. 
-class _timelex(object): - # Fractional seconds are sometimes split by a comma - _split_decimal = re.compile("([.,])") - - def __init__(self, instream): - if six.PY2: - # In Python 2, we can't duck type properly because unicode has - # a 'decode' function, and we'd be double-decoding - if isinstance(instream, (bytes, bytearray)): - instream = instream.decode() - else: - if getattr(instream, 'decode', None) is not None: - instream = instream.decode() - - if isinstance(instream, text_type): - instream = StringIO(instream) - elif getattr(instream, 'read', None) is None: - raise TypeError('Parser must be a string or character stream, not ' - '{itype}'.format(itype=instream.__class__.__name__)) - - self.instream = instream - self.charstack = [] - self.tokenstack = [] - self.eof = False - - def get_token(self): - """ - This function breaks the time string into lexical units (tokens), which - can be parsed by the parser. Lexical units are demarcated by changes in - the character set, so any continuous string of letters is considered - one unit, any continuous string of numbers is considered one unit. - - The main complication arises from the fact that dots ('.') can be used - both as separators (e.g. "Sep.20.2009") or decimal points (e.g. - "4:30:21.447"). As such, it is necessary to read the full context of - any dot-separated strings before breaking it into tokens; as such, this - function maintains a "token stack", for when the ambiguous context - demands that multiple tokens be parsed at once. - """ - if self.tokenstack: - return self.tokenstack.pop(0) - - seenletters = False - token = None - state = None - - while not self.eof: - # We only realize that we've reached the end of a token when we - # find a character that's not part of the current token - since - # that character may be part of the next token, it's stored in the - # charstack. 
- if self.charstack: - nextchar = self.charstack.pop(0) - else: - nextchar = self.instream.read(1) - while nextchar == '\x00': - nextchar = self.instream.read(1) - - if not nextchar: - self.eof = True - break - elif not state: - # First character of the token - determines if we're starting - # to parse a word, a number or something else. - token = nextchar - if self.isword(nextchar): - state = 'a' - elif self.isnum(nextchar): - state = '0' - elif self.isspace(nextchar): - token = ' ' - break # emit token - else: - break # emit token - elif state == 'a': - # If we've already started reading a word, we keep reading - # letters until we find something that's not part of a word. - seenletters = True - if self.isword(nextchar): - token += nextchar - elif nextchar == '.': - token += nextchar - state = 'a.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0': - # If we've already started reading a number, we keep reading - # numbers until we find something that doesn't fit. - if self.isnum(nextchar): - token += nextchar - elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == 'a.': - # If we've seen some letters and a dot separator, continue - # parsing, and the tokens will be broken up later. - seenletters = True - if nextchar == '.' or self.isword(nextchar): - token += nextchar - elif self.isnum(nextchar) and token[-1] == '.': - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0.': - # If we've seen at least one dot separator, keep going, we'll - # break up the tokens later. - if nextchar == '.' or self.isnum(nextchar): - token += nextchar - elif self.isword(nextchar) and token[-1] == '.': - token += nextchar - state = 'a.' 
- else: - self.charstack.append(nextchar) - break # emit token - - if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or - token[-1] in '.,')): - l = self._split_decimal.split(token) - token = l[0] - for tok in l[1:]: - if tok: - self.tokenstack.append(tok) - - if state == '0.' and token.count('.') == 0: - token = token.replace(',', '.') - - return token - - def __iter__(self): - return self - - def __next__(self): - token = self.get_token() - if token is None: - raise StopIteration - - return token - - def next(self): - return self.__next__() # Python 2.x support - - @classmethod - def split(cls, s): - return list(cls(s)) - - @classmethod - def isword(cls, nextchar): - """ Whether or not the next character is part of a word """ - return nextchar.isalpha() - - @classmethod - def isnum(cls, nextchar): - """ Whether the next character is part of a number """ - return nextchar.isdigit() - - @classmethod - def isspace(cls, nextchar): - """ Whether the next character is whitespace """ - return nextchar.isspace() - - -class _resultbase(object): - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def _repr(self, classname): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (classname, ", ".join(l)) - - def __len__(self): - return (sum(getattr(self, attr) is not None - for attr in self.__slots__)) - - def __repr__(self): - return self._repr(self.__class__.__name__) - - -class parserinfo(object): - """ - Class which handles what inputs are accepted. Subclass this to customize - the language and acceptable values for each parameter. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM - and YMD. Default is ``False``. 
- - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken - to be the year, otherwise the last number is taken to be the year. - Default is ``False``. - """ - - # m from a.m/p.m, t from ISO T separator - JUMP = [" ", ".", ",", ";", "-", "/", "'", - "at", "on", "and", "ad", "m", "t", "of", - "st", "nd", "rd", "th"] - - WEEKDAYS = [("Mon", "Monday"), - ("Tue", "Tuesday"), # TODO: "Tues" - ("Wed", "Wednesday"), - ("Thu", "Thursday"), # TODO: "Thurs" - ("Fri", "Friday"), - ("Sat", "Saturday"), - ("Sun", "Sunday")] - MONTHS = [("Jan", "January"), - ("Feb", "February"), # TODO: "Febr" - ("Mar", "March"), - ("Apr", "April"), - ("May", "May"), - ("Jun", "June"), - ("Jul", "July"), - ("Aug", "August"), - ("Sep", "Sept", "September"), - ("Oct", "October"), - ("Nov", "November"), - ("Dec", "December")] - HMS = [("h", "hour", "hours"), - ("m", "minute", "minutes"), - ("s", "second", "seconds")] - AMPM = [("am", "a"), - ("pm", "p")] - UTCZONE = ["UTC", "GMT", "Z", "z"] - PERTAIN = ["of"] - TZOFFSET = {} - # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", - # "Anno Domini", "Year of Our Lord"] - - def __init__(self, dayfirst=False, yearfirst=False): - self._jump = self._convert(self.JUMP) - self._weekdays = self._convert(self.WEEKDAYS) - self._months = self._convert(self.MONTHS) - self._hms = self._convert(self.HMS) - self._ampm = self._convert(self.AMPM) - self._utczone = self._convert(self.UTCZONE) - self._pertain = self._convert(self.PERTAIN) - - self.dayfirst = dayfirst - self.yearfirst = yearfirst - - self._year = time.localtime().tm_year - self._century = self._year // 100 * 100 - - def _convert(self, lst): - dct = {} - for i, v in enumerate(lst): - if isinstance(v, tuple): - for v in v: - dct[v.lower()] = i - else: - dct[v.lower()] = i - return dct - - def jump(self, name): - return name.lower() in self._jump - - def weekday(self, name): - try: - return 
self._weekdays[name.lower()] - except KeyError: - pass - return None - - def month(self, name): - try: - return self._months[name.lower()] + 1 - except KeyError: - pass - return None - - def hms(self, name): - try: - return self._hms[name.lower()] - except KeyError: - return None - - def ampm(self, name): - try: - return self._ampm[name.lower()] - except KeyError: - return None - - def pertain(self, name): - return name.lower() in self._pertain - - def utczone(self, name): - return name.lower() in self._utczone - - def tzoffset(self, name): - if name in self._utczone: - return 0 - - return self.TZOFFSET.get(name) - - def convertyear(self, year, century_specified=False): - """ - Converts two-digit years to year within [-50, 49] - range of self._year (current local time) - """ - - # Function contract is that the year is always positive - assert year >= 0 - - if year < 100 and not century_specified: - # assume current century to start - year += self._century - - if year >= self._year + 50: # if too far in future - year -= 100 - elif year < self._year - 50: # if too far in past - year += 100 - - return year - - def validate(self, res): - # move to info - if res.year is not None: - res.year = self.convertyear(res.year, res.century_specified) - - if ((res.tzoffset == 0 and not res.tzname) or - (res.tzname == 'Z' or res.tzname == 'z')): - res.tzname = "UTC" - res.tzoffset = 0 - elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): - res.tzoffset = 0 - return True - - -class _ymd(list): - def __init__(self, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - self.century_specified = False - self.dstridx = None - self.mstridx = None - self.ystridx = None - - @property - def has_year(self): - return self.ystridx is not None - - @property - def has_month(self): - return self.mstridx is not None - - @property - def has_day(self): - return self.dstridx is not None - - def could_be_day(self, value): - if self.has_day: - return False - elif 
not self.has_month: - return 1 <= value <= 31 - elif not self.has_year: - # Be permissive, assume leapyear - month = self[self.mstridx] - return 1 <= value <= monthrange(2000, month)[1] - else: - month = self[self.mstridx] - year = self[self.ystridx] - return 1 <= value <= monthrange(year, month)[1] - - def append(self, val, label=None): - if hasattr(val, '__len__'): - if val.isdigit() and len(val) > 2: - self.century_specified = True - if label not in [None, 'Y']: # pragma: no cover - raise ValueError(label) - label = 'Y' - elif val > 100: - self.century_specified = True - if label not in [None, 'Y']: # pragma: no cover - raise ValueError(label) - label = 'Y' - - super(self.__class__, self).append(int(val)) - - if label == 'M': - if self.has_month: - raise ValueError('Month is already set') - self.mstridx = len(self) - 1 - elif label == 'D': - if self.has_day: - raise ValueError('Day is already set') - self.dstridx = len(self) - 1 - elif label == 'Y': - if self.has_year: - raise ValueError('Year is already set') - self.ystridx = len(self) - 1 - - def _resolve_from_stridxs(self, strids): - """ - Try to resolve the identities of year/month/day elements using - ystridx, mstridx, and dstridx, if enough of these are specified. 
- """ - if len(self) == 3 and len(strids) == 2: - # we can back out the remaining stridx value - missing = [x for x in range(3) if x not in strids.values()] - key = [x for x in ['y', 'm', 'd'] if x not in strids] - assert len(missing) == len(key) == 1 - key = key[0] - val = missing[0] - strids[key] = val - - assert len(self) == len(strids) # otherwise this should not be called - out = {key: self[strids[key]] for key in strids} - return (out.get('y'), out.get('m'), out.get('d')) - - def resolve_ymd(self, yearfirst, dayfirst): - len_ymd = len(self) - year, month, day = (None, None, None) - - strids = (('y', self.ystridx), - ('m', self.mstridx), - ('d', self.dstridx)) - - strids = {key: val for key, val in strids if val is not None} - if (len(self) == len(strids) > 0 or - (len(self) == 3 and len(strids) == 2)): - return self._resolve_from_stridxs(strids) - - mstridx = self.mstridx - - if len_ymd > 3: - raise ValueError("More than three YMD values") - elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): - # One member, or two members with a month string - if mstridx is not None: - month = self[mstridx] - # since mstridx is 0 or 1, self[mstridx-1] always - # looks up the other element - other = self[mstridx - 1] - else: - other = self[0] - - if len_ymd > 1 or mstridx is None: - if other > 31: - year = other - else: - day = other - - elif len_ymd == 2: - # Two members with numbers - if self[0] > 31: - # 99-01 - year, month = self - elif self[1] > 31: - # 01-99 - month, year = self - elif dayfirst and self[1] <= 12: - # 13-01 - day, month = self - else: - # 01-13 - month, day = self - - elif len_ymd == 3: - # Three members - if mstridx == 0: - if self[1] > 31: - # Apr-2003-25 - month, year, day = self - else: - month, day, year = self - elif mstridx == 1: - if self[0] > 31 or (yearfirst and self[2] <= 31): - # 99-Jan-01 - year, month, day = self - else: - # 01-Jan-01 - # Give precendence to day-first, since - # two-digit years is usually hand-written. 
- day, month, year = self - - elif mstridx == 2: - # WTF!? - if self[1] > 31: - # 01-99-Jan - day, year, month = self - else: - # 99-01-Jan - year, day, month = self - - else: - if (self[0] > 31 or - self.ystridx == 0 or - (yearfirst and self[1] <= 12 and self[2] <= 31)): - # 99-01-01 - if dayfirst and self[2] <= 12: - year, day, month = self - else: - year, month, day = self - elif self[0] > 12 or (dayfirst and self[1] <= 12): - # 13-01-01 - day, month, year = self - else: - # 01-13-01 - month, day, year = self - - return year, month, day - - -class parser(object): - def __init__(self, info=None): - self.info = info or parserinfo() - - def parse(self, timestr, default=None, - ignoretz=False, tzinfos=None, **kwargs): - """ - Parse the date/time string into a :class:`datetime.datetime` object. - - :param timestr: - Any date/time string using the supported formats. - - :param default: - The default datetime object, if this is a datetime object and not - ``None``, elements specified in ``timestr`` replace elements in the - default object. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a - naive :class:`datetime.datetime` object is returned. - - :param tzinfos: - Additional time zone names / aliases which may be present in the - string. This argument maps time zone names (and optionally offsets - from those time zones) to time zones. This parameter can be a - dictionary with timezone aliases mapping time zone names to time - zones or a function taking two parameters (``tzname`` and - ``tzoffset``) and returning a time zone. - - The timezones to which the names are mapped can be an integer - offset from UTC in seconds or a :class:`tzinfo` object. - - .. 
doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from dateutil.parser import parse - >>> from dateutil.tz import gettz - >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} - >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) - >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, - tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) - - This parameter is ignored if ``ignoretz`` is set. - - :param \\*\\*kwargs: - Keyword arguments as passed to ``_parse()``. - - :return: - Returns a :class:`datetime.datetime` object or, if the - ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the - first element being a :class:`datetime.datetime` object, the second - a tuple containing the fuzzy tokens. - - :raises ValueError: - Raised for invalid or unknown string format, if the provided - :class:`tzinfo` is not in a valid format, or if an invalid date - would be created. - - :raises TypeError: - Raised for non-string or character stream input. - - :raises OverflowError: - Raised if the parsed date exceeds the largest valid C integer on - your system. 
- """ - - if default is None: - default = datetime.datetime.now().replace(hour=0, minute=0, - second=0, microsecond=0) - - res, skipped_tokens = self._parse(timestr, **kwargs) - - if res is None: - raise ValueError("Unknown string format:", timestr) - - if len(res) == 0: - raise ValueError("String does not contain a date:", timestr) - - ret = self._build_naive(res, default) - - if not ignoretz: - ret = self._build_tzaware(ret, res, tzinfos) - - if kwargs.get('fuzzy_with_tokens', False): - return ret, skipped_tokens - else: - return ret - - class _result(_resultbase): - __slots__ = ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond", - "tzname", "tzoffset", "ampm","any_unused_tokens"] - - def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, - fuzzy_with_tokens=False): - """ - Private method which performs the heavy lifting of parsing, called from - ``parse()``, which passes on its ``kwargs`` to this function. - - :param timestr: - The string to parse. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM - and YMD. If set to ``None``, this value is retrieved from the - current :class:`parserinfo` object (which itself defaults to - ``False``). - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken - to be the year, otherwise the last number is taken to be the year. - If this is set to ``None``, the value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param fuzzy: - Whether to allow fuzzy parsing, allowing for string like "Today is - January 1, 2047 at 8:21:00AM". 
- - :param fuzzy_with_tokens: - If ``True``, ``fuzzy`` is automatically set to True, and the parser - will return a tuple where the first element is the parsed - :class:`datetime.datetime` datetimestamp and the second element is - a tuple containing the portions of the string which were ignored: - - .. doctest:: - - >>> from dateutil.parser import parse - >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) - (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) - - """ - if fuzzy_with_tokens: - fuzzy = True - - info = self.info - - if dayfirst is None: - dayfirst = info.dayfirst - - if yearfirst is None: - yearfirst = info.yearfirst - - res = self._result() - l = _timelex.split(timestr) # Splits the timestr into tokens - - skipped_idxs = [] - - # year/month/day list - ymd = _ymd() - - len_l = len(l) - i = 0 - try: - while i < len_l: - - # Check if it's a number - value_repr = l[i] - try: - value = float(value_repr) - except ValueError: - value = None - - if value is not None: - # Numeric token - i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) - - # Check weekday - elif info.weekday(l[i]) is not None: - value = info.weekday(l[i]) - res.weekday = value - - # Check month name - elif info.month(l[i]) is not None: - value = info.month(l[i]) - ymd.append(value, 'M') - - if i + 1 < len_l: - if l[i + 1] in ('-', '/'): - # Jan-01[-99] - sep = l[i + 1] - ymd.append(l[i + 2]) - - if i + 3 < len_l and l[i + 3] == sep: - # Jan-01-99 - ymd.append(l[i + 4]) - i += 2 - - i += 2 - - elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and - info.pertain(l[i + 2])): - # Jan of 01 - # In this case, 01 is clearly year - if l[i + 4].isdigit(): - # Convert it here to become unambiguous - value = int(l[i + 4]) - year = str(info.convertyear(value)) - ymd.append(year, 'Y') - else: - # Wrong guess - pass - # TODO: not hit in tests - i += 4 - - # Check am/pm - elif info.ampm(l[i]) is not None: - value = info.ampm(l[i]) - val_is_ampm = 
self._ampm_valid(res.hour, res.ampm, fuzzy) - - if val_is_ampm: - res.hour = self._adjust_ampm(res.hour, value) - res.ampm = value - - elif fuzzy: - skipped_idxs.append(i) - - # Check for a timezone name - elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): - res.tzname = l[i] - res.tzoffset = info.tzoffset(res.tzname) - - # Check for something like GMT+3, or BRST+3. Notice - # that it doesn't mean "I am 3 hours after GMT", but - # "my time +3 is GMT". If found, we reverse the - # logic so that timezone parsing code will get it - # right. - if i + 1 < len_l and l[i + 1] in ('+', '-'): - l[i + 1] = ('+', '-')[l[i + 1] == '+'] - res.tzoffset = None - if info.utczone(res.tzname): - # With something like GMT+3, the timezone - # is *not* GMT. - res.tzname = None - - # Check for a numbered timezone - elif res.hour is not None and l[i] in ('+', '-'): - signal = (-1, 1)[l[i] == '+'] - len_li = len(l[i + 1]) - - # TODO: check that l[i + 1] is integer? - if len_li == 4: - # -0300 - hour_offset = int(l[i + 1][:2]) - min_offset = int(l[i + 1][2:]) - elif i + 2 < len_l and l[i + 2] == ':': - # -03:00 - hour_offset = int(l[i + 1]) - min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
- i += 2 - elif len_li <= 2: - # -[0]3 - hour_offset = int(l[i + 1][:2]) - min_offset = 0 - else: - raise ValueError(timestr) - - res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) - - # Look for a timezone name between parenthesis - if (i + 5 < len_l and - info.jump(l[i + 2]) and l[i + 3] == '(' and - l[i + 5] == ')' and - 3 <= len(l[i + 4]) and - self._could_be_tzname(res.hour, res.tzname, - None, l[i + 4])): - # -0300 (BRST) - res.tzname = l[i + 4] - i += 4 - - i += 1 - - # Check jumps - elif not (info.jump(l[i]) or fuzzy): - raise ValueError(timestr) - - else: - skipped_idxs.append(i) - i += 1 - - # Process year/month/day - year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) - - res.century_specified = ymd.century_specified - res.year = year - res.month = month - res.day = day - - except (IndexError, ValueError): - return None, None - - if not info.validate(res): - return None, None - - if fuzzy_with_tokens: - skipped_tokens = self._recombine_skipped(l, skipped_idxs) - return res, tuple(skipped_tokens) - else: - return res, None - - def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): - # Token is a number - value_repr = tokens[idx] - try: - value = self._to_decimal(value_repr) - except Exception as e: - six.raise_from(ValueError('Unknown numeric token'), e) - - len_li = len(value_repr) - - len_l = len(tokens) - - if (len(ymd) == 3 and len_li in (2, 4) and - res.hour is None and - (idx + 1 >= len_l or - (tokens[idx + 1] != ':' and - info.hms(tokens[idx + 1]) is None))): - # 19990101T23[59] - s = tokens[idx] - res.hour = int(s[:2]) - - if len_li == 4: - res.minute = int(s[2:]) - - elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): - # YYMMDD or HHMMSS[.ss] - s = tokens[idx] - - if not ymd and '.' not in tokens[idx]: - ymd.append(s[:2]) - ymd.append(s[2:4]) - ymd.append(s[4:]) - else: - # 19990101T235959[.59] - - # TODO: Check if res attributes already set. 
- res.hour = int(s[:2]) - res.minute = int(s[2:4]) - res.second, res.microsecond = self._parsems(s[4:]) - - elif len_li in (8, 12, 14): - # YYYYMMDD - s = tokens[idx] - ymd.append(s[:4], 'Y') - ymd.append(s[4:6]) - ymd.append(s[6:8]) - - if len_li > 8: - res.hour = int(s[8:10]) - res.minute = int(s[10:12]) - - if len_li > 12: - res.second = int(s[12:]) - - elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: - # HH[ ]h or MM[ ]m or SS[.ss][ ]s - hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) - (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) - if hms is not None: - # TODO: checking that hour/minute/second are not - # already set? - self._assign_hms(res, value_repr, hms) - - elif idx + 2 < len_l and tokens[idx + 1] == ':': - # HH:MM[:SS[.ss]] - res.hour = int(value) - value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? - (res.minute, res.second) = self._parse_min_sec(value) - - if idx + 4 < len_l and tokens[idx + 3] == ':': - res.second, res.microsecond = self._parsems(tokens[idx + 4]) - - idx += 2 - - idx += 2 - - elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): - sep = tokens[idx + 1] - ymd.append(value_repr) - - if idx + 2 < len_l and not info.jump(tokens[idx + 2]): - if tokens[idx + 2].isdigit(): - # 01-01[-01] - ymd.append(tokens[idx + 2]) - else: - # 01-Jan[-01] - value = info.month(tokens[idx + 2]) - - if value is not None: - ymd.append(value, 'M') - else: - raise ValueError() - - if idx + 3 < len_l and tokens[idx + 3] == sep: - # We have three members - value = info.month(tokens[idx + 4]) - - if value is not None: - ymd.append(value, 'M') - else: - ymd.append(tokens[idx + 4]) - idx += 2 - - idx += 1 - idx += 1 - - elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): - if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: - # 12 am - hour = int(value) - res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) - idx += 1 - else: - # Year, month or day - 
ymd.append(value) - idx += 1 - - elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): - # 12am - hour = int(value) - res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) - idx += 1 - - elif ymd.could_be_day(value): - ymd.append(value) - - elif not fuzzy: - raise ValueError() - - return idx - - def _find_hms_idx(self, idx, tokens, info, allow_jump): - len_l = len(tokens) - - if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: - # There is an "h", "m", or "s" label following this token. We take - # assign the upcoming label to the current token. - # e.g. the "12" in 12h" - hms_idx = idx + 1 - - elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and - info.hms(tokens[idx+2]) is not None): - # There is a space and then an "h", "m", or "s" label. - # e.g. the "12" in "12 h" - hms_idx = idx + 2 - - elif idx > 0 and info.hms(tokens[idx-1]) is not None: - # There is a "h", "m", or "s" preceeding this token. Since neither - # of the previous cases was hit, there is no label following this - # token, so we use the previous label. - # e.g. the "04" in "12h04" - hms_idx = idx-1 - - elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and - info.hms(tokens[idx-2]) is not None): - # If we are looking at the final token, we allow for a - # backward-looking check to skip over a space. - # TODO: Are we sure this is the right condition here? 
- hms_idx = idx - 2 - - else: - hms_idx = None - - return hms_idx - - def _assign_hms(self, res, value_repr, hms): - # See GH issue #427, fixing float rounding - value = self._to_decimal(value_repr) - - if hms == 0: - # Hour - res.hour = int(value) - if value % 1: - res.minute = int(60*(value % 1)) - - elif hms == 1: - (res.minute, res.second) = self._parse_min_sec(value) - - elif hms == 2: - (res.second, res.microsecond) = self._parsems(value_repr) - - def _could_be_tzname(self, hour, tzname, tzoffset, token): - return (hour is not None and - tzname is None and - tzoffset is None and - len(token) <= 5 and - (all(x in string.ascii_uppercase for x in token) - or token in self.info.UTCZONE)) - - def _ampm_valid(self, hour, ampm, fuzzy): - """ - For fuzzy parsing, 'a' or 'am' (both valid English words) - may erroneously trigger the AM/PM flag. Deal with that - here. - """ - val_is_ampm = True - - # If there's already an AM/PM flag, this one isn't one. - if fuzzy and ampm is not None: - val_is_ampm = False - - # If AM/PM is found and hour is not, raise a ValueError - if hour is None: - if fuzzy: - val_is_ampm = False - else: - raise ValueError('No hour specified with AM or PM flag.') - elif not 0 <= hour <= 12: - # If AM/PM is found, it's a 12 hour clock, so raise - # an error for invalid range - if fuzzy: - val_is_ampm = False - else: - raise ValueError('Invalid hour specified for 12-hour clock.') - - return val_is_ampm - - def _adjust_ampm(self, hour, ampm): - if hour < 12 and ampm == 1: - hour += 12 - elif hour == 12 and ampm == 0: - hour = 0 - return hour - - def _parse_min_sec(self, value): - # TODO: Every usage of this function sets res.second to the return - # value. Are there any cases where second will be returned as None and - # we *dont* want to set res.second = None? 
- minute = int(value) - second = None - - sec_remainder = value % 1 - if sec_remainder: - second = int(60 * sec_remainder) - return (minute, second) - - def _parsems(self, value): - """Parse a I[.F] seconds value into (seconds, microseconds).""" - if "." not in value: - return int(value), 0 - else: - i, f = value.split(".") - return int(i), int(f.ljust(6, "0")[:6]) - - def _parse_hms(self, idx, tokens, info, hms_idx): - # TODO: Is this going to admit a lot of false-positives for when we - # just happen to have digits and "h", "m" or "s" characters in non-date - # text? I guess hex hashes won't have that problem, but there's plenty - # of random junk out there. - if hms_idx is None: - hms = None - new_idx = idx - elif hms_idx > idx: - hms = info.hms(tokens[hms_idx]) - new_idx = hms_idx - else: - # Looking backwards, increment one. - hms = info.hms(tokens[hms_idx]) + 1 - new_idx = idx - - return (new_idx, hms) - - def _recombine_skipped(self, tokens, skipped_idxs): - """ - >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] - >>> skipped_idxs = [0, 1, 2, 5] - >>> _recombine_skipped(tokens, skipped_idxs) - ["foo bar", "baz"] - """ - skipped_tokens = [] - for i, idx in enumerate(sorted(skipped_idxs)): - if i > 0 and idx - 1 == skipped_idxs[i - 1]: - skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] - else: - skipped_tokens.append(tokens[idx]) - - return skipped_tokens - - def _build_tzinfo(self, tzinfos, tzname, tzoffset): - if callable(tzinfos): - tzdata = tzinfos(tzname, tzoffset) - else: - tzdata = tzinfos.get(tzname) - # handle case where tzinfo is paased an options that returns None - # eg tzinfos = {'BRST' : None} - if isinstance(tzdata, datetime.tzinfo) or tzdata is None: - tzinfo = tzdata - elif isinstance(tzdata, text_type): - tzinfo = tz.tzstr(tzdata) - elif isinstance(tzdata, integer_types): - tzinfo = tz.tzoffset(tzname, tzdata) - return tzinfo - - def _build_tzaware(self, naive, res, tzinfos): - if (callable(tzinfos) or (tzinfos and res.tzname 
in tzinfos)): - tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) - aware = naive.replace(tzinfo=tzinfo) - aware = self._assign_tzname(aware, res.tzname) - - elif res.tzname and res.tzname in time.tzname: - aware = naive.replace(tzinfo=tz.tzlocal()) - - # Handle ambiguous local datetime - aware = self._assign_tzname(aware, res.tzname) - - # This is mostly relevant for winter GMT zones parsed in the UK - if (aware.tzname() != res.tzname and - res.tzname in self.info.UTCZONE): - aware = aware.replace(tzinfo=tz.tzutc()) - - elif res.tzoffset == 0: - aware = naive.replace(tzinfo=tz.tzutc()) - - elif res.tzoffset: - aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) - - elif not res.tzname and not res.tzoffset: - # i.e. no timezone information was found. - aware = naive - - elif res.tzname: - # tz-like string was parsed but we don't know what to do - # with it - warnings.warn("tzname {tzname} identified but not understood. " - "Pass `tzinfos` argument in order to correctly " - "return a timezone-aware datetime. In a future " - "version, this will raise an " - "exception.".format(tzname=res.tzname), - category=UnknownTimezoneWarning) - aware = naive - - return aware - - def _build_naive(self, res, default): - repl = {} - for attr in ("year", "month", "day", "hour", - "minute", "second", "microsecond"): - value = getattr(res, attr) - if value is not None: - repl[attr] = value - - if 'day' not in repl: - # If the default day exceeds the last day of the month, fall back - # to the end of the month. 
- cyear = default.year if res.year is None else res.year - cmonth = default.month if res.month is None else res.month - cday = default.day if res.day is None else res.day - - if cday > monthrange(cyear, cmonth)[1]: - repl['day'] = monthrange(cyear, cmonth)[1] - - naive = default.replace(**repl) - - if res.weekday is not None and not res.day: - naive = naive + relativedelta.relativedelta(weekday=res.weekday) - - return naive - - def _assign_tzname(self, dt, tzname): - if dt.tzname() != tzname: - new_dt = tz.enfold(dt, fold=1) - if new_dt.tzname() == tzname: - return new_dt - - return dt - - def _to_decimal(self, val): - try: - decimal_value = Decimal(val) - # See GH 662, edge case, infinite value should not be converted via `_to_decimal` - if not decimal_value.is_finite(): - raise ValueError("Converted decimal value is infinite or NaN") - except Exception as e: - msg = "Could not convert %s to decimal" % val - six.raise_from(ValueError(msg), e) - else: - return decimal_value - - -DEFAULTPARSER = parser() - - -def parse(timestr, parserinfo=None, **kwargs): - """ - - Parse a string in one of the supported formats, using the - ``parserinfo`` parameters. - - :param timestr: - A string containing a date/time stamp. - - :param parserinfo: - A :class:`parserinfo` object containing parameters for the parser. - If ``None``, the default arguments to the :class:`parserinfo` - constructor are used. - - The ``**kwargs`` parameter takes the following keyword arguments: - - :param default: - The default datetime object, if this is a datetime object and not - ``None``, elements specified in ``timestr`` replace elements in the - default object. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a naive - :class:`datetime` object is returned. - - :param tzinfos: - Additional time zone names / aliases which may be present in the - string. This argument maps time zone names (and optionally offsets - from those time zones) to time zones. 
This parameter can be a - dictionary with timezone aliases mapping time zone names to time - zones or a function taking two parameters (``tzname`` and - ``tzoffset``) and returning a time zone. - - The timezones to which the names are mapped can be an integer - offset from UTC in seconds or a :class:`tzinfo` object. - - .. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from dateutil.parser import parse - >>> from dateutil.tz import gettz - >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} - >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) - >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, - tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) - - This parameter is ignored if ``ignoretz`` is set. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM and - YMD. If set to ``None``, this value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken to - be the year, otherwise the last number is taken to be the year. If - this is set to ``None``, the value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param fuzzy: - Whether to allow fuzzy parsing, allowing for string like "Today is - January 1, 2047 at 8:21:00AM". 
- - :param fuzzy_with_tokens: - If ``True``, ``fuzzy`` is automatically set to True, and the parser - will return a tuple where the first element is the parsed - :class:`datetime.datetime` datetimestamp and the second element is - a tuple containing the portions of the string which were ignored: - - .. doctest:: - - >>> from dateutil.parser import parse - >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) - (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) - - :return: - Returns a :class:`datetime.datetime` object or, if the - ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the - first element being a :class:`datetime.datetime` object, the second - a tuple containing the fuzzy tokens. - - :raises ValueError: - Raised for invalid or unknown string format, if the provided - :class:`tzinfo` is not in a valid format, or if an invalid date - would be created. - - :raises OverflowError: - Raised if the parsed date exceeds the largest valid C integer on - your system. 
- """ - if parserinfo: - return parser(parserinfo).parse(timestr, **kwargs) - else: - return DEFAULTPARSER.parse(timestr, **kwargs) - - -class _tzparser(object): - - class _result(_resultbase): - - __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", - "start", "end"] - - class _attr(_resultbase): - __slots__ = ["month", "week", "weekday", - "yday", "jyday", "day", "time"] - - def __repr__(self): - return self._repr("") - - def __init__(self): - _resultbase.__init__(self) - self.start = self._attr() - self.end = self._attr() - - def parse(self, tzstr): - res = self._result() - l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] - used_idxs = list() - try: - - len_l = len(l) - - i = 0 - while i < len_l: - # BRST+3[BRDT[+2]] - j = i - while j < len_l and not [x for x in l[j] - if x in "0123456789:,-+"]: - j += 1 - if j != i: - if not res.stdabbr: - offattr = "stdoffset" - res.stdabbr = "".join(l[i:j]) - else: - offattr = "dstoffset" - res.dstabbr = "".join(l[i:j]) - - for ii in range(j): - used_idxs.append(ii) - i = j - if (i < len_l and (l[i] in ('+', '-') or l[i][0] in - "0123456789")): - if l[i] in ('+', '-'): - # Yes, that's right. See the TZ variable - # documentation. 
- signal = (1, -1)[l[i] == '+'] - used_idxs.append(i) - i += 1 - else: - signal = -1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - setattr(res, offattr, (int(l[i][:2]) * 3600 + - int(l[i][2:]) * 60) * signal) - elif i + 1 < len_l and l[i + 1] == ':': - # -03:00 - setattr(res, offattr, - (int(l[i]) * 3600 + - int(l[i + 2]) * 60) * signal) - used_idxs.append(i) - i += 2 - elif len_li <= 2: - # -[0]3 - setattr(res, offattr, - int(l[i][:2]) * 3600 * signal) - else: - return None - used_idxs.append(i) - i += 1 - if res.dstabbr: - break - else: - break - - - if i < len_l: - for j in range(i, len_l): - if l[j] == ';': - l[j] = ',' - - assert l[i] == ',' - - i += 1 - - if i >= len_l: - pass - elif (8 <= l.count(',') <= 9 and - not [y for x in l[i:] if x != ',' - for y in x if y not in "0123456789+-"]): - # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] - for x in (res.start, res.end): - x.month = int(l[i]) - used_idxs.append(i) - i += 2 - if l[i] == '-': - value = int(l[i + 1]) * -1 - used_idxs.append(i) - i += 1 - else: - value = int(l[i]) - used_idxs.append(i) - i += 2 - if value: - x.week = value - x.weekday = (int(l[i]) - 1) % 7 - else: - x.day = int(l[i]) - used_idxs.append(i) - i += 2 - x.time = int(l[i]) - used_idxs.append(i) - i += 2 - if i < len_l: - if l[i] in ('-', '+'): - signal = (-1, 1)[l[i] == "+"] - used_idxs.append(i) - i += 1 - else: - signal = 1 - used_idxs.append(i) - res.dstoffset = (res.stdoffset + int(l[i]) * signal) - - # This was a made-up format that is not in normal use - warn(('Parsed time zone "%s"' % tzstr) + - 'is in a non-standard dateutil-specific format, which ' + - 'is now deprecated; support for parsing this format ' + - 'will be removed in future versions. 
It is recommended ' + - 'that you switch to a standard format like the GNU ' + - 'TZ variable format.', tz.DeprecatedTzFormatWarning) - elif (l.count(',') == 2 and l[i:].count('/') <= 2 and - not [y for x in l[i:] if x not in (',', '/', 'J', 'M', - '.', '-', ':') - for y in x if y not in "0123456789"]): - for x in (res.start, res.end): - if l[i] == 'J': - # non-leap year day (1 based) - used_idxs.append(i) - i += 1 - x.jyday = int(l[i]) - elif l[i] == 'M': - # month[-.]week[-.]weekday - used_idxs.append(i) - i += 1 - x.month = int(l[i]) - used_idxs.append(i) - i += 1 - assert l[i] in ('-', '.') - used_idxs.append(i) - i += 1 - x.week = int(l[i]) - if x.week == 5: - x.week = -1 - used_idxs.append(i) - i += 1 - assert l[i] in ('-', '.') - used_idxs.append(i) - i += 1 - x.weekday = (int(l[i]) - 1) % 7 - else: - # year day (zero based) - x.yday = int(l[i]) + 1 - - used_idxs.append(i) - i += 1 - - if i < len_l and l[i] == '/': - used_idxs.append(i) - i += 1 - # start time - len_li = len(l[i]) - if len_li == 4: - # -0300 - x.time = (int(l[i][:2]) * 3600 + - int(l[i][2:]) * 60) - elif i + 1 < len_l and l[i + 1] == ':': - # -03:00 - x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 - used_idxs.append(i) - i += 2 - if i + 1 < len_l and l[i + 1] == ':': - used_idxs.append(i) - i += 2 - x.time += int(l[i]) - elif len_li <= 2: - # -[0]3 - x.time = (int(l[i][:2]) * 3600) - else: - return None - used_idxs.append(i) - i += 1 - - assert i == len_l or l[i] == ',' - - i += 1 - - assert i >= len_l - - except (IndexError, ValueError, AssertionError): - return None - - unused_idxs = set(range(len_l)).difference(used_idxs) - res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) - return res - - -DEFAULTTZPARSER = _tzparser() - - -def _parsetz(tzstr): - return DEFAULTTZPARSER.parse(tzstr) - -class UnknownTimezoneWarning(RuntimeWarning): - """Raised when the parser finds a timezone it cannot parse into a tzinfo""" -# vim:ts=4:sw=4:et diff --git 
a/utill/dateutil/parser/isoparser.py b/utill/dateutil/parser/isoparser.py deleted file mode 100644 index e3cf6d8..0000000 --- a/utill/dateutil/parser/isoparser.py +++ /dev/null @@ -1,411 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a parser for ISO-8601 strings - -It is intended to support all valid date, time and datetime formats per the -ISO-8601 specification. - -..versionadded:: 2.7.0 -""" -from datetime import datetime, timedelta, time, date -import calendar -from dateutil import tz - -from functools import wraps - -import re -import six - -__all__ = ["isoparse", "isoparser"] - - -def _takes_ascii(f): - @wraps(f) - def func(self, str_in, *args, **kwargs): - # If it's a stream, read the whole thing - str_in = getattr(str_in, 'read', lambda: str_in)() - - # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII - if isinstance(str_in, six.text_type): - # ASCII is the same in UTF-8 - try: - str_in = str_in.encode('ascii') - except UnicodeEncodeError as e: - msg = 'ISO-8601 strings should contain only ASCII characters' - six.raise_from(ValueError(msg), e) - - return f(self, str_in, *args, **kwargs) - - return func - - -class isoparser(object): - def __init__(self, sep=None): - """ - :param sep: - A single character that separates date and time portions. If - ``None``, the parser will accept any single character. - For strict ISO-8601 adherence, pass ``'T'``. - """ - if sep is not None: - if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): - raise ValueError('Separator must be a single, non-numeric ' + - 'ASCII character') - - sep = sep.encode('ascii') - - self._sep = sep - - @_takes_ascii - def isoparse(self, dt_str): - """ - Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. - - An ISO-8601 datetime string consists of a date portion, followed - optionally by a time portion - the date and time portions are separated - by a single character separator, which is ``T`` in the official - standard. 
Incomplete date formats (such as ``YYYY-MM``) may *not* be - combined with a time portion. - - Supported date formats are: - - Common: - - - ``YYYY`` - - ``YYYY-MM`` or ``YYYYMM`` - - ``YYYY-MM-DD`` or ``YYYYMMDD`` - - Uncommon: - - - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) - - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day - - The ISO week and day numbering follows the same logic as - :func:`datetime.date.isocalendar`. - - Supported time formats are: - - - ``hh`` - - ``hh:mm`` or ``hhmm`` - - ``hh:mm:ss`` or ``hhmmss`` - - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) - - Midnight is a special case for `hh`, as the standard supports both - 00:00 and 24:00 as a representation. The decimal separator can be - either a dot or a comma. - - - .. caution:: - - Support for fractional components other than seconds is part of the - ISO-8601 standard, but is not currently implemented in this parser. - - Supported time zone offset formats are: - - - `Z` (UTC) - - `±HH:MM` - - `±HHMM` - - `±HH` - - Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, - with the exception of UTC, which will be represented as - :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such - as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. - - :param dt_str: - A string or stream containing only an ISO-8601 datetime string - - :return: - Returns a :class:`datetime.datetime` representing the string. - Unspecified components default to their lowest value. - - .. warning:: - - As of version 2.7.0, the strictness of the parser should not be - considered a stable part of the contract. Any valid ISO-8601 string - that parses correctly with the default settings will continue to - parse correctly in future versions, but invalid strings that - currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not - guaranteed to continue failing in future versions if they encode - a valid date. - - .. 
versionadded:: 2.7.0 - """ - components, pos = self._parse_isodate(dt_str) - - if len(dt_str) > pos: - if self._sep is None or dt_str[pos:pos + 1] == self._sep: - components += self._parse_isotime(dt_str[pos + 1:]) - else: - raise ValueError('String contains unknown ISO components') - - if len(components) > 3 and components[3] == 24: - components[3] = 0 - return datetime(*components) + timedelta(days=1) - - return datetime(*components) - - @_takes_ascii - def parse_isodate(self, datestr): - """ - Parse the date portion of an ISO string. - - :param datestr: - The string portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.date` object - """ - components, pos = self._parse_isodate(datestr) - if pos < len(datestr): - raise ValueError('String contains unknown ISO ' + - 'components: {}'.format(datestr)) - return date(*components) - - @_takes_ascii - def parse_isotime(self, timestr): - """ - Parse the time portion of an ISO string. - - :param timestr: - The time portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.time` object - """ - components = self._parse_isotime(timestr) - if components[0] == 24: - components[0] = 0 - return time(*components) - - @_takes_ascii - def parse_tzstr(self, tzstr, zero_as_utc=True): - """ - Parse a valid ISO time zone string. - - See :func:`isoparser.isoparse` for details on supported formats. - - :param tzstr: - A string representing an ISO time zone offset - - :param zero_as_utc: - Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones - - :return: - Returns :class:`dateutil.tz.tzoffset` for offsets and - :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is - specified) offsets equivalent to UTC. 
- """ - return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) - - # Constants - _DATE_SEP = b'-' - _TIME_SEP = b':' - _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') - - def _parse_isodate(self, dt_str): - try: - return self._parse_isodate_common(dt_str) - except ValueError: - return self._parse_isodate_uncommon(dt_str) - - def _parse_isodate_common(self, dt_str): - len_str = len(dt_str) - components = [1, 1, 1] - - if len_str < 4: - raise ValueError('ISO string too short') - - # Year - components[0] = int(dt_str[0:4]) - pos = 4 - if pos >= len_str: - return components, pos - - has_sep = dt_str[pos:pos + 1] == self._DATE_SEP - if has_sep: - pos += 1 - - # Month - if len_str - pos < 2: - raise ValueError('Invalid common month') - - components[1] = int(dt_str[pos:pos + 2]) - pos += 2 - - if pos >= len_str: - if has_sep: - return components, pos - else: - raise ValueError('Invalid ISO format') - - if has_sep: - if dt_str[pos:pos + 1] != self._DATE_SEP: - raise ValueError('Invalid separator in ISO string') - pos += 1 - - # Day - if len_str - pos < 2: - raise ValueError('Invalid common day') - components[2] = int(dt_str[pos:pos + 2]) - return components, pos + 2 - - def _parse_isodate_uncommon(self, dt_str): - if len(dt_str) < 4: - raise ValueError('ISO string too short') - - # All ISO formats start with the year - year = int(dt_str[0:4]) - - has_sep = dt_str[4:5] == self._DATE_SEP - - pos = 4 + has_sep # Skip '-' if it's there - if dt_str[pos:pos + 1] == b'W': - # YYYY-?Www-?D? 
- pos += 1 - weekno = int(dt_str[pos:pos + 2]) - pos += 2 - - dayno = 1 - if len(dt_str) > pos: - if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: - raise ValueError('Inconsistent use of dash separator') - - pos += has_sep - - dayno = int(dt_str[pos:pos + 1]) - pos += 1 - - base_date = self._calculate_weekdate(year, weekno, dayno) - else: - # YYYYDDD or YYYY-DDD - if len(dt_str) - pos < 3: - raise ValueError('Invalid ordinal day') - - ordinal_day = int(dt_str[pos:pos + 3]) - pos += 3 - - if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): - raise ValueError('Invalid ordinal day' + - ' {} for year {}'.format(ordinal_day, year)) - - base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) - - components = [base_date.year, base_date.month, base_date.day] - return components, pos - - def _calculate_weekdate(self, year, week, day): - """ - Calculate the day of corresponding to the ISO year-week-day calendar. - - This function is effectively the inverse of - :func:`datetime.date.isocalendar`. 
- - :param year: - The year in the ISO calendar - - :param week: - The week in the ISO calendar - range is [1, 53] - - :param day: - The day in the ISO calendar - range is [1 (MON), 7 (SUN)] - - :return: - Returns a :class:`datetime.date` - """ - if not 0 < week < 54: - raise ValueError('Invalid week: {}'.format(week)) - - if not 0 < day < 8: # Range is 1-7 - raise ValueError('Invalid weekday: {}'.format(day)) - - # Get week 1 for the specific year: - jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it - week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) - - # Now add the specific number of weeks and days to get what we want - week_offset = (week - 1) * 7 + (day - 1) - return week_1 + timedelta(days=week_offset) - - def _parse_isotime(self, timestr): - len_str = len(timestr) - components = [0, 0, 0, 0, None] - pos = 0 - comp = -1 - - if len(timestr) < 2: - raise ValueError('ISO time too short') - - has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP - - while pos < len_str and comp < 5: - comp += 1 - - if timestr[pos:pos + 1] in b'-+Zz': - # Detect time zone boundary - components[-1] = self._parse_tzstr(timestr[pos:]) - pos = len_str - break - - if comp < 3: - # Hour, minute, second - components[comp] = int(timestr[pos:pos + 2]) - pos += 2 - if (has_sep and pos < len_str and - timestr[pos:pos + 1] == self._TIME_SEP): - pos += 1 - - if comp == 3: - # Fraction of a second - frac = self._FRACTION_REGEX.match(timestr[pos:]) - if not frac: - continue - - us_str = frac.group(1)[:6] # Truncate to microseconds - components[comp] = int(us_str) * 10**(6 - len(us_str)) - pos += len(frac.group()) - - if pos < len_str: - raise ValueError('Unused components in ISO string') - - if components[0] == 24: - # Standard supports 00:00 and 24:00 as representations of midnight - if any(component != 0 for component in components[1:4]): - raise ValueError('Hour may only be 24 at 24:00:00.000') - - return components - - def _parse_tzstr(self, tzstr, 
zero_as_utc=True): - if tzstr == b'Z' or tzstr == b'z': - return tz.tzutc() - - if len(tzstr) not in {3, 5, 6}: - raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') - - if tzstr[0:1] == b'-': - mult = -1 - elif tzstr[0:1] == b'+': - mult = 1 - else: - raise ValueError('Time zone offset requires sign') - - hours = int(tzstr[1:3]) - if len(tzstr) == 3: - minutes = 0 - else: - minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) - - if zero_as_utc and hours == 0 and minutes == 0: - return tz.tzutc() - else: - if minutes > 59: - raise ValueError('Invalid minutes in time zone offset') - - if hours > 23: - raise ValueError('Invalid hours in time zone offset') - - return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) - - -DEFAULT_ISOPARSER = isoparser() -isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/utill/dateutil/relativedelta.py b/utill/dateutil/relativedelta.py deleted file mode 100644 index c65c66e..0000000 --- a/utill/dateutil/relativedelta.py +++ /dev/null @@ -1,599 +0,0 @@ -# -*- coding: utf-8 -*- -import datetime -import calendar - -import operator -from math import copysign - -from six import integer_types -from warnings import warn - -from ._common import weekday - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - - -class relativedelta(object): - """ - The relativedelta type is designed to be applied to an existing datetime and - can replace specific components of that datetime, or represents an interval - of time. - - It is based on the specification of the excellent work done by M.-A. Lemburg - in his - `mx.DateTime `_ extension. - However, notice that this type does *NOT* implement the same algorithm as - his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - - There are two different ways to build a relativedelta instance. 
The - first one is passing it two date/datetime classes:: - - relativedelta(datetime1, datetime2) - - The second one is passing it any number of the following keyword arguments:: - - relativedelta(arg1=x,arg2=y,arg3=z...) - - year, month, day, hour, minute, second, microsecond: - Absolute information (argument is singular); adding or subtracting a - relativedelta with absolute information does not perform an arithmetic - operation, but rather REPLACES the corresponding value in the - original datetime with the value(s) in relativedelta. - - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative (argument is plural); adding - or subtracting a relativedelta with relative information performs - the corresponding aritmetic operation on the original datetime value - with the information in the relativedelta. - - weekday: - One of the weekday instances (MO, TU, etc) available in the - relativedelta module. These instances may receive a parameter N, - specifying the Nth weekday, which could be positive or negative - (like MO(+1) or MO(-2)). Not specifying it is the same as specifying - +1. You can also use an integer, where 0=MO. This argument is always - relative e.g. if the calculated date is already Monday, using MO(1) - or MO(-1) won't change the day. To effectively make it absolute, use - it in combination with the day argument (e.g. day=1, MO(1) for first - Monday of the month). - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - - There are relative and absolute forms of the keyword - arguments. The plural is relative, and the singular is - absolute. 
For each argument in the order below, the absolute form - is applied first (by setting each attribute to that value) and - then the relative form (by adding the value to the attribute). - - The order of attributes considered when this relativedelta is - added to a datetime is: - - 1. Year - 2. Month - 3. Day - 4. Hours - 5. Minutes - 6. Seconds - 7. Microseconds - - Finally, weekday is applied, using the rule described above. - - For example - - >>> from datetime import datetime - >>> from dateutil.relativedelta import relativedelta, MO - >>> dt = datetime(2018, 4, 9, 13, 37, 0) - >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) - >>> dt + delta - datetime.datetime(2018, 4, 2, 14, 37) - - First, the day is set to 1 (the first of the month), then 25 hours - are added, to get to the 2nd day and 14th hour, finally the - weekday is applied, but since the 2nd is already a Monday there is - no effect. - - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - - if dt1 and dt2: - # datetime is a subclass of date. 
So both must be date - if not (isinstance(dt1, datetime.date) and - isinstance(dt2, datetime.date)): - raise TypeError("relativedelta only diffs datetime/date") - - # We allow two dates, or two datetimes, so we coerce them to be - # of the same type - if (isinstance(dt1, datetime.datetime) != - isinstance(dt2, datetime.datetime)): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - # Get year / month delta between the two - months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) - self._set_months(months) - - # Remove the year/month delta so the timedelta is just well-defined - # time units (seconds, days and microseconds) - dtm = self.__radd__(dt2) - - # If we've overshot our target, make an adjustment - if dt1 < dt2: - compare = operator.gt - increment = 1 - else: - compare = operator.lt - increment = -1 - - while compare(dt1, dtm): - months += increment - self._set_months(months) - dtm = self.__radd__(dt2) - - # Get the timedelta between the "months-adjusted" date and dt1 - delta = dt1 - dtm - self.seconds = delta.seconds + delta.days * 86400 - self.microseconds = delta.microseconds - else: - # Check for non-integer values in integer-only quantities - if any(x is not None and x != int(x) for x in (years, months)): - raise ValueError("Non-integer years and months are " - "ambiguous and not currently supported.") - - # Relative information - self.years = int(years) - self.months = int(months) - self.days = days + weeks * 7 - self.leapdays = leapdays - self.hours = hours - 
self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - - # Absolute information - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if any(x is not None and int(x) != x - for x in (year, month, day, hour, - minute, second, microsecond)): - # For now we'll deprecate floats - later it'll be an error. - warn("Non-integer value passed as absolute information. " + - "This is not a well-defined condition and will raise " + - "errors in future versions.", DeprecationWarning) - - if isinstance(weekday, integer_types): - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31, 59, 90, 120, 151, 181, 212, - 243, 273, 304, 334, 366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError("invalid year day (%d)" % yday) - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = _sign(self.microseconds) - div, mod = divmod(self.microseconds * s, 1000000) - self.microseconds = mod * s - self.seconds += div * s - if abs(self.seconds) > 59: - s = _sign(self.seconds) - div, mod = divmod(self.seconds * s, 60) - self.seconds = mod * s - self.minutes += div * s - if abs(self.minutes) > 59: - s = _sign(self.minutes) - div, mod = divmod(self.minutes * s, 60) - self.minutes = mod * s - self.hours += div * s - if abs(self.hours) > 23: - s = _sign(self.hours) - div, mod = divmod(self.hours * s, 24) - self.hours = mod * s - self.days += div * s - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years += div * s - if (self.hours or self.minutes or self.seconds or 
self.microseconds - or self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - @property - def weeks(self): - return int(self.days / 7.0) - - @weeks.setter - def weeks(self, value): - self.days = self.days - (self.weeks * 7) + value * 7 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years = div * s - else: - self.years = 0 - - def normalized(self): - """ - Return a version of this object represented entirely using integer - values for the relative attributes. - - >>> relativedelta(days=1.5, hours=2).normalized() - relativedelta(days=+1, hours=+14) - - :return: - Returns a :class:`dateutil.relativedelta.relativedelta` object. - """ - # Cascade remainders down (rounding each to roughly nearest microsecond) - days = int(self.days) - - hours_f = round(self.hours + 24 * (self.days - days), 11) - hours = int(hours_f) - - minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) - minutes = int(minutes_f) - - seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) - seconds = int(seconds_f) - - microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) - - # Constructor carries overflow back up with call to _fix() - return self.__class__(years=self.years, months=self.months, - days=days, hours=hours, minutes=minutes, - seconds=seconds, microseconds=microseconds, - leapdays=self.leapdays, year=self.year, - month=self.month, day=self.day, - weekday=self.weekday, hour=self.hour, - minute=self.minute, second=self.second, - microsecond=self.microsecond) - - def __add__(self, other): - if isinstance(other, relativedelta): - return self.__class__(years=other.years + self.years, - months=other.months + self.months, - days=other.days + self.days, - hours=other.hours + self.hours, - minutes=other.minutes + 
self.minutes, - seconds=other.seconds + self.seconds, - microseconds=(other.microseconds + - self.microseconds), - leapdays=other.leapdays or self.leapdays, - year=(other.year if other.year is not None - else self.year), - month=(other.month if other.month is not None - else self.month), - day=(other.day if other.day is not None - else self.day), - weekday=(other.weekday if other.weekday is not None - else self.weekday), - hour=(other.hour if other.hour is not None - else self.hour), - minute=(other.minute if other.minute is not None - else self.minute), - second=(other.second if other.second is not None - else self.second), - microsecond=(other.microsecond if other.microsecond - is not None else - self.microsecond)) - if isinstance(other, datetime.timedelta): - return self.__class__(years=self.years, - months=self.months, - days=self.days + other.days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds + other.seconds, - microseconds=self.microseconds + other.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - if not isinstance(other, datetime.date): - return NotImplemented - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += 
self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth) - 1) * 7 - if nth > 0: - jumpdays += (7 - ret.weekday() + weekday) % 7 - else: - jumpdays += (ret.weekday() - weekday) % 7 - jumpdays *= -1 - ret += datetime.timedelta(days=jumpdays) - return ret - - def __radd__(self, other): - return self.__add__(other) - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented # In case the other object defines __rsub__ - return self.__class__(years=self.years - other.years, - months=self.months - other.months, - days=self.days - other.days, - hours=self.hours - other.hours, - minutes=self.minutes - other.minutes, - seconds=self.seconds - other.seconds, - microseconds=self.microseconds - other.microseconds, - leapdays=self.leapdays or other.leapdays, - year=(self.year if self.year is not None - else other.year), - month=(self.month if self.month is not None else - other.month), - day=(self.day if self.day is not None else - other.day), - weekday=(self.weekday if self.weekday is not None else - other.weekday), - hour=(self.hour if self.hour is not None else - other.hour), - minute=(self.minute if self.minute is not None else - other.minute), - second=(self.second if self.second is not None else - other.second), - microsecond=(self.microsecond if self.microsecond - is not None else - other.microsecond)) - - def __abs__(self): - return self.__class__(years=abs(self.years), - months=abs(self.months), - days=abs(self.days), - hours=abs(self.hours), - minutes=abs(self.minutes), - seconds=abs(self.seconds), - microseconds=abs(self.microseconds), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - 
hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __neg__(self): - return self.__class__(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __bool__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - # Compatibility with Python 2.x - __nonzero__ = __bool__ - - def __mul__(self, other): - try: - f = float(other) - except TypeError: - return NotImplemented - - return self.__class__(years=int(self.years * f), - months=int(self.months * f), - days=int(self.days * f), - hours=int(self.hours * f), - minutes=int(self.minutes * f), - seconds=int(self.seconds * f), - microseconds=int(self.microseconds * f), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - __rmul__ = __mul__ - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - self.months == 
other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.microseconds == other.microseconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __hash__(self): - return hash(( - self.weekday, - self.years, - self.months, - self.days, - self.hours, - self.minutes, - self.seconds, - self.microseconds, - self.leapdays, - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - )) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - try: - reciprocal = 1 / float(other) - except TypeError: - return NotImplemented - - return self.__mul__(reciprocal) - - __truediv__ = __div__ - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("{attr}={value:+g}".format(attr=attr, value=value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("{attr}={value}".format(attr=attr, value=repr(value))) - return "{classname}({attrs})".format(classname=self.__class__.__name__, - attrs=", ".join(l)) - - -def _sign(x): - return int(copysign(1, x)) - -# vim:ts=4:sw=4:et diff --git a/utill/dateutil/rrule.py b/utill/dateutil/rrule.py deleted file mode 100644 index 20a0c4a..0000000 --- a/utill/dateutil/rrule.py +++ /dev/null @@ -1,1736 +0,0 @@ -# -*- coding: utf-8 -*- -""" -The rrule module offers a small, complete, and very fast, implementation of -the recurrence rules documented in the -`iCalendar RFC `_, -including support for caching of results. 
-""" -import itertools -import datetime -import calendar -import re -import sys - -try: - from math import gcd -except ImportError: - from fractions import gcd - -from six import advance_iterator, integer_types -from six.moves import _thread, range -import heapq - -from ._common import weekday as weekdaybase -from .tz import tzutc, tzlocal - -# For warning about deprecation of until and count -from warnings import warn - -__all__ = ["rrule", "rruleset", "rrulestr", - "YEARLY", "MONTHLY", "WEEKLY", "DAILY", - "HOURLY", "MINUTELY", "SECONDLY", - "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -# Every mask is 7 days longer to handle cross-year weekly periods. -M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + - [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) -M365MASK = list(M366MASK) -M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) -MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -MDAY365MASK = list(MDAY366MASK) -M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) -NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -NMDAY365MASK = list(NMDAY366MASK) -M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) -M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) -WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 -del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] -MDAY365MASK = tuple(MDAY365MASK) -M365MASK = tuple(M365MASK) - -FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] - -(YEARLY, - MONTHLY, - WEEKLY, - DAILY, - HOURLY, - MINUTELY, - SECONDLY) = list(range(7)) - -# Imported on demand. -easter = None -parser = None - - -class weekday(weekdaybase): - """ - This version of weekday does not allow n = 0. 
- """ - def __init__(self, wkday, n=None): - if n == 0: - raise ValueError("Can't create weekday with n==0") - - super(weekday, self).__init__(wkday, n) - - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - - -def _invalidates_cache(f): - """ - Decorator for rruleset methods which may invalidate the - cached length. - """ - def inner_func(self, *args, **kwargs): - rv = f(self, *args, **kwargs) - self._invalidate_cache() - return rv - - return inner_func - - -class rrulebase(object): - def __init__(self, cache=False): - if cache: - self._cache = [] - self._cache_lock = _thread.allocate_lock() - self._invalidate_cache() - else: - self._cache = None - self._cache_complete = False - self._len = None - - def __iter__(self): - if self._cache_complete: - return iter(self._cache) - elif self._cache is None: - return self._iter() - else: - return self._iter_cached() - - def _invalidate_cache(self): - if self._cache is not None: - self._cache = [] - self._cache_complete = False - self._cache_gen = self._iter() - - if self._cache_lock.locked(): - self._cache_lock.release() - - self._len = None - - def _iter_cached(self): - i = 0 - gen = self._cache_gen - cache = self._cache - acquire = self._cache_lock.acquire - release = self._cache_lock.release - while gen: - if i == len(cache): - acquire() - if self._cache_complete: - break - try: - for j in range(10): - cache.append(advance_iterator(gen)) - except StopIteration: - self._cache_gen = gen = None - self._cache_complete = True - break - release() - yield cache[i] - i += 1 - while i < self._len: - yield cache[i] - i += 1 - - def __getitem__(self, item): - if self._cache_complete: - return self._cache[item] - elif isinstance(item, slice): - if item.step and item.step < 0: - return list(iter(self))[item] - else: - return list(itertools.islice(self, - item.start or 0, - item.stop or sys.maxsize, - item.step or 1)) - elif item >= 0: - gen = iter(self) - try: - for i in range(item+1): - res = 
advance_iterator(gen) - except StopIteration: - raise IndexError - return res - else: - return list(iter(self))[item] - - def __contains__(self, item): - if self._cache_complete: - return item in self._cache - else: - for i in self: - if i == item: - return True - elif i > item: - return False - return False - - # __len__() introduces a large performance penality. - def count(self): - """ Returns the number of recurrences in this set. It will have go - trough the whole recurrence, if this hasn't been done before. """ - if self._len is None: - for x in self: - pass - return self._len - - def before(self, dt, inc=False): - """ Returns the last recurrence before the given datetime instance. The - inc keyword defines what happens if dt is an occurrence. With - inc=True, if dt itself is an occurrence, it will be returned. """ - if self._cache_complete: - gen = self._cache - else: - gen = self - last = None - if inc: - for i in gen: - if i > dt: - break - last = i - else: - for i in gen: - if i >= dt: - break - last = i - return last - - def after(self, dt, inc=False): - """ Returns the first recurrence after the given datetime instance. The - inc keyword defines what happens if dt is an occurrence. With - inc=True, if dt itself is an occurrence, it will be returned. """ - if self._cache_complete: - gen = self._cache - else: - gen = self - if inc: - for i in gen: - if i >= dt: - return i - else: - for i in gen: - if i > dt: - return i - return None - - def xafter(self, dt, count=None, inc=False): - """ - Generator which yields up to `count` recurrences after the given - datetime instance, equivalent to `after`. - - :param dt: - The datetime at which to start generating recurrences. - - :param count: - The maximum number of recurrences to generate. If `None` (default), - dates are generated until the recurrence rule is exhausted. - - :param inc: - If `dt` is an instance of the rule and `inc` is `True`, it is - included in the output. 
- - :yields: Yields a sequence of `datetime` objects. - """ - - if self._cache_complete: - gen = self._cache - else: - gen = self - - # Select the comparison function - if inc: - comp = lambda dc, dtc: dc >= dtc - else: - comp = lambda dc, dtc: dc > dtc - - # Generate dates - n = 0 - for d in gen: - if comp(d, dt): - if count is not None: - n += 1 - if n > count: - break - - yield d - - def between(self, after, before, inc=False, count=1): - """ Returns all the occurrences of the rrule between after and before. - The inc keyword defines what happens if after and/or before are - themselves occurrences. With inc=True, they will be included in the - list, if they are found in the recurrence set. """ - if self._cache_complete: - gen = self._cache - else: - gen = self - started = False - l = [] - if inc: - for i in gen: - if i > before: - break - elif not started: - if i >= after: - started = True - l.append(i) - else: - l.append(i) - else: - for i in gen: - if i >= before: - break - elif not started: - if i > after: - started = True - l.append(i) - else: - l.append(i) - return l - - -class rrule(rrulebase): - """ - That's the base of the rrule operation. It accepts all the keywords - defined in the RFC as its constructor parameters (except byday, - which was renamed to byweekday) and more. The constructor prototype is:: - - rrule(freq) - - Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, - or SECONDLY. - - .. note:: - Per RFC section 3.3.10, recurrence instances falling on invalid dates - and times are ignored rather than coerced: - - Recurrence rules may generate recurrence instances with an invalid - date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM - on a day where the local time is moved forward by an hour at 1:00 - AM). Such recurrence instances MUST be ignored and MUST NOT be - counted as part of the recurrence set. 
- - This can lead to possibly surprising behavior when, for example, the - start date occurs at the end of the month: - - >>> from dateutil.rrule import rrule, MONTHLY - >>> from datetime import datetime - >>> start_date = datetime(2014, 12, 31) - >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) - ... # doctest: +NORMALIZE_WHITESPACE - [datetime.datetime(2014, 12, 31, 0, 0), - datetime.datetime(2015, 1, 31, 0, 0), - datetime.datetime(2015, 3, 31, 0, 0), - datetime.datetime(2015, 5, 31, 0, 0)] - - Additionally, it supports the following keyword arguments: - - :param dtstart: - The recurrence start. Besides being the base for the recurrence, - missing parameters in the final recurrence instances will also be - extracted from this date. If not given, datetime.now() will be used - instead. - :param interval: - The interval between each freq iteration. For example, when using - YEARLY, an interval of 2 means once every two years, but with HOURLY, - it means once every two hours. The default interval is 1. - :param wkst: - The week start day. Must be one of the MO, TU, WE constants, or an - integer, specifying the first day of the week. This will affect - recurrences based on weekly periods. The default week start is got - from calendar.firstweekday(), and may be modified by - calendar.setfirstweekday(). - :param count: - If given, this determines how many occurrences will be generated. - - .. note:: - As of version 2.5.0, the use of the keyword ``until`` in conjunction - with ``count`` is deprecated, to make sure ``dateutil`` is fully - compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` - **must not** occur in the same call to ``rrule``. - :param until: - If given, this must be a datetime instance specifying the upper-bound - limit of the recurrence. The last recurrence in the rule is the greatest - datetime that is less than or equal to the value specified in the - ``until`` parameter. - - .. 
note:: - As of version 2.5.0, the use of the keyword ``until`` in conjunction - with ``count`` is deprecated, to make sure ``dateutil`` is fully - compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` - **must not** occur in the same call to ``rrule``. - :param bysetpos: - If given, it must be either an integer, or a sequence of integers, - positive or negative. Each given integer will specify an occurrence - number, corresponding to the nth occurrence of the rule inside the - frequency period. For example, a bysetpos of -1 if combined with a - MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will - result in the last work day of every month. - :param bymonth: - If given, it must be either an integer, or a sequence of integers, - meaning the months to apply the recurrence to. - :param bymonthday: - If given, it must be either an integer, or a sequence of integers, - meaning the month days to apply the recurrence to. - :param byyearday: - If given, it must be either an integer, or a sequence of integers, - meaning the year days to apply the recurrence to. - :param byeaster: - If given, it must be either an integer, or a sequence of integers, - positive or negative. Each integer will define an offset from the - Easter Sunday. Passing the offset 0 to byeaster will yield the Easter - Sunday itself. This is an extension to the RFC specification. - :param byweekno: - If given, it must be either an integer, or a sequence of integers, - meaning the week numbers to apply the recurrence to. Week numbers - have the meaning described in ISO8601, that is, the first week of - the year is that containing at least four days of the new year. - :param byweekday: - If given, it must be either an integer (0 == MO), a sequence of - integers, one of the weekday constants (MO, TU, etc), or a sequence - of these constants. When given, these variables will define the - weekdays where the recurrence will be applied. 
It's also possible to - use an argument n for the weekday instances, which will mean the nth - occurrence of this weekday in the period. For example, with MONTHLY, - or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the - first friday of the month where the recurrence happens. Notice that in - the RFC documentation, this is specified as BYDAY, but was renamed to - avoid the ambiguity of that keyword. - :param byhour: - If given, it must be either an integer, or a sequence of integers, - meaning the hours to apply the recurrence to. - :param byminute: - If given, it must be either an integer, or a sequence of integers, - meaning the minutes to apply the recurrence to. - :param bysecond: - If given, it must be either an integer, or a sequence of integers, - meaning the seconds to apply the recurrence to. - :param cache: - If given, it must be a boolean value specifying to enable or disable - caching of results. If you will use the same rrule instance multiple - times, enabling caching will improve the performance considerably. 
- """ - def __init__(self, freq, dtstart=None, - interval=1, wkst=None, count=None, until=None, bysetpos=None, - bymonth=None, bymonthday=None, byyearday=None, byeaster=None, - byweekno=None, byweekday=None, - byhour=None, byminute=None, bysecond=None, - cache=False): - super(rrule, self).__init__(cache) - global easter - if not dtstart: - if until and until.tzinfo: - dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) - else: - dtstart = datetime.datetime.now().replace(microsecond=0) - elif not isinstance(dtstart, datetime.datetime): - dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) - else: - dtstart = dtstart.replace(microsecond=0) - self._dtstart = dtstart - self._tzinfo = dtstart.tzinfo - self._freq = freq - self._interval = interval - self._count = count - - # Cache the original byxxx rules, if they are provided, as the _byxxx - # attributes do not necessarily map to the inputs, and this can be - # a problem in generating the strings. Only store things if they've - # been supplied (the string retrieval will just use .get()) - self._original_rule = {} - - if until and not isinstance(until, datetime.datetime): - until = datetime.datetime.fromordinal(until.toordinal()) - self._until = until - - if self._dtstart and self._until: - if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): - # According to RFC5545 Section 3.3.10: - # https://tools.ietf.org/html/rfc5545#section-3.3.10 - # - # > If the "DTSTART" property is specified as a date with UTC - # > time or a date with local time and time zone reference, - # > then the UNTIL rule part MUST be specified as a date with - # > UTC time. - raise ValueError( - 'RRULE UNTIL values must be specified in UTC when DTSTART ' - 'is timezone-aware' - ) - - if count is not None and until: - warn("Using both 'count' and 'until' is inconsistent with RFC 5545" - " and has been deprecated in dateutil. 
Future versions will " - "raise an error.", DeprecationWarning) - - if wkst is None: - self._wkst = calendar.firstweekday() - elif isinstance(wkst, integer_types): - self._wkst = wkst - else: - self._wkst = wkst.weekday - - if bysetpos is None: - self._bysetpos = None - elif isinstance(bysetpos, integer_types): - if bysetpos == 0 or not (-366 <= bysetpos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - self._bysetpos = (bysetpos,) - else: - self._bysetpos = tuple(bysetpos) - for pos in self._bysetpos: - if pos == 0 or not (-366 <= pos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - - if self._bysetpos: - self._original_rule['bysetpos'] = self._bysetpos - - if (byweekno is None and byyearday is None and bymonthday is None and - byweekday is None and byeaster is None): - if freq == YEARLY: - if bymonth is None: - bymonth = dtstart.month - self._original_rule['bymonth'] = None - bymonthday = dtstart.day - self._original_rule['bymonthday'] = None - elif freq == MONTHLY: - bymonthday = dtstart.day - self._original_rule['bymonthday'] = None - elif freq == WEEKLY: - byweekday = dtstart.weekday() - self._original_rule['byweekday'] = None - - # bymonth - if bymonth is None: - self._bymonth = None - else: - if isinstance(bymonth, integer_types): - bymonth = (bymonth,) - - self._bymonth = tuple(sorted(set(bymonth))) - - if 'bymonth' not in self._original_rule: - self._original_rule['bymonth'] = self._bymonth - - # byyearday - if byyearday is None: - self._byyearday = None - else: - if isinstance(byyearday, integer_types): - byyearday = (byyearday,) - - self._byyearday = tuple(sorted(set(byyearday))) - self._original_rule['byyearday'] = self._byyearday - - # byeaster - if byeaster is not None: - if not easter: - from dateutil import easter - if isinstance(byeaster, integer_types): - self._byeaster = (byeaster,) - else: - self._byeaster = tuple(sorted(byeaster)) - - 
self._original_rule['byeaster'] = self._byeaster - else: - self._byeaster = None - - # bymonthday - if bymonthday is None: - self._bymonthday = () - self._bynmonthday = () - else: - if isinstance(bymonthday, integer_types): - bymonthday = (bymonthday,) - - bymonthday = set(bymonthday) # Ensure it's unique - - self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) - self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) - - # Storing positive numbers first, then negative numbers - if 'bymonthday' not in self._original_rule: - self._original_rule['bymonthday'] = tuple( - itertools.chain(self._bymonthday, self._bynmonthday)) - - # byweekno - if byweekno is None: - self._byweekno = None - else: - if isinstance(byweekno, integer_types): - byweekno = (byweekno,) - - self._byweekno = tuple(sorted(set(byweekno))) - - self._original_rule['byweekno'] = self._byweekno - - # byweekday / bynweekday - if byweekday is None: - self._byweekday = None - self._bynweekday = None - else: - # If it's one of the valid non-sequence types, convert to a - # single-element sequence before the iterator that builds the - # byweekday set. 
- if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): - byweekday = (byweekday,) - - self._byweekday = set() - self._bynweekday = set() - for wday in byweekday: - if isinstance(wday, integer_types): - self._byweekday.add(wday) - elif not wday.n or freq > MONTHLY: - self._byweekday.add(wday.weekday) - else: - self._bynweekday.add((wday.weekday, wday.n)) - - if not self._byweekday: - self._byweekday = None - elif not self._bynweekday: - self._bynweekday = None - - if self._byweekday is not None: - self._byweekday = tuple(sorted(self._byweekday)) - orig_byweekday = [weekday(x) for x in self._byweekday] - else: - orig_byweekday = () - - if self._bynweekday is not None: - self._bynweekday = tuple(sorted(self._bynweekday)) - orig_bynweekday = [weekday(*x) for x in self._bynweekday] - else: - orig_bynweekday = () - - if 'byweekday' not in self._original_rule: - self._original_rule['byweekday'] = tuple(itertools.chain( - orig_byweekday, orig_bynweekday)) - - # byhour - if byhour is None: - if freq < HOURLY: - self._byhour = {dtstart.hour} - else: - self._byhour = None - else: - if isinstance(byhour, integer_types): - byhour = (byhour,) - - if freq == HOURLY: - self._byhour = self.__construct_byset(start=dtstart.hour, - byxxx=byhour, - base=24) - else: - self._byhour = set(byhour) - - self._byhour = tuple(sorted(self._byhour)) - self._original_rule['byhour'] = self._byhour - - # byminute - if byminute is None: - if freq < MINUTELY: - self._byminute = {dtstart.minute} - else: - self._byminute = None - else: - if isinstance(byminute, integer_types): - byminute = (byminute,) - - if freq == MINUTELY: - self._byminute = self.__construct_byset(start=dtstart.minute, - byxxx=byminute, - base=60) - else: - self._byminute = set(byminute) - - self._byminute = tuple(sorted(self._byminute)) - self._original_rule['byminute'] = self._byminute - - # bysecond - if bysecond is None: - if freq < SECONDLY: - self._bysecond = ((dtstart.second,)) - else: - self._bysecond = None - 
else: - if isinstance(bysecond, integer_types): - bysecond = (bysecond,) - - self._bysecond = set(bysecond) - - if freq == SECONDLY: - self._bysecond = self.__construct_byset(start=dtstart.second, - byxxx=bysecond, - base=60) - else: - self._bysecond = set(bysecond) - - self._bysecond = tuple(sorted(self._bysecond)) - self._original_rule['bysecond'] = self._bysecond - - if self._freq >= HOURLY: - self._timeset = None - else: - self._timeset = [] - for hour in self._byhour: - for minute in self._byminute: - for second in self._bysecond: - self._timeset.append( - datetime.time(hour, minute, second, - tzinfo=self._tzinfo)) - self._timeset.sort() - self._timeset = tuple(self._timeset) - - def __str__(self): - """ - Output a string that would generate this RRULE if passed to rrulestr. - This is mostly compatible with RFC5545, except for the - dateutil-specific extension BYEASTER. - """ - - output = [] - h, m, s = [None] * 3 - if self._dtstart: - output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) - h, m, s = self._dtstart.timetuple()[3:6] - - parts = ['FREQ=' + FREQNAMES[self._freq]] - if self._interval != 1: - parts.append('INTERVAL=' + str(self._interval)) - - if self._wkst: - parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) - - if self._count is not None: - parts.append('COUNT=' + str(self._count)) - - if self._until: - parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) - - if self._original_rule.get('byweekday') is not None: - # The str() method on weekday objects doesn't generate - # RFC5545-compliant strings, so we should modify that. 
- original_rule = dict(self._original_rule) - wday_strings = [] - for wday in original_rule['byweekday']: - if wday.n: - wday_strings.append('{n:+d}{wday}'.format( - n=wday.n, - wday=repr(wday)[0:2])) - else: - wday_strings.append(repr(wday)) - - original_rule['byweekday'] = wday_strings - else: - original_rule = self._original_rule - - partfmt = '{name}={vals}' - for name, key in [('BYSETPOS', 'bysetpos'), - ('BYMONTH', 'bymonth'), - ('BYMONTHDAY', 'bymonthday'), - ('BYYEARDAY', 'byyearday'), - ('BYWEEKNO', 'byweekno'), - ('BYDAY', 'byweekday'), - ('BYHOUR', 'byhour'), - ('BYMINUTE', 'byminute'), - ('BYSECOND', 'bysecond'), - ('BYEASTER', 'byeaster')]: - value = original_rule.get(key) - if value: - parts.append(partfmt.format(name=name, vals=(','.join(str(v) - for v in value)))) - - output.append('RRULE:' + ';'.join(parts)) - return '\n'.join(output) - - def replace(self, **kwargs): - """Return new rrule with same attributes except for those attributes given new - values by whichever keyword arguments are specified.""" - new_kwargs = {"interval": self._interval, - "count": self._count, - "dtstart": self._dtstart, - "freq": self._freq, - "until": self._until, - "wkst": self._wkst, - "cache": False if self._cache is None else True } - new_kwargs.update(self._original_rule) - new_kwargs.update(kwargs) - return rrule(**new_kwargs) - - def _iter(self): - year, month, day, hour, minute, second, weekday, yearday, _ = \ - self._dtstart.timetuple() - - # Some local variables to speed things up a bit - freq = self._freq - interval = self._interval - wkst = self._wkst - until = self._until - bymonth = self._bymonth - byweekno = self._byweekno - byyearday = self._byyearday - byweekday = self._byweekday - byeaster = self._byeaster - bymonthday = self._bymonthday - bynmonthday = self._bynmonthday - bysetpos = self._bysetpos - byhour = self._byhour - byminute = self._byminute - bysecond = self._bysecond - - ii = _iterinfo(self) - ii.rebuild(year, month) - - getdayset = {YEARLY: 
ii.ydayset, - MONTHLY: ii.mdayset, - WEEKLY: ii.wdayset, - DAILY: ii.ddayset, - HOURLY: ii.ddayset, - MINUTELY: ii.ddayset, - SECONDLY: ii.ddayset}[freq] - - if freq < HOURLY: - timeset = self._timeset - else: - gettimeset = {HOURLY: ii.htimeset, - MINUTELY: ii.mtimeset, - SECONDLY: ii.stimeset}[freq] - if ((freq >= HOURLY and - self._byhour and hour not in self._byhour) or - (freq >= MINUTELY and - self._byminute and minute not in self._byminute) or - (freq >= SECONDLY and - self._bysecond and second not in self._bysecond)): - timeset = () - else: - timeset = gettimeset(hour, minute, second) - - total = 0 - count = self._count - while True: - # Get dayset with the right frequency - dayset, start, end = getdayset(year, month, day) - - # Do the "hard" work ;-) - filtered = False - for i in dayset[start:end]: - if ((bymonth and ii.mmask[i] not in bymonth) or - (byweekno and not ii.wnomask[i]) or - (byweekday and ii.wdaymask[i] not in byweekday) or - (ii.nwdaymask and not ii.nwdaymask[i]) or - (byeaster and not ii.eastermask[i]) or - ((bymonthday or bynmonthday) and - ii.mdaymask[i] not in bymonthday and - ii.nmdaymask[i] not in bynmonthday) or - (byyearday and - ((i < ii.yearlen and i+1 not in byyearday and - -ii.yearlen+i not in byyearday) or - (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and - -ii.nextyearlen+i-ii.yearlen not in byyearday)))): - dayset[i] = None - filtered = True - - # Output results - if bysetpos and timeset: - poslist = [] - for pos in bysetpos: - if pos < 0: - daypos, timepos = divmod(pos, len(timeset)) - else: - daypos, timepos = divmod(pos-1, len(timeset)) - try: - i = [x for x in dayset[start:end] - if x is not None][daypos] - time = timeset[timepos] - except IndexError: - pass - else: - date = datetime.date.fromordinal(ii.yearordinal+i) - res = datetime.datetime.combine(date, time) - if res not in poslist: - poslist.append(res) - poslist.sort() - for res in poslist: - if until and res > until: - self._len = total - return - elif res 
>= self._dtstart: - if count is not None: - count -= 1 - if count < 0: - self._len = total - return - total += 1 - yield res - else: - for i in dayset[start:end]: - if i is not None: - date = datetime.date.fromordinal(ii.yearordinal + i) - for time in timeset: - res = datetime.datetime.combine(date, time) - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - if count is not None: - count -= 1 - if count < 0: - self._len = total - return - - total += 1 - yield res - - # Handle frequency and interval - fixday = False - if freq == YEARLY: - year += interval - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == MONTHLY: - month += interval - if month > 12: - div, mod = divmod(month, 12) - month = mod - year += div - if month == 0: - month = 12 - year -= 1 - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == WEEKLY: - if wkst > weekday: - day += -(weekday+1+(6-wkst))+self._interval*7 - else: - day += -(weekday-wkst)+self._interval*7 - weekday = wkst - fixday = True - elif freq == DAILY: - day += interval - fixday = True - elif freq == HOURLY: - if filtered: - # Jump to one iteration before next day - hour += ((23-hour)//interval)*interval - - if byhour: - ndays, hour = self.__mod_distance(value=hour, - byxxx=self._byhour, - base=24) - else: - ndays, hour = divmod(hour+interval, 24) - - if ndays: - day += ndays - fixday = True - - timeset = gettimeset(hour, minute, second) - elif freq == MINUTELY: - if filtered: - # Jump to one iteration before next day - minute += ((1439-(hour*60+minute))//interval)*interval - - valid = False - rep_rate = (24*60) - for j in range(rep_rate // gcd(interval, rep_rate)): - if byminute: - nhours, minute = \ - self.__mod_distance(value=minute, - byxxx=self._byminute, - base=60) - else: - nhours, minute = divmod(minute+interval, 60) - - div, hour = divmod(hour+nhours, 24) - if div: - day += div - fixday = True - 
filtered = False - - if not byhour or hour in byhour: - valid = True - break - - if not valid: - raise ValueError('Invalid combination of interval and ' + - 'byhour resulting in empty rule.') - - timeset = gettimeset(hour, minute, second) - elif freq == SECONDLY: - if filtered: - # Jump to one iteration before next day - second += (((86399 - (hour * 3600 + minute * 60 + second)) - // interval) * interval) - - rep_rate = (24 * 3600) - valid = False - for j in range(0, rep_rate // gcd(interval, rep_rate)): - if bysecond: - nminutes, second = \ - self.__mod_distance(value=second, - byxxx=self._bysecond, - base=60) - else: - nminutes, second = divmod(second+interval, 60) - - div, minute = divmod(minute+nminutes, 60) - if div: - hour += div - div, hour = divmod(hour, 24) - if div: - day += div - fixday = True - - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute) and - (not bysecond or second in bysecond)): - valid = True - break - - if not valid: - raise ValueError('Invalid combination of interval, ' + - 'byhour and byminute resulting in empty' + - ' rule.') - - timeset = gettimeset(hour, minute, second) - - if fixday and day > 28: - daysinmonth = calendar.monthrange(year, month)[1] - if day > daysinmonth: - while day > daysinmonth: - day -= daysinmonth - month += 1 - if month == 13: - month = 1 - year += 1 - if year > datetime.MAXYEAR: - self._len = total - return - daysinmonth = calendar.monthrange(year, month)[1] - ii.rebuild(year, month) - - def __construct_byset(self, start, byxxx, base): - """ - If a `BYXXX` sequence is passed to the constructor at the same level as - `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some - specifications which cannot be reached given some starting conditions. 
- - This occurs whenever the interval is not coprime with the base of a - given unit and the difference between the starting position and the - ending position is not coprime with the greatest common denominator - between the interval and the base. For example, with a FREQ of hourly - starting at 17:00 and an interval of 4, the only valid values for - BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not - coprime. - - :param start: - Specifies the starting position. - :param byxxx: - An iterable containing the list of allowed values. - :param base: - The largest allowable value for the specified frequency (e.g. - 24 hours, 60 minutes). - - This does not preserve the type of the iterable, returning a set, since - the values should be unique and the order is irrelevant, this will - speed up later lookups. - - In the event of an empty set, raises a :exception:`ValueError`, as this - results in an empty rrule. - """ - - cset = set() - - # Support a single byxxx value. - if isinstance(byxxx, integer_types): - byxxx = (byxxx, ) - - for num in byxxx: - i_gcd = gcd(self._interval, base) - # Use divmod rather than % because we need to wrap negative nums. - if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: - cset.add(num) - - if len(cset) == 0: - raise ValueError("Invalid rrule byxxx generates an empty set.") - - return cset - - def __mod_distance(self, value, byxxx, base): - """ - Calculates the next value in a sequence where the `FREQ` parameter is - specified along with a `BYXXX` parameter at the same "level" - (e.g. `HOURLY` specified with `BYHOUR`). - - :param value: - The old value of the component. - :param byxxx: - The `BYXXX` set, which should have been generated by - `rrule._construct_byset`, or something else which checks that a - valid rule is present. - :param base: - The largest allowable value for the specified frequency (e.g. - 24 hours, 60 minutes). 
- - If a valid value is not found after `base` iterations (the maximum - number before the sequence would start to repeat), this raises a - :exception:`ValueError`, as no valid values were found. - - This returns a tuple of `divmod(n*interval, base)`, where `n` is the - smallest number of `interval` repetitions until the next specified - value in `byxxx` is found. - """ - accumulator = 0 - for ii in range(1, base + 1): - # Using divmod() over % to account for negative intervals - div, value = divmod(value + self._interval, base) - accumulator += div - if value in byxxx: - return (accumulator, value) - - -class _iterinfo(object): - __slots__ = ["rrule", "lastyear", "lastmonth", - "yearlen", "nextyearlen", "yearordinal", "yearweekday", - "mmask", "mrange", "mdaymask", "nmdaymask", - "wdaymask", "wnomask", "nwdaymask", "eastermask"] - - def __init__(self, rrule): - for attr in self.__slots__: - setattr(self, attr, None) - self.rrule = rrule - - def rebuild(self, year, month): - # Every mask is 7 days longer to handle cross-year weekly periods. 
- rr = self.rrule - if year != self.lastyear: - self.yearlen = 365 + calendar.isleap(year) - self.nextyearlen = 365 + calendar.isleap(year + 1) - firstyday = datetime.date(year, 1, 1) - self.yearordinal = firstyday.toordinal() - self.yearweekday = firstyday.weekday() - - wday = datetime.date(year, 1, 1).weekday() - if self.yearlen == 365: - self.mmask = M365MASK - self.mdaymask = MDAY365MASK - self.nmdaymask = NMDAY365MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M365RANGE - else: - self.mmask = M366MASK - self.mdaymask = MDAY366MASK - self.nmdaymask = NMDAY366MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M366RANGE - - if not rr._byweekno: - self.wnomask = None - else: - self.wnomask = [0]*(self.yearlen+7) - # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) - no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 - if no1wkst >= 4: - no1wkst = 0 - # Number of days in the year, plus the days we got - # from last year. - wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 - else: - # Number of days in the year, minus the days we - # left in last year. - wyearlen = self.yearlen-no1wkst - div, mod = divmod(wyearlen, 7) - numweeks = div+mod//4 - for n in rr._byweekno: - if n < 0: - n += numweeks+1 - if not (0 < n <= numweeks): - continue - if n > 1: - i = no1wkst+(n-1)*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - else: - i = no1wkst - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if 1 in rr._byweekno: - # Check week number 1 of next year as well - # TODO: Check -numweeks for next year. - i = no1wkst+numweeks*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - if i < self.yearlen: - # If week starts in next year, we - # don't care about it. - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if no1wkst: - # Check last week number of last year as - # well. 
If no1wkst is 0, either the year - # started on week start, or week number 1 - # got days from last year, so there are no - # days from last year's last week number in - # this year. - if -1 not in rr._byweekno: - lyearweekday = datetime.date(year-1, 1, 1).weekday() - lno1wkst = (7-lyearweekday+rr._wkst) % 7 - lyearlen = 365+calendar.isleap(year-1) - if lno1wkst >= 4: - lno1wkst = 0 - lnumweeks = 52+(lyearlen + - (lyearweekday-rr._wkst) % 7) % 7//4 - else: - lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 - else: - lnumweeks = -1 - if lnumweeks in rr._byweekno: - for i in range(no1wkst): - self.wnomask[i] = 1 - - if (rr._bynweekday and (month != self.lastmonth or - year != self.lastyear)): - ranges = [] - if rr._freq == YEARLY: - if rr._bymonth: - for month in rr._bymonth: - ranges.append(self.mrange[month-1:month+1]) - else: - ranges = [(0, self.yearlen)] - elif rr._freq == MONTHLY: - ranges = [self.mrange[month-1:month+1]] - if ranges: - # Weekly frequency won't get here, so we may not - # care about cross-year weekly periods. - self.nwdaymask = [0]*self.yearlen - for first, last in ranges: - last -= 1 - for wday, n in rr._bynweekday: - if n < 0: - i = last+(n+1)*7 - i -= (self.wdaymask[i]-wday) % 7 - else: - i = first+(n-1)*7 - i += (7-self.wdaymask[i]+wday) % 7 - if first <= i <= last: - self.nwdaymask[i] = 1 - - if rr._byeaster: - self.eastermask = [0]*(self.yearlen+7) - eyday = easter.easter(year).toordinal()-self.yearordinal - for offset in rr._byeaster: - self.eastermask[eyday+offset] = 1 - - self.lastyear = year - self.lastmonth = month - - def ydayset(self, year, month, day): - return list(range(self.yearlen)), 0, self.yearlen - - def mdayset(self, year, month, day): - dset = [None]*self.yearlen - start, end = self.mrange[month-1:month+1] - for i in range(start, end): - dset[i] = i - return dset, start, end - - def wdayset(self, year, month, day): - # We need to handle cross-year weeks here. 
- dset = [None]*(self.yearlen+7) - i = datetime.date(year, month, day).toordinal()-self.yearordinal - start = i - for j in range(7): - dset[i] = i - i += 1 - # if (not (0 <= i < self.yearlen) or - # self.wdaymask[i] == self.rrule._wkst): - # This will cross the year boundary, if necessary. - if self.wdaymask[i] == self.rrule._wkst: - break - return dset, start, i - - def ddayset(self, year, month, day): - dset = [None] * self.yearlen - i = datetime.date(year, month, day).toordinal() - self.yearordinal - dset[i] = i - return dset, i, i + 1 - - def htimeset(self, hour, minute, second): - tset = [] - rr = self.rrule - for minute in rr._byminute: - for second in rr._bysecond: - tset.append(datetime.time(hour, minute, second, - tzinfo=rr._tzinfo)) - tset.sort() - return tset - - def mtimeset(self, hour, minute, second): - tset = [] - rr = self.rrule - for second in rr._bysecond: - tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) - tset.sort() - return tset - - def stimeset(self, hour, minute, second): - return (datetime.time(hour, minute, second, - tzinfo=self.rrule._tzinfo),) - - -class rruleset(rrulebase): - """ The rruleset type allows more complex recurrence setups, mixing - multiple rules, dates, exclusion rules, and exclusion dates. The type - constructor takes the following keyword arguments: - - :param cache: If True, caching of results will be enabled, improving - performance of multiple queries considerably. 
""" - - class _genitem(object): - def __init__(self, genlist, gen): - try: - self.dt = advance_iterator(gen) - genlist.append(self) - except StopIteration: - pass - self.genlist = genlist - self.gen = gen - - def __next__(self): - try: - self.dt = advance_iterator(self.gen) - except StopIteration: - if self.genlist[0] is self: - heapq.heappop(self.genlist) - else: - self.genlist.remove(self) - heapq.heapify(self.genlist) - - next = __next__ - - def __lt__(self, other): - return self.dt < other.dt - - def __gt__(self, other): - return self.dt > other.dt - - def __eq__(self, other): - return self.dt == other.dt - - def __ne__(self, other): - return self.dt != other.dt - - def __init__(self, cache=False): - super(rruleset, self).__init__(cache) - self._rrule = [] - self._rdate = [] - self._exrule = [] - self._exdate = [] - - @_invalidates_cache - def rrule(self, rrule): - """ Include the given :py:class:`rrule` instance in the recurrence set - generation. """ - self._rrule.append(rrule) - - @_invalidates_cache - def rdate(self, rdate): - """ Include the given :py:class:`datetime` instance in the recurrence - set generation. """ - self._rdate.append(rdate) - - @_invalidates_cache - def exrule(self, exrule): - """ Include the given rrule instance in the recurrence set exclusion - list. Dates which are part of the given recurrence rules will not - be generated, even if some inclusive rrule or rdate matches them. - """ - self._exrule.append(exrule) - - @_invalidates_cache - def exdate(self, exdate): - """ Include the given datetime instance in the recurrence set - exclusion list. Dates included that way will not be generated, - even if some inclusive rrule or rdate matches them. 
""" - self._exdate.append(exdate) - - def _iter(self): - rlist = [] - self._rdate.sort() - self._genitem(rlist, iter(self._rdate)) - for gen in [iter(x) for x in self._rrule]: - self._genitem(rlist, gen) - exlist = [] - self._exdate.sort() - self._genitem(exlist, iter(self._exdate)) - for gen in [iter(x) for x in self._exrule]: - self._genitem(exlist, gen) - lastdt = None - total = 0 - heapq.heapify(rlist) - heapq.heapify(exlist) - while rlist: - ritem = rlist[0] - if not lastdt or lastdt != ritem.dt: - while exlist and exlist[0] < ritem: - exitem = exlist[0] - advance_iterator(exitem) - if exlist and exlist[0] is exitem: - heapq.heapreplace(exlist, exitem) - if not exlist or ritem != exlist[0]: - total += 1 - yield ritem.dt - lastdt = ritem.dt - advance_iterator(ritem) - if rlist and rlist[0] is ritem: - heapq.heapreplace(rlist, ritem) - self._len = total - - - - -class _rrulestr(object): - """ Parses a string representation of a recurrence rule or set of - recurrence rules. - - :param s: - Required, a string defining one or more recurrence rules. - - :param dtstart: - If given, used as the default recurrence start if not specified in the - rule string. - - :param cache: - If set ``True`` caching of results will be enabled, improving - performance of multiple queries considerably. - - :param unfold: - If set ``True`` indicates that a rule string is split over more - than one line and should be joined before processing. - - :param forceset: - If set ``True`` forces a :class:`dateutil.rrule.rruleset` to - be returned. - - :param compatible: - If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a naive - :class:`datetime.datetime` object is returned. - - :param tzids: - If given, a callable or mapping used to retrieve a - :class:`datetime.tzinfo` from a string representation. - Defaults to :func:`dateutil.tz.gettz`. 
- - :param tzinfos: - Additional time zone names / aliases which may be present in a string - representation. See :func:`dateutil.parser.parse` for more - information. - - :return: - Returns a :class:`dateutil.rrule.rruleset` or - :class:`dateutil.rrule.rrule` - """ - - _freq_map = {"YEARLY": YEARLY, - "MONTHLY": MONTHLY, - "WEEKLY": WEEKLY, - "DAILY": DAILY, - "HOURLY": HOURLY, - "MINUTELY": MINUTELY, - "SECONDLY": SECONDLY} - - _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, - "FR": 4, "SA": 5, "SU": 6} - - def _handle_int(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = int(value) - - def _handle_int_list(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = [int(x) for x in value.split(',')] - - _handle_INTERVAL = _handle_int - _handle_COUNT = _handle_int - _handle_BYSETPOS = _handle_int_list - _handle_BYMONTH = _handle_int_list - _handle_BYMONTHDAY = _handle_int_list - _handle_BYYEARDAY = _handle_int_list - _handle_BYEASTER = _handle_int_list - _handle_BYWEEKNO = _handle_int_list - _handle_BYHOUR = _handle_int_list - _handle_BYMINUTE = _handle_int_list - _handle_BYSECOND = _handle_int_list - - def _handle_FREQ(self, rrkwargs, name, value, **kwargs): - rrkwargs["freq"] = self._freq_map[value] - - def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): - global parser - if not parser: - from dateutil import parser - try: - rrkwargs["until"] = parser.parse(value, - ignoretz=kwargs.get("ignoretz"), - tzinfos=kwargs.get("tzinfos")) - except ValueError: - raise ValueError("invalid until date") - - def _handle_WKST(self, rrkwargs, name, value, **kwargs): - rrkwargs["wkst"] = self._weekday_map[value] - - def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): - """ - Two ways to specify this: +1MO or MO(+1) - """ - l = [] - for wday in value.split(','): - if '(' in wday: - # If it's of the form TH(+1), etc. 
- splt = wday.split('(') - w = splt[0] - n = int(splt[1][:-1]) - elif len(wday): - # If it's of the form +1MO - for i in range(len(wday)): - if wday[i] not in '+-0123456789': - break - n = wday[:i] or None - w = wday[i:] - if n: - n = int(n) - else: - raise ValueError("Invalid (empty) BYDAY specification.") - - l.append(weekdays[self._weekday_map[w]](n)) - rrkwargs["byweekday"] = l - - _handle_BYDAY = _handle_BYWEEKDAY - - def _parse_rfc_rrule(self, line, - dtstart=None, - cache=False, - ignoretz=False, - tzinfos=None): - if line.find(':') != -1: - name, value = line.split(':') - if name != "RRULE": - raise ValueError("unknown parameter name") - else: - value = line - rrkwargs = {} - for pair in value.split(';'): - name, value = pair.split('=') - name = name.upper() - value = value.upper() - try: - getattr(self, "_handle_"+name)(rrkwargs, name, value, - ignoretz=ignoretz, - tzinfos=tzinfos) - except AttributeError: - raise ValueError("unknown parameter '%s'" % name) - except (KeyError, ValueError): - raise ValueError("invalid '%s': %s" % (name, value)) - return rrule(dtstart=dtstart, cache=cache, **rrkwargs) - - def _parse_date_value(self, date_value, parms, rule_tzids, - ignoretz, tzids, tzinfos): - global parser - if not parser: - from dateutil import parser - - datevals = [] - value_found = False - TZID = None - - for parm in parms: - if parm.startswith("TZID="): - try: - tzkey = rule_tzids[parm.split('TZID=')[-1]] - except KeyError: - continue - if tzids is None: - from . import tz - tzlookup = tz.gettz - elif callable(tzids): - tzlookup = tzids - else: - tzlookup = getattr(tzids, 'get', None) - if tzlookup is None: - msg = ('tzids must be a callable, mapping, or None, ' - 'not %s' % tzids) - raise ValueError(msg) - - TZID = tzlookup(tzkey) - continue - - # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found - # only once. 
- if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: - raise ValueError("unsupported parm: " + parm) - else: - if value_found: - msg = ("Duplicate value parameter found in: " + parm) - raise ValueError(msg) - value_found = True - - for datestr in date_value.split(','): - date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) - if TZID is not None: - if date.tzinfo is None: - date = date.replace(tzinfo=TZID) - else: - raise ValueError('DTSTART/EXDATE specifies multiple timezone') - datevals.append(date) - - return datevals - - def _parse_rfc(self, s, - dtstart=None, - cache=False, - unfold=False, - forceset=False, - compatible=False, - ignoretz=False, - tzids=None, - tzinfos=None): - global parser - if compatible: - forceset = True - unfold = True - - TZID_NAMES = dict(map( - lambda x: (x.upper(), x), - re.findall('TZID=(?P[^:]+):', s) - )) - s = s.upper() - if not s.strip(): - raise ValueError("empty string") - if unfold: - lines = s.splitlines() - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - else: - lines = s.split() - if (not forceset and len(lines) == 1 and (s.find(':') == -1 or - s.startswith('RRULE:'))): - return self._parse_rfc_rrule(lines[0], cache=cache, - dtstart=dtstart, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - rrulevals = [] - rdatevals = [] - exrulevals = [] - exdatevals = [] - for line in lines: - if not line: - continue - if line.find(':') == -1: - name = "RRULE" - value = line - else: - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0] - parms = parms[1:] - if name == "RRULE": - for parm in parms: - raise ValueError("unsupported RRULE parm: "+parm) - rrulevals.append(value) - elif name == "RDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError("unsupported RDATE parm: "+parm) - 
rdatevals.append(value) - elif name == "EXRULE": - for parm in parms: - raise ValueError("unsupported EXRULE parm: "+parm) - exrulevals.append(value) - elif name == "EXDATE": - exdatevals.extend( - self._parse_date_value(value, parms, - TZID_NAMES, ignoretz, - tzids, tzinfos) - ) - elif name == "DTSTART": - dtvals = self._parse_date_value(value, parms, TZID_NAMES, - ignoretz, tzids, tzinfos) - if len(dtvals) != 1: - raise ValueError("Multiple DTSTART values specified:" + - value) - dtstart = dtvals[0] - else: - raise ValueError("unsupported property: "+name) - if (forceset or len(rrulevals) > 1 or rdatevals - or exrulevals or exdatevals): - if not parser and (rdatevals or exdatevals): - from dateutil import parser - rset = rruleset(cache=cache) - for value in rrulevals: - rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in rdatevals: - for datestr in value.split(','): - rset.rdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exrulevals: - rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exdatevals: - rset.exdate(value) - if compatible and dtstart: - rset.rdate(dtstart) - return rset - else: - return self._parse_rfc_rrule(rrulevals[0], - dtstart=dtstart, - cache=cache, - ignoretz=ignoretz, - tzinfos=tzinfos) - - def __call__(self, s, **kwargs): - return self._parse_rfc(s, **kwargs) - - -rrulestr = _rrulestr() - -# vim:ts=4:sw=4:et diff --git a/utill/dateutil/tz/__init__.py b/utill/dateutil/tz/__init__.py deleted file mode 100644 index 5a2d9cd..0000000 --- a/utill/dateutil/tz/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- -from .tz import * -from .tz import __doc__ - -#: Convenience constant providing a :class:`tzutc()` instance -#: -#: .. 
versionadded:: 2.7.0 -UTC = tzutc() - -__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", - "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", - "enfold", "datetime_ambiguous", "datetime_exists", - "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] - - -class DeprecatedTzFormatWarning(Warning): - """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/utill/dateutil/tz/_common.py b/utill/dateutil/tz/_common.py deleted file mode 100644 index 594e082..0000000 --- a/utill/dateutil/tz/_common.py +++ /dev/null @@ -1,419 +0,0 @@ -from six import PY2 - -from functools import wraps - -from datetime import datetime, timedelta, tzinfo - - -ZERO = timedelta(0) - -__all__ = ['tzname_in_python2', 'enfold'] - - -def tzname_in_python2(namefunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. It used to return bytes, but was changed - to unicode strings - """ - if PY2: - @wraps(namefunc) - def adjust_encoding(*args, **kwargs): - name = namefunc(*args, **kwargs) - if name is not None: - name = name.encode() - - return name - - return adjust_encoding - else: - return namefunc - - -# The following is adapted from Alexander Belopolsky's tz library -# https://github.com/abalkin/tz -if hasattr(datetime, 'fold'): - # This is the pre-python 3.6 fold situation - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. 
versionadded:: 2.6.0 - """ - return dt.replace(fold=fold) - -else: - class _DatetimeWithFold(datetime): - """ - This is a class designed to provide a PEP 495-compliant interface for - Python versions before 3.6. It is used only for dates in a fold, so - the ``fold`` attribute is fixed at ``1``. - - .. versionadded:: 2.6.0 - """ - __slots__ = () - - def replace(self, *args, **kwargs): - """ - Return a datetime with the same attributes, except for those - attributes given new values by whichever keyword arguments are - specified. Note that tzinfo=None can be specified to create a naive - datetime from an aware datetime with no conversion of date and time - data. - - This is reimplemented in ``_DatetimeWithFold`` because pypy3 will - return a ``datetime.datetime`` even if ``fold`` is unchanged. - """ - argnames = ( - 'year', 'month', 'day', 'hour', 'minute', 'second', - 'microsecond', 'tzinfo' - ) - - for arg, argname in zip(args, argnames): - if argname in kwargs: - raise TypeError('Duplicate argument: {}'.format(argname)) - - kwargs[argname] = arg - - for argname in argnames: - if argname not in kwargs: - kwargs[argname] = getattr(self, argname) - - dt_class = self.__class__ if kwargs.get('fold', 1) else datetime - - return dt_class(**kwargs) - - @property - def fold(self): - return 1 - - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. 
versionadded:: 2.6.0 - """ - if getattr(dt, 'fold', 0) == fold: - return dt - - args = dt.timetuple()[:6] - args += (dt.microsecond, dt.tzinfo) - - if fold: - return _DatetimeWithFold(*args) - else: - return datetime(*args) - - -def _validate_fromutc_inputs(f): - """ - The CPython version of ``fromutc`` checks that the input is a ``datetime`` - object and that ``self`` is attached as its ``tzinfo``. - """ - @wraps(f) - def fromutc(self, dt): - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - return f(self, dt) - - return fromutc - - -class _tzinfo(tzinfo): - """ - Base class for all ``dateutil`` ``tzinfo`` objects. - """ - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - - dt = dt.replace(tzinfo=self) - - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) - - return same_dt and not same_offset - - def _fold_status(self, dt_utc, dt_wall): - """ - Determine the fold status of a "wall" datetime, given a representation - of the same datetime as a (naive) UTC datetime. This is calculated based - on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all - datetimes, and that this offset is the actual number of hours separating - ``dt_utc`` and ``dt_wall``. - - :param dt_utc: - Representation of the datetime as UTC - - :param dt_wall: - Representation of the datetime as "wall time". This parameter must - either have a `fold` attribute or have a fold-naive - :class:`datetime.tzinfo` attached, otherwise the calculation may - fail. 
- """ - if self.is_ambiguous(dt_wall): - delta_wall = dt_wall - dt_utc - _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) - else: - _fold = 0 - - return _fold - - def _fold(self, dt): - return getattr(dt, 'fold', 0) - - def _fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - - # Re-implement the algorithm from Python's datetime.py - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # The original datetime.py code assumes that `dst()` defaults to - # zero during ambiguous times. PEP 495 inverts this presumption, so - # for pre-PEP 495 versions of python, we need to tweak the algorithm. - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - - dt += delta - # Set fold=1 so we can default to being in the fold for - # ambiguous dates. - dtdst = enfold(dt, fold=1).dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurance, chronologically, of the ambiguous datetime). 
- - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - dt_wall = self._fromutc(dt) - - # Calculate the fold status given the two datetimes. - _fold = self._fold_status(dt, dt_wall) - - # Set the default fold value for ambiguous dates - return enfold(dt_wall, fold=_fold) - - -class tzrangebase(_tzinfo): - """ - This is an abstract base class for time zones represented by an annual - transition into and out of DST. Child classes should implement the following - methods: - - * ``__init__(self, *args, **kwargs)`` - * ``transitions(self, year)`` - this is expected to return a tuple of - datetimes representing the DST on and off transitions in standard - time. - - A fully initialized ``tzrangebase`` subclass should also provide the - following attributes: - * ``hasdst``: Boolean whether or not the zone uses DST. - * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects - representing the respective UTC offsets. - * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short - abbreviations in DST and STD, respectively. - * ``_hasdst``: Whether or not the zone has DST. - - .. 
versionadded:: 2.6.0 - """ - def __init__(self): - raise NotImplementedError('tzrangebase is an abstract base class') - - def utcoffset(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_base_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def fromutc(self, dt): - """ Given a datetime in UTC, return local time """ - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # Get transitions - if there are none, fixed offset - transitions = self.transitions(dt.year) - if transitions is None: - return dt + self.utcoffset(dt) - - # Get the transition times in UTC - dston, dstoff = transitions - - dston -= self._std_offset - dstoff -= self._std_offset - - utc_transitions = (dston, dstoff) - dt_utc = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt_utc, utc_transitions) - - if isdst: - dt_wall = dt + self._dst_offset - else: - dt_wall = dt + self._std_offset - - _fold = int(not isdst and self.is_ambiguous(dt_wall)) - - return enfold(dt_wall, fold=_fold) - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. 
versionadded:: 2.6.0 - """ - if not self.hasdst: - return False - - start, end = self.transitions(dt.year) - - dt = dt.replace(tzinfo=None) - return (end <= dt < end + self._dst_base_offset) - - def _isdst(self, dt): - if not self.hasdst: - return False - elif dt is None: - return None - - transitions = self.transitions(dt.year) - - if transitions is None: - return False - - dt = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt, transitions) - - # Handle ambiguous dates - if not isdst and self.is_ambiguous(dt): - return not self._fold(dt) - else: - return isdst - - def _naive_isdst(self, dt, transitions): - dston, dstoff = transitions - - dt = dt.replace(tzinfo=None) - - if dston < dstoff: - isdst = dston <= dt < dstoff - else: - isdst = not dstoff <= dt < dston - - return isdst - - @property - def _dst_base_offset(self): - return self._dst_offset - self._std_offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ diff --git a/utill/dateutil/tz/_factories.py b/utill/dateutil/tz/_factories.py deleted file mode 100644 index d2560eb..0000000 --- a/utill/dateutil/tz/_factories.py +++ /dev/null @@ -1,73 +0,0 @@ -from datetime import timedelta -import weakref -from collections import OrderedDict - - -class _TzSingleton(type): - def __init__(cls, *args, **kwargs): - cls.__instance = None - super(_TzSingleton, cls).__init__(*args, **kwargs) - - def __call__(cls): - if cls.__instance is None: - cls.__instance = super(_TzSingleton, cls).__call__() - return cls.__instance - - -class _TzFactory(type): - def instance(cls, *args, **kwargs): - """Alternate constructor that returns a fresh instance""" - return type.__call__(cls, *args, **kwargs) - - -class _TzOffsetFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - 
def __call__(cls, name, offset): - if isinstance(offset, timedelta): - key = (name, offset.total_seconds()) - else: - key = (name, offset) - - instance = cls.__instances.get(key, None) - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(name, offset)) - - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - # TODO: Maybe this should be under a lock? - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - - -class _TzStrFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - def __call__(cls, s, posix_offset=False): - key = (s, posix_offset) - instance = cls.__instances.get(key, None) - - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(s, posix_offset)) - - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - - # Remove an item if the strong cache is overpopulated - # TODO: Maybe this should be under a lock? - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - diff --git a/utill/dateutil/tz/tz.py b/utill/dateutil/tz/tz.py deleted file mode 100644 index d05414e..0000000 --- a/utill/dateutil/tz/tz.py +++ /dev/null @@ -1,1836 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers timezone implementations subclassing the abstract -:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format -files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, -etc), TZ environment string (in all known formats), given ranges (with help -from relative deltas), local machine timezone, fixed offset timezone, and UTC -timezone. 
-""" -import datetime -import struct -import time -import sys -import os -import bisect -import weakref -from collections import OrderedDict - -import six -from six import string_types -from six.moves import _thread -from ._common import tzname_in_python2, _tzinfo -from ._common import tzrangebase, enfold -from ._common import _validate_fromutc_inputs - -from ._factories import _TzSingleton, _TzOffsetFactory -from ._factories import _TzStrFactory -try: - from .win import tzwin, tzwinlocal -except ImportError: - tzwin = tzwinlocal = None - -# For warning about rounding tzinfo -from warnings import warn - -ZERO = datetime.timedelta(0) -EPOCH = datetime.datetime.utcfromtimestamp(0) -EPOCHORDINAL = EPOCH.toordinal() - - -@six.add_metaclass(_TzSingleton) -class tzutc(datetime.tzinfo): - """ - This is a tzinfo object that represents the UTC time zone. - - **Examples:** - - .. doctest:: - - >>> from datetime import * - >>> from dateutil.tz import * - - >>> datetime.now() - datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) - - >>> datetime.now(tzutc()) - datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) - - >>> datetime.now(tzutc()).tzname() - 'UTC' - - .. versionchanged:: 2.7.0 - ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will - always return the same object. - - .. doctest:: - - >>> from dateutil.tz import tzutc, UTC - >>> tzutc() is tzutc() - True - >>> tzutc() is UTC - True - """ - def utcoffset(self, dt): - return ZERO - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return "UTC" - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. 
versionadded:: 2.6.0 - """ - return False - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Fast track version of fromutc() returns the original ``dt`` object for - any valid :py:class:`datetime.datetime` object. - """ - return dt - - def __eq__(self, other): - if not isinstance(other, (tzutc, tzoffset)): - return NotImplemented - - return (isinstance(other, tzutc) or - (isinstance(other, tzoffset) and other._offset == ZERO)) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - - -@six.add_metaclass(_TzOffsetFactory) -class tzoffset(datetime.tzinfo): - """ - A simple class for representing a fixed offset from UTC. - - :param name: - The timezone name, to be returned when ``tzname()`` is called. - :param offset: - The time zone offset in seconds, or (since version 2.6.0, represented - as a :py:class:`datetime.timedelta` object). - """ - def __init__(self, name, offset): - self._name = name - - try: - # Allow a timedelta - offset = offset.total_seconds() - except (TypeError, AttributeError): - pass - - self._offset = datetime.timedelta(seconds=_get_supported_offset(offset)) - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._name - - @_validate_fromutc_inputs - def fromutc(self, dt): - return dt + self._offset - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. 
versionadded:: 2.6.0 - """ - return False - - def __eq__(self, other): - if not isinstance(other, tzoffset): - return NotImplemented - - return self._offset == other._offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(%s, %s)" % (self.__class__.__name__, - repr(self._name), - int(self._offset.total_seconds())) - - __reduce__ = object.__reduce__ - - -class tzlocal(_tzinfo): - """ - A :class:`tzinfo` subclass built around the ``time`` timezone functions. - """ - def __init__(self): - super(tzlocal, self).__init__() - - self._std_offset = datetime.timedelta(seconds=-time.timezone) - if time.daylight: - self._dst_offset = datetime.timedelta(seconds=-time.altzone) - else: - self._dst_offset = self._std_offset - - self._dst_saved = self._dst_offset - self._std_offset - self._hasdst = bool(self._dst_saved) - self._tznames = tuple(time.tzname) - - def utcoffset(self, dt): - if dt is None and self._hasdst: - return None - - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if dt is None and self._hasdst: - return None - - if self._isdst(dt): - return self._dst_offset - self._std_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._tznames[self._isdst(dt)] - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - naive_dst = self._naive_is_dst(dt) - return (not naive_dst and - (naive_dst != self._naive_is_dst(dt - self._dst_saved))) - - def _naive_is_dst(self, dt): - timestamp = _datetime_to_timestamp(dt) - return time.localtime(timestamp + time.timezone).tm_isdst - - def _isdst(self, dt, fold_naive=True): - # We can't use mktime here. 
It is unstable when deciding if - # the hour near to a change is DST or not. - # - # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, - # dt.minute, dt.second, dt.weekday(), 0, -1)) - # return time.localtime(timestamp).tm_isdst - # - # The code above yields the following result: - # - # >>> import tz, datetime - # >>> t = tz.tzlocal() - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRDT' - # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() - # 'BRST' - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRST' - # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() - # 'BRDT' - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRDT' - # - # Here is a more stable implementation: - # - if not self._hasdst: - return False - - # Check for ambiguous times: - dstval = self._naive_is_dst(dt) - fold = getattr(dt, 'fold', None) - - if self.is_ambiguous(dt): - if fold is not None: - return not self._fold(dt) - else: - return True - - return dstval - - def __eq__(self, other): - if isinstance(other, tzlocal): - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset) - elif isinstance(other, tzutc): - return (not self._hasdst and - self._tznames[0] in {'UTC', 'GMT'} and - self._std_offset == ZERO) - elif isinstance(other, tzoffset): - return (not self._hasdst and - self._tznames[0] == other._name and - self._std_offset == other._offset) - else: - return NotImplemented - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - - -class _ttinfo(object): - __slots__ = ["offset", "delta", "isdst", "abbr", - "isstd", "isgmt", "dstoffset"] - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def __repr__(self): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - 
return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - - def __eq__(self, other): - if not isinstance(other, _ttinfo): - return NotImplemented - - return (self.offset == other.offset and - self.delta == other.delta and - self.isdst == other.isdst and - self.abbr == other.abbr and - self.isstd == other.isstd and - self.isgmt == other.isgmt and - self.dstoffset == other.dstoffset) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __getstate__(self): - state = {} - for name in self.__slots__: - state[name] = getattr(self, name, None) - return state - - def __setstate__(self, state): - for name in self.__slots__: - if name in state: - setattr(self, name, state[name]) - - -class _tzfile(object): - """ - Lightweight class for holding the relevant transition and time zone - information read from binary tzfiles. - """ - attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', - 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] - - def __init__(self, **kwargs): - for attr in self.attrs: - setattr(self, attr, kwargs.get(attr, None)) - - -class tzfile(_tzinfo): - """ - This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)`` - format timezone files to extract current and historical zone information. - - :param fileobj: - This can be an opened file stream or a file name that the time zone - information can be read from. - - :param filename: - This is an optional parameter specifying the source of the time zone - information in the event that ``fileobj`` is a file object. If omitted - and ``fileobj`` is a file stream, this parameter will be set either to - ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. - - See `Sources for Time Zone and Daylight Saving Time Data - `_ for more information. - Time zone files can be compiled from the `IANA Time Zone database files - `_ with the `zic time zone compiler - `_ - - .. 
note:: - - Only construct a ``tzfile`` directly if you have a specific timezone - file on disk that you want to read into a Python ``tzinfo`` object. - If you want to get a ``tzfile`` representing a specific IANA zone, - (e.g. ``'America/New_York'``), you should call - :func:`dateutil.tz.gettz` with the zone identifier. - - - **Examples:** - - Using the US Eastern time zone as an example, we can see that a ``tzfile`` - provides time zone information for the standard Daylight Saving offsets: - - .. testsetup:: tzfile - - from dateutil.tz import gettz - from datetime import datetime - - .. doctest:: tzfile - - >>> NYC = gettz('America/New_York') - >>> NYC - tzfile('/usr/share/zoneinfo/America/New_York') - - >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST - 2016-01-03 00:00:00-05:00 - - >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT - 2016-07-07 00:00:00-04:00 - - - The ``tzfile`` structure contains a fully history of the time zone, - so historical dates will also have the right offsets. For example, before - the adoption of the UTC standards, New York used local solar mean time: - - .. doctest:: tzfile - - >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT - 1901-04-12 00:00:00-04:56 - - And during World War II, New York was on "Eastern War Time", which was a - state of permanent daylight saving time: - - .. 
doctest:: tzfile - - >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT - 1944-02-07 00:00:00-04:00 - - """ - - def __init__(self, fileobj, filename=None): - super(tzfile, self).__init__() - - file_opened_here = False - if isinstance(fileobj, string_types): - self._filename = fileobj - fileobj = open(fileobj, 'rb') - file_opened_here = True - elif filename is not None: - self._filename = filename - elif hasattr(fileobj, "name"): - self._filename = fileobj.name - else: - self._filename = repr(fileobj) - - if fileobj is not None: - if not file_opened_here: - fileobj = _nullcontext(fileobj) - - with fileobj as file_stream: - tzobj = self._read_tzfile(file_stream) - - self._set_tzdata(tzobj) - - def _set_tzdata(self, tzobj): - """ Set the time zone data of this object from a _tzfile object """ - # Copy the relevant attributes over as private attributes - for attr in _tzfile.attrs: - setattr(self, '_' + attr, getattr(tzobj, attr)) - - def _read_tzfile(self, fileobj): - out = _tzfile() - - # From tzfile(5): - # - # The time zone information files used by tzset(3) - # begin with the magic characters "TZif" to identify - # them as time zone information files, followed by - # sixteen bytes reserved for future use, followed by - # six four-byte values of type long, written in a - # ``standard'' byte order (the high-order byte - # of the value is written first). - if fileobj.read(4).decode() != "TZif": - raise ValueError("magic not found") - - fileobj.read(16) - - ( - # The number of UTC/local indicators stored in the file. - ttisgmtcnt, - - # The number of standard/wall indicators stored in the file. - ttisstdcnt, - - # The number of leap seconds for which data is - # stored in the file. - leapcnt, - - # The number of "transition times" for which data - # is stored in the file. - timecnt, - - # The number of "local time types" for which data - # is stored in the file (must not be zero). 
- typecnt, - - # The number of characters of "time zone - # abbreviation strings" stored in the file. - charcnt, - - ) = struct.unpack(">6l", fileobj.read(24)) - - # The above header is followed by tzh_timecnt four-byte - # values of type long, sorted in ascending order. - # These values are written in ``standard'' byte order. - # Each is used as a transition time (as returned by - # time(2)) at which the rules for computing local time - # change. - - if timecnt: - out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, - fileobj.read(timecnt*4))) - else: - out.trans_list_utc = [] - - # Next come tzh_timecnt one-byte values of type unsigned - # char; each one tells which of the different types of - # ``local time'' types described in the file is associated - # with the same-indexed transition time. These values - # serve as indices into an array of ttinfo structures that - # appears next in the file. - - if timecnt: - out.trans_idx = struct.unpack(">%dB" % timecnt, - fileobj.read(timecnt)) - else: - out.trans_idx = [] - - # Each ttinfo structure is written as a four-byte value - # for tt_gmtoff of type long, in a standard byte - # order, followed by a one-byte value for tt_isdst - # and a one-byte value for tt_abbrind. In each - # structure, tt_gmtoff gives the number of - # seconds to be added to UTC, tt_isdst tells whether - # tm_isdst should be set by localtime(3), and - # tt_abbrind serves as an index into the array of - # time zone abbreviation characters that follow the - # ttinfo structure(s) in the file. - - ttinfo = [] - - for i in range(typecnt): - ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) - - abbr = fileobj.read(charcnt).decode() - - # Then there are tzh_leapcnt pairs of four-byte - # values, written in standard byte order; the - # first value of each pair gives the time (as - # returned by time(2)) at which a leap second - # occurs; the second gives the total number of - # leap seconds to be applied after the given time. 
- # The pairs of values are sorted in ascending order - # by time. - - # Not used, for now (but seek for correct file position) - if leapcnt: - fileobj.seek(leapcnt * 8, os.SEEK_CUR) - - # Then there are tzh_ttisstdcnt standard/wall - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as standard - # time or wall clock time, and are used when - # a time zone file is used in handling POSIX-style - # time zone environment variables. - - if ttisstdcnt: - isstd = struct.unpack(">%db" % ttisstdcnt, - fileobj.read(ttisstdcnt)) - - # Finally, there are tzh_ttisgmtcnt UTC/local - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as UTC or - # local time, and are used when a time zone file - # is used in handling POSIX-style time zone envi- - # ronment variables. - - if ttisgmtcnt: - isgmt = struct.unpack(">%db" % ttisgmtcnt, - fileobj.read(ttisgmtcnt)) - - # Build ttinfo list - out.ttinfo_list = [] - for i in range(typecnt): - gmtoff, isdst, abbrind = ttinfo[i] - gmtoff = _get_supported_offset(gmtoff) - tti = _ttinfo() - tti.offset = gmtoff - tti.dstoffset = datetime.timedelta(0) - tti.delta = datetime.timedelta(seconds=gmtoff) - tti.isdst = isdst - tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] - tti.isstd = (ttisstdcnt > i and isstd[i] != 0) - tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) - out.ttinfo_list.append(tti) - - # Replace ttinfo indexes for ttinfo objects. - out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] - - # Set standard, dst, and before ttinfos. before will be - # used when a given time is before any transitions, - # and will be set to the first non-dst ttinfo, or to - # the first dst, if all of them are dst. 
- out.ttinfo_std = None - out.ttinfo_dst = None - out.ttinfo_before = None - if out.ttinfo_list: - if not out.trans_list_utc: - out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] - else: - for i in range(timecnt-1, -1, -1): - tti = out.trans_idx[i] - if not out.ttinfo_std and not tti.isdst: - out.ttinfo_std = tti - elif not out.ttinfo_dst and tti.isdst: - out.ttinfo_dst = tti - - if out.ttinfo_std and out.ttinfo_dst: - break - else: - if out.ttinfo_dst and not out.ttinfo_std: - out.ttinfo_std = out.ttinfo_dst - - for tti in out.ttinfo_list: - if not tti.isdst: - out.ttinfo_before = tti - break - else: - out.ttinfo_before = out.ttinfo_list[0] - - # Now fix transition times to become relative to wall time. - # - # I'm not sure about this. In my tests, the tz source file - # is setup to wall time, and in the binary file isstd and - # isgmt are off, so it should be in wall time. OTOH, it's - # always in gmt time. Let me know if you have comments - # about this. - lastdst = None - lastoffset = None - lastdstoffset = None - lastbaseoffset = None - out.trans_list = [] - - for i, tti in enumerate(out.trans_idx): - offset = tti.offset - dstoffset = 0 - - if lastdst is not None: - if tti.isdst: - if not lastdst: - dstoffset = offset - lastoffset - - if not dstoffset and lastdstoffset: - dstoffset = lastdstoffset - - tti.dstoffset = datetime.timedelta(seconds=dstoffset) - lastdstoffset = dstoffset - - # If a time zone changes its base offset during a DST transition, - # then you need to adjust by the previous base offset to get the - # transition time in local time. Otherwise you use the current - # base offset. Ideally, I would have some mathematical proof of - # why this is true, but I haven't really thought about it enough. 
- baseoffset = offset - dstoffset - adjustment = baseoffset - if (lastbaseoffset is not None and baseoffset != lastbaseoffset - and tti.isdst != lastdst): - # The base DST has changed - adjustment = lastbaseoffset - - lastdst = tti.isdst - lastoffset = offset - lastbaseoffset = baseoffset - - out.trans_list.append(out.trans_list_utc[i] + adjustment) - - out.trans_idx = tuple(out.trans_idx) - out.trans_list = tuple(out.trans_list) - out.trans_list_utc = tuple(out.trans_list_utc) - - return out - - def _find_last_transition(self, dt, in_utc=False): - # If there's no list, there are no transitions to find - if not self._trans_list: - return None - - timestamp = _datetime_to_timestamp(dt) - - # Find where the timestamp fits in the transition list - if the - # timestamp is a transition time, it's part of the "after" period. - trans_list = self._trans_list_utc if in_utc else self._trans_list - idx = bisect.bisect_right(trans_list, timestamp) - - # We want to know when the previous transition was, so subtract off 1 - return idx - 1 - - def _get_ttinfo(self, idx): - # For no list or after the last transition, default to _ttinfo_std - if idx is None or (idx + 1) >= len(self._trans_list): - return self._ttinfo_std - - # If there is a list and the time is before it, return _ttinfo_before - if idx < 0: - return self._ttinfo_before - - return self._trans_idx[idx] - - def _find_ttinfo(self, dt): - idx = self._resolve_ambiguous_time(dt) - - return self._get_ttinfo(idx) - - def fromutc(self, dt): - """ - The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. - - :param dt: - A :py:class:`datetime.datetime` object. - - :raises TypeError: - Raised if ``dt`` is not a :py:class:`datetime.datetime` object. - - :raises ValueError: - Raised if this is called with a ``dt`` which does not have this - ``tzinfo`` attached. - - :return: - Returns a :py:class:`datetime.datetime` object representing the - wall time in ``self``'s time zone. 
- """ - # These isinstance checks are in datetime.tzinfo, so we'll preserve - # them, even if we don't care about duck typing. - if not isinstance(dt, datetime.datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # First treat UTC as wall time and get the transition we're in. - idx = self._find_last_transition(dt, in_utc=True) - tti = self._get_ttinfo(idx) - - dt_out = dt + datetime.timedelta(seconds=tti.offset) - - fold = self.is_ambiguous(dt_out, idx=idx) - - return enfold(dt_out, fold=int(fold)) - - def is_ambiguous(self, dt, idx=None): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - if idx is None: - idx = self._find_last_transition(dt) - - # Calculate the difference in offsets from current to previous - timestamp = _datetime_to_timestamp(dt) - tti = self._get_ttinfo(idx) - - if idx is None or idx <= 0: - return False - - od = self._get_ttinfo(idx - 1).offset - tti.offset - tt = self._trans_list[idx] # Transition time - - return timestamp < tt + od - - def _resolve_ambiguous_time(self, dt): - idx = self._find_last_transition(dt) - - # If we have no transitions, return the index - _fold = self._fold(dt) - if idx is None or idx == 0: - return idx - - # If it's ambiguous and we're in a fold, shift to a different index. 
- idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) - - return idx - idx_offset - - def utcoffset(self, dt): - if dt is None: - return None - - if not self._ttinfo_std: - return ZERO - - return self._find_ttinfo(dt).delta - - def dst(self, dt): - if dt is None: - return None - - if not self._ttinfo_dst: - return ZERO - - tti = self._find_ttinfo(dt) - - if not tti.isdst: - return ZERO - - # The documentation says that utcoffset()-dst() must - # be constant for every dt. - return tti.dstoffset - - @tzname_in_python2 - def tzname(self, dt): - if not self._ttinfo_std or dt is None: - return None - return self._find_ttinfo(dt).abbr - - def __eq__(self, other): - if not isinstance(other, tzfile): - return NotImplemented - return (self._trans_list == other._trans_list and - self._trans_idx == other._trans_idx and - self._ttinfo_list == other._ttinfo_list) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) - - def __reduce__(self): - return self.__reduce_ex__(None) - - def __reduce_ex__(self, protocol): - return (self.__class__, (None, self._filename), self.__dict__) - - -class tzrange(tzrangebase): - """ - The ``tzrange`` object is a time zone specified by a set of offsets and - abbreviations, equivalent to the way the ``TZ`` variable can be specified - in POSIX-like systems, but using Python delta objects to specify DST - start, end and offsets. - - :param stdabbr: - The abbreviation for standard time (e.g. ``'EST'``). - - :param stdoffset: - An integer or :class:`datetime.timedelta` object or equivalent - specifying the base offset from UTC. - - If unspecified, +00:00 is used. - - :param dstabbr: - The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). - - If specified, with no other DST information, DST is assumed to occur - and the default behavior or ``dstoffset``, ``start`` and ``end`` is - used. 
If unspecified and no other DST information is specified, it - is assumed that this zone has no DST. - - If this is unspecified and other DST information is *is* specified, - DST occurs in the zone but the time zone abbreviation is left - unchanged. - - :param dstoffset: - A an integer or :class:`datetime.timedelta` object or equivalent - specifying the UTC offset during DST. If unspecified and any other DST - information is specified, it is assumed to be the STD offset +1 hour. - - :param start: - A :class:`relativedelta.relativedelta` object or equivalent specifying - the time and time of year that daylight savings time starts. To - specify, for example, that DST starts at 2AM on the 2nd Sunday in - March, pass: - - ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` - - If unspecified and any other DST information is specified, the default - value is 2 AM on the first Sunday in April. - - :param end: - A :class:`relativedelta.relativedelta` object or equivalent - representing the time and time of year that daylight savings time - ends, with the same specification method as in ``start``. One note is - that this should point to the first time in the *standard* zone, so if - a transition occurs at 2AM in the DST zone and the clocks are set back - 1 hour to 1AM, set the ``hours`` parameter to +1. - - - **Examples:** - - .. testsetup:: tzrange - - from dateutil.tz import tzrange, tzstr - - .. doctest:: tzrange - - >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") - True - - >>> from dateutil.relativedelta import * - >>> range1 = tzrange("EST", -18000, "EDT") - >>> range2 = tzrange("EST", -18000, "EDT", -14400, - ... relativedelta(hours=+2, month=4, day=1, - ... weekday=SU(+1)), - ... relativedelta(hours=+1, month=10, day=31, - ... 
weekday=SU(-1))) - >>> tzstr('EST5EDT') == range1 == range2 - True - - """ - def __init__(self, stdabbr, stdoffset=None, - dstabbr=None, dstoffset=None, - start=None, end=None): - - global relativedelta - from dateutil import relativedelta - - self._std_abbr = stdabbr - self._dst_abbr = dstabbr - - try: - stdoffset = stdoffset.total_seconds() - except (TypeError, AttributeError): - pass - - try: - dstoffset = dstoffset.total_seconds() - except (TypeError, AttributeError): - pass - - if stdoffset is not None: - self._std_offset = datetime.timedelta(seconds=stdoffset) - else: - self._std_offset = ZERO - - if dstoffset is not None: - self._dst_offset = datetime.timedelta(seconds=dstoffset) - elif dstabbr and stdoffset is not None: - self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) - else: - self._dst_offset = ZERO - - if dstabbr and start is None: - self._start_delta = relativedelta.relativedelta( - hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) - else: - self._start_delta = start - - if dstabbr and end is None: - self._end_delta = relativedelta.relativedelta( - hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) - else: - self._end_delta = end - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = bool(self._start_delta) - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. - - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. 
- """ - if not self.hasdst: - return None - - base_year = datetime.datetime(year, 1, 1) - - start = base_year + self._start_delta - end = base_year + self._end_delta - - return (start, end) - - def __eq__(self, other): - if not isinstance(other, tzrange): - return NotImplemented - - return (self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr and - self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._start_delta == other._start_delta and - self._end_delta == other._end_delta) - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -@six.add_metaclass(_TzStrFactory) -class tzstr(tzrange): - """ - ``tzstr`` objects are time zone objects specified by a time-zone string as - it would be passed to a ``TZ`` variable on POSIX-style systems (see - the `GNU C Library: TZ Variable`_ for more details). - - There is one notable exception, which is that POSIX-style time zones use an - inverted offset format, so normally ``GMT+3`` would be parsed as an offset - 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an - offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX - behavior, pass a ``True`` value to ``posix_offset``. - - The :class:`tzrange` object provides the same functionality, but is - specified using :class:`relativedelta.relativedelta` objects. rather than - strings. - - :param s: - A time zone string in ``TZ`` variable format. This can be a - :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: - :class:`unicode`) or a stream emitting unicode characters - (e.g. :class:`StringIO`). - - :param posix_offset: - Optional. If set to ``True``, interpret strings such as ``GMT+3`` or - ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the - POSIX standard. - - .. 
caution:: - - Prior to version 2.7.0, this function also supported time zones - in the format: - - * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` - * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` - - This format is non-standard and has been deprecated; this function - will raise a :class:`DeprecatedTZFormatWarning` until - support is removed in a future version. - - .. _`GNU C Library: TZ Variable`: - https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html - """ - def __init__(self, s, posix_offset=False): - global parser - from dateutil.parser import _parser as parser - - self._s = s - - res = parser._parsetz(s) - if res is None or res.any_unused_tokens: - raise ValueError("unknown string format") - - # Here we break the compatibility with the TZ variable handling. - # GMT-3 actually *means* the timezone -3. - if res.stdabbr in ("GMT", "UTC") and not posix_offset: - res.stdoffset *= -1 - - # We must initialize it first, since _delta() needs - # _std_offset and _dst_offset set. Use False in start/end - # to avoid building it two times. - tzrange.__init__(self, res.stdabbr, res.stdoffset, - res.dstabbr, res.dstoffset, - start=False, end=False) - - if not res.dstabbr: - self._start_delta = None - self._end_delta = None - else: - self._start_delta = self._delta(res.start) - if self._start_delta: - self._end_delta = self._delta(res.end, isend=1) - - self.hasdst = bool(self._start_delta) - - def _delta(self, x, isend=0): - from dateutil import relativedelta - kwargs = {} - if x.month is not None: - kwargs["month"] = x.month - if x.weekday is not None: - kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) - if x.week > 0: - kwargs["day"] = 1 - else: - kwargs["day"] = 31 - elif x.day: - kwargs["day"] = x.day - elif x.yday is not None: - kwargs["yearday"] = x.yday - elif x.jyday is not None: - kwargs["nlyearday"] = x.jyday - if not kwargs: - # Default is to start on first sunday of april, and end - # on last sunday of october. 
- if not isend: - kwargs["month"] = 4 - kwargs["day"] = 1 - kwargs["weekday"] = relativedelta.SU(+1) - else: - kwargs["month"] = 10 - kwargs["day"] = 31 - kwargs["weekday"] = relativedelta.SU(-1) - if x.time is not None: - kwargs["seconds"] = x.time - else: - # Default is 2AM. - kwargs["seconds"] = 7200 - if isend: - # Convert to standard time, to follow the documented way - # of working with the extra hour. See the documentation - # of the tzinfo class. - delta = self._dst_offset - self._std_offset - kwargs["seconds"] -= delta.seconds + delta.days * 86400 - return relativedelta.relativedelta(**kwargs) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - - -class _tzicalvtzcomp(object): - def __init__(self, tzoffsetfrom, tzoffsetto, isdst, - tzname=None, rrule=None): - self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) - self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) - self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom - self.isdst = isdst - self.tzname = tzname - self.rrule = rrule - - -class _tzicalvtz(_tzinfo): - def __init__(self, tzid, comps=[]): - super(_tzicalvtz, self).__init__() - - self._tzid = tzid - self._comps = comps - self._cachedate = [] - self._cachecomp = [] - self._cache_lock = _thread.allocate_lock() - - def _find_comp(self, dt): - if len(self._comps) == 1: - return self._comps[0] - - dt = dt.replace(tzinfo=None) - - try: - with self._cache_lock: - return self._cachecomp[self._cachedate.index( - (dt, self._fold(dt)))] - except ValueError: - pass - - lastcompdt = None - lastcomp = None - - for comp in self._comps: - compdt = self._find_compdt(comp, dt) - - if compdt and (not lastcompdt or lastcompdt < compdt): - lastcompdt = compdt - lastcomp = comp - - if not lastcomp: - # RFC says nothing about what to do when a given - # time is before the first onset date. We'll look for the - # first standard component, or the first component, if - # none is found. 
- for comp in self._comps: - if not comp.isdst: - lastcomp = comp - break - else: - lastcomp = comp[0] - - with self._cache_lock: - self._cachedate.insert(0, (dt, self._fold(dt))) - self._cachecomp.insert(0, lastcomp) - - if len(self._cachedate) > 10: - self._cachedate.pop() - self._cachecomp.pop() - - return lastcomp - - def _find_compdt(self, comp, dt): - if comp.tzoffsetdiff < ZERO and self._fold(dt): - dt -= comp.tzoffsetdiff - - compdt = comp.rrule.before(dt, inc=True) - - return compdt - - def utcoffset(self, dt): - if dt is None: - return None - - return self._find_comp(dt).tzoffsetto - - def dst(self, dt): - comp = self._find_comp(dt) - if comp.isdst: - return comp.tzoffsetdiff - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._find_comp(dt).tzname - - def __repr__(self): - return "" % repr(self._tzid) - - __reduce__ = object.__reduce__ - - -class tzical(object): - """ - This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure - as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects. - - :param `fileobj`: - A file or stream in iCalendar format, which should be UTF-8 encoded - with CRLF endings. - - .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 - """ - def __init__(self, fileobj): - global rrule - from dateutil import rrule - - if isinstance(fileobj, string_types): - self._s = fileobj - # ical should be encoded in UTF-8 with CRLF - fileobj = open(fileobj, 'r') - else: - self._s = getattr(fileobj, 'name', repr(fileobj)) - fileobj = _nullcontext(fileobj) - - self._vtz = {} - - with fileobj as fobj: - self._parse_rfc(fobj.read()) - - def keys(self): - """ - Retrieves the available time zones as a list. - """ - return list(self._vtz.keys()) - - def get(self, tzid=None): - """ - Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. - - :param tzid: - If there is exactly one time zone available, omitting ``tzid`` - or passing :py:const:`None` value returns it. 
Otherwise a valid - key (which can be retrieved from :func:`keys`) is required. - - :raises ValueError: - Raised if ``tzid`` is not specified but there are either more - or fewer than 1 zone defined. - - :returns: - Returns either a :py:class:`datetime.tzinfo` object representing - the relevant time zone or :py:const:`None` if the ``tzid`` was - not found. - """ - if tzid is None: - if len(self._vtz) == 0: - raise ValueError("no timezones defined") - elif len(self._vtz) > 1: - raise ValueError("more than one timezone available") - tzid = next(iter(self._vtz)) - - return self._vtz.get(tzid) - - def _parse_offset(self, s): - s = s.strip() - if not s: - raise ValueError("empty offset") - if s[0] in ('+', '-'): - signal = (-1, +1)[s[0] == '+'] - s = s[1:] - else: - signal = +1 - if len(s) == 4: - return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal - elif len(s) == 6: - return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal - else: - raise ValueError("invalid offset: " + s) - - def _parse_rfc(self, s): - lines = s.splitlines() - if not lines: - raise ValueError("empty string") - - # Unfold - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - - tzid = None - comps = [] - invtz = False - comptype = None - for line in lines: - if not line: - continue - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0].upper() - parms = parms[1:] - if invtz: - if name == "BEGIN": - if value in ("STANDARD", "DAYLIGHT"): - # Process component - pass - else: - raise ValueError("unknown component: "+value) - comptype = value - founddtstart = False - tzoffsetfrom = None - tzoffsetto = None - rrulelines = [] - tzname = None - elif name == "END": - if value == "VTIMEZONE": - if comptype: - raise ValueError("component not closed: "+comptype) - if not tzid: - raise 
ValueError("mandatory TZID not found") - if not comps: - raise ValueError( - "at least one component is needed") - # Process vtimezone - self._vtz[tzid] = _tzicalvtz(tzid, comps) - invtz = False - elif value == comptype: - if not founddtstart: - raise ValueError("mandatory DTSTART not found") - if tzoffsetfrom is None: - raise ValueError( - "mandatory TZOFFSETFROM not found") - if tzoffsetto is None: - raise ValueError( - "mandatory TZOFFSETFROM not found") - # Process component - rr = None - if rrulelines: - rr = rrule.rrulestr("\n".join(rrulelines), - compatible=True, - ignoretz=True, - cache=True) - comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, - (comptype == "DAYLIGHT"), - tzname, rr) - comps.append(comp) - comptype = None - else: - raise ValueError("invalid component end: "+value) - elif comptype: - if name == "DTSTART": - # DTSTART in VTIMEZONE takes a subset of valid RRULE - # values under RFC 5545. - for parm in parms: - if parm != 'VALUE=DATE-TIME': - msg = ('Unsupported DTSTART param in ' + - 'VTIMEZONE: ' + parm) - raise ValueError(msg) - rrulelines.append(line) - founddtstart = True - elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): - rrulelines.append(line) - elif name == "TZOFFSETFROM": - if parms: - raise ValueError( - "unsupported %s parm: %s " % (name, parms[0])) - tzoffsetfrom = self._parse_offset(value) - elif name == "TZOFFSETTO": - if parms: - raise ValueError( - "unsupported TZOFFSETTO parm: "+parms[0]) - tzoffsetto = self._parse_offset(value) - elif name == "TZNAME": - if parms: - raise ValueError( - "unsupported TZNAME parm: "+parms[0]) - tzname = value - elif name == "COMMENT": - pass - else: - raise ValueError("unsupported property: "+name) - else: - if name == "TZID": - if parms: - raise ValueError( - "unsupported TZID parm: "+parms[0]) - tzid = value - elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): - pass - else: - raise ValueError("unsupported property: "+name) - elif name == "BEGIN" and value == "VTIMEZONE": - tzid = None - 
comps = [] - invtz = True - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - - -if sys.platform != "win32": - TZFILES = ["/etc/localtime", "localtime"] - TZPATHS = ["/usr/share/zoneinfo", - "/usr/lib/zoneinfo", - "/usr/share/lib/zoneinfo", - "/etc/zoneinfo"] -else: - TZFILES = [] - TZPATHS = [] - - -def __get_gettz(): - tzlocal_classes = (tzlocal,) - if tzwinlocal is not None: - tzlocal_classes += (tzwinlocal,) - - class GettzFunc(object): - """ - Retrieve a time zone object from a string representation - - This function is intended to retrieve the :py:class:`tzinfo` subclass - that best represents the time zone that would be used if a POSIX - `TZ variable`_ were set to the same value. - - If no argument or an empty string is passed to ``gettz``, local time - is returned: - - .. code-block:: python3 - - >>> gettz() - tzfile('/etc/localtime') - - This function is also the preferred way to map IANA tz database keys - to :class:`tzfile` objects: - - .. code-block:: python3 - - >>> gettz('Pacific/Kiritimati') - tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') - - On Windows, the standard is extended to include the Windows-specific - zone names provided by the operating system: - - .. code-block:: python3 - - >>> gettz('Egypt Standard Time') - tzwin('Egypt Standard Time') - - Passing a GNU ``TZ`` style string time zone specification returns a - :class:`tzstr` object: - - .. code-block:: python3 - - >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') - tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') - - :param name: - A time zone name (IANA, or, on Windows, Windows keys), location of - a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone - specifier. An empty string, no argument or ``None`` is interpreted - as local time. - - :return: - Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` - subclasses. - - .. 
versionchanged:: 2.7.0 - - After version 2.7.0, any two calls to ``gettz`` using the same - input strings will return the same object: - - .. code-block:: python3 - - >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') - True - - In addition to improving performance, this ensures that - `"same zone" semantics`_ are used for datetimes in the same zone. - - - .. _`TZ variable`: - https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html - - .. _`"same zone" semantics`: - https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html - """ - def __init__(self): - - self.__instances = weakref.WeakValueDictionary() - self.__strong_cache_size = 8 - self.__strong_cache = OrderedDict() - self._cache_lock = _thread.allocate_lock() - - def __call__(self, name=None): - with self._cache_lock: - rv = self.__instances.get(name, None) - - if rv is None: - rv = self.nocache(name=name) - if not (name is None - or isinstance(rv, tzlocal_classes) - or rv is None): - # tzlocal is slightly more complicated than the other - # time zone providers because it depends on environment - # at construction time, so don't cache that. - # - # We also cannot store weak references to None, so we - # will also not store that. 
- self.__instances[name] = rv - else: - # No need for strong caching, return immediately - return rv - - self.__strong_cache[name] = self.__strong_cache.pop(name, rv) - - if len(self.__strong_cache) > self.__strong_cache_size: - self.__strong_cache.popitem(last=False) - - return rv - - def set_cache_size(self, size): - with self._cache_lock: - self.__strong_cache_size = size - while len(self.__strong_cache) > size: - self.__strong_cache.popitem(last=False) - - def cache_clear(self): - with self._cache_lock: - self.__instances = weakref.WeakValueDictionary() - self.__strong_cache.clear() - - @staticmethod - def nocache(name=None): - """A non-cached version of gettz""" - tz = None - if not name: - try: - name = os.environ["TZ"] - except KeyError: - pass - if name is None or name == ":": - for filepath in TZFILES: - if not os.path.isabs(filepath): - filename = filepath - for path in TZPATHS: - filepath = os.path.join(path, filename) - if os.path.isfile(filepath): - break - else: - continue - if os.path.isfile(filepath): - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = tzlocal() - else: - if name.startswith(":"): - name = name[1:] - if os.path.isabs(name): - if os.path.isfile(name): - tz = tzfile(name) - else: - tz = None - else: - for path in TZPATHS: - filepath = os.path.join(path, name) - if not os.path.isfile(filepath): - filepath = filepath.replace(' ', '_') - if not os.path.isfile(filepath): - continue - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = None - if tzwin is not None: - try: - tz = tzwin(name) - except (WindowsError, UnicodeEncodeError): - # UnicodeEncodeError is for Python 2.7 compat - tz = None - - if not tz: - from dateutil.zoneinfo import get_zonefile_instance - tz = get_zonefile_instance().get(name) - - if not tz: - for c in name: - # name is not a tzstr unless it has at least - # one offset. 
For short values of "name", an - # explicit for loop seems to be the fastest way - # To determine if a string contains a digit - if c in "0123456789": - try: - tz = tzstr(name) - except ValueError: - pass - break - else: - if name in ("GMT", "UTC"): - tz = tzutc() - elif name in time.tzname: - tz = tzlocal() - return tz - - return GettzFunc() - - -gettz = __get_gettz() -del __get_gettz - - -def datetime_exists(dt, tz=None): - """ - Given a datetime and a time zone, determine whether or not a given datetime - would fall in a gap. - - :param dt: - A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` - is provided.) - - :param tz: - A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If - ``None`` or not provided, the datetime's own time zone will be used. - - :return: - Returns a boolean value whether or not the "wall time" exists in - ``tz``. - - .. versionadded:: 2.7.0 - """ - if tz is None: - if dt.tzinfo is None: - raise ValueError('Datetime is naive and no time zone provided.') - tz = dt.tzinfo - - dt = dt.replace(tzinfo=None) - - # This is essentially a test of whether or not the datetime can survive - # a round trip to UTC. - dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz) - dt_rt = dt_rt.replace(tzinfo=None) - - return dt == dt_rt - - -def datetime_ambiguous(dt, tz=None): - """ - Given a datetime and a time zone, determine whether or not a given datetime - is ambiguous (i.e if there are two times differentiated only by their DST - status). - - :param dt: - A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` - is provided.) - - :param tz: - A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If - ``None`` or not provided, the datetime's own time zone will be used. - - :return: - Returns a boolean value whether or not the "wall time" is ambiguous in - ``tz``. - - .. 
versionadded:: 2.6.0 - """ - if tz is None: - if dt.tzinfo is None: - raise ValueError('Datetime is naive and no time zone provided.') - - tz = dt.tzinfo - - # If a time zone defines its own "is_ambiguous" function, we'll use that. - is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) - if is_ambiguous_fn is not None: - try: - return tz.is_ambiguous(dt) - except Exception: - pass - - # If it doesn't come out and tell us it's ambiguous, we'll just check if - # the fold attribute has any effect on this particular date and time. - dt = dt.replace(tzinfo=tz) - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dst = wall_0.dst() == wall_1.dst() - - return not (same_offset and same_dst) - - -def resolve_imaginary(dt): - """ - Given a datetime that may be imaginary, return an existing datetime. - - This function assumes that an imaginary datetime represents what the - wall time would be in a zone had the offset transition not occurred, so - it will always fall forward by the transition's change in offset. - - .. doctest:: - - >>> from dateutil import tz - >>> from datetime import datetime - >>> NYC = tz.gettz('America/New_York') - >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) - 2017-03-12 03:30:00-04:00 - - >>> KIR = tz.gettz('Pacific/Kiritimati') - >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) - 1995-01-02 12:30:00+14:00 - - As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, - existing datetime, so a round-trip to and from UTC is sufficient to get - an extant datetime, however, this generally "falls back" to an earlier time - rather than falling forward to the STD side (though no guarantees are made - about this behavior). - - :param dt: - A :class:`datetime.datetime` which may or may not exist. - - :return: - Returns an existing :class:`datetime.datetime`. 
If ``dt`` was not - imaginary, the datetime returned is guaranteed to be the same object - passed to the function. - - .. versionadded:: 2.7.0 - """ - if dt.tzinfo is not None and not datetime_exists(dt): - - curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() - old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() - - dt += curr_offset - old_offset - - return dt - - -def _datetime_to_timestamp(dt): - """ - Convert a :class:`datetime.datetime` object to an epoch timestamp in - seconds since January 1, 1970, ignoring the time zone. - """ - return (dt.replace(tzinfo=None) - EPOCH).total_seconds() - - -if sys.version_info >= (3, 6): - def _get_supported_offset(second_offset): - return second_offset -else: - def _get_supported_offset(second_offset): - # For python pre-3.6, round to full-minutes if that's not the case. - # Python's datetime doesn't accept sub-minute timezones. Check - # http://python.org/sf/1447945 or https://bugs.python.org/issue5288 - # for some information. - old_offset = second_offset - calculated_offset = 60 * ((second_offset + 30) // 60) - return calculated_offset - - -try: - # Python 3.7 feature - from contextmanager import nullcontext as _nullcontext -except ImportError: - class _nullcontext(object): - """ - Class for wrapping contexts so that they are passed through in a - with statement. - """ - def __init__(self, context): - self.context = context - - def __enter__(self): - return self.context - - def __exit__(*args, **kwargs): - pass - -# vim:ts=4:sw=4:et diff --git a/utill/dateutil/tz/win.py b/utill/dateutil/tz/win.py deleted file mode 100644 index cde07ba..0000000 --- a/utill/dateutil/tz/win.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module provides an interface to the native time zone data on Windows, -including :py:class:`datetime.tzinfo` implementations. - -Attempting to import this module on a non-Windows platform will raise an -:py:obj:`ImportError`. 
-""" -# This code was originally contributed by Jeffrey Harris. -import datetime -import struct - -from six.moves import winreg -from six import text_type - -try: - import ctypes - from ctypes import wintypes -except ValueError: - # ValueError is raised on non-Windows systems for some horrible reason. - raise ImportError("Running tzwin on non-Windows system") - -from ._common import tzrangebase - -__all__ = ["tzwin", "tzwinlocal", "tzres"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - - -def _settzkeyname(): - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - try: - winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - return TZKEYNAME - - -TZKEYNAME = _settzkeyname() - - -class tzres(object): - """ - Class for accessing ``tzres.dll``, which contains timezone name related - resources. - - .. versionadded:: 2.5.0 - """ - p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char - - def __init__(self, tzres_loc='tzres.dll'): - # Load the user32 DLL so we can load strings from tzres - user32 = ctypes.WinDLL('user32') - - # Specify the LoadStringW function - user32.LoadStringW.argtypes = (wintypes.HINSTANCE, - wintypes.UINT, - wintypes.LPWSTR, - ctypes.c_int) - - self.LoadStringW = user32.LoadStringW - self._tzres = ctypes.WinDLL(tzres_loc) - self.tzres_loc = tzres_loc - - def load_name(self, offset): - """ - Load a timezone name from a DLL offset (integer). - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.load_name(112)) - 'Eastern Standard Time' - - :param offset: - A positive integer value referring to a string from the tzres dll. - - .. 
note:: - - Offsets found in the registry are generally of the form - ``@tzres.dll,-114``. The offset in this case is 114, not -114. - - """ - resource = self.p_wchar() - lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) - nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) - return resource[:nchar] - - def name_from_string(self, tzname_str): - """ - Parse strings as returned from the Windows registry into the time zone - name as defined in the registry. - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.name_from_string('@tzres.dll,-251')) - 'Dateline Daylight Time' - >>> print(tzr.name_from_string('Eastern Standard Time')) - 'Eastern Standard Time' - - :param tzname_str: - A timezone name string as returned from a Windows registry key. - - :return: - Returns the localized timezone string from tzres.dll if the string - is of the form `@tzres.dll,-offset`, else returns the input string. - """ - if not tzname_str.startswith('@'): - return tzname_str - - name_splt = tzname_str.split(',-') - try: - offset = int(name_splt[1]) - except: - raise ValueError("Malformed timezone string.") - - return self.load_name(offset) - - -class tzwinbase(tzrangebase): - """tzinfo class based on win32's timezones available in the registry.""" - def __init__(self): - raise NotImplementedError('tzwinbase is an abstract base class') - - def __eq__(self, other): - # Compare on all relevant dimensions, including name. 
- if not isinstance(other, tzwinbase): - return NotImplemented - - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._stddayofweek == other._stddayofweek and - self._dstdayofweek == other._dstdayofweek and - self._stdweeknumber == other._stdweeknumber and - self._dstweeknumber == other._dstweeknumber and - self._stdhour == other._stdhour and - self._dsthour == other._dsthour and - self._stdminute == other._stdminute and - self._dstminute == other._dstminute and - self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr) - - @staticmethod - def list(): - """Return a list of all time zones known to the system.""" - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZKEYNAME) as tzkey: - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - return result - - def display(self): - """ - Return the display name of the time zone. - """ - return self._display - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. - - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. 
- """ - - if not self.hasdst: - return None - - dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - - dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - - # Ambiguous dates default to the STD side - dstoff -= self._dst_base_offset - - return dston, dstoff - - def _get_hasdst(self): - return self._dstmonth != 0 - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -class tzwin(tzwinbase): - """ - Time zone object created from the zone info in the Windows registry - - These are similar to :py:class:`dateutil.tz.tzrange` objects in that - the time zone data is provided in the format of a single offset rule - for either 0 or 2 time zone transitions per year. - - :param: name - The name of a Windows time zone key, e.g. "Eastern Standard Time". - The full list of keys can be retrieved with :func:`tzwin.list`. - """ - - def __init__(self, name): - self._name = name - - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - keydict = valuestodict(tzkey) - - self._std_abbr = keydict["Std"] - self._dst_abbr = keydict["Dlt"] - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - dstoffset = stdoffset-tup[2] # + DaylightBias * -1 - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs - # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) 
= tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - """ - Class representing the local time zone information in the Windows registry - - While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` - module) to retrieve time zone information, ``tzwinlocal`` retrieves the - rules directly from the Windows registry and creates an object like - :class:`dateutil.tz.tzwin`. - - Because Windows does not have an equivalent of :func:`time.tzset`, on - Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the - time zone settings *at the time that the process was started*, meaning - changes to the machine's time zone settings during the run of a program - on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. - Because ``tzwinlocal`` reads the registry directly, it is unaffected by - this issue. 
- """ - def __init__(self): - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: - keydict = valuestodict(tzlocalkey) - - self._std_abbr = keydict["StandardName"] - self._dst_abbr = keydict["DaylightName"] - - try: - tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, - sn=self._std_abbr) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - except OSError: - self._display = None - - stdoffset = -keydict["Bias"]-keydict["StandardBias"] - dstoffset = stdoffset-keydict["DaylightBias"] - - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # For reasons unclear, in this particular key, the day of week has been - # moved to the END of the SYSTEMTIME structure. - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:5] - - self._stddayofweek = tup[7] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:5] - - self._dstdayofweek = tup[7] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwinlocal()" - - def __str__(self): - # str will return the standard name, not the daylight name. 
- return "tzwinlocal(%s)" % repr(self._std_abbr) - - def __reduce__(self): - return (self.__class__, ()) - - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ - first = datetime.datetime(year, month, 1, hour, minute) - - # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), - # Because 7 % 7 = 0 - weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) - wd = weekdayone + ((whichweek - 1) * ONEWEEK) - if (wd.month != month): - wd -= ONEWEEK - - return wd - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dout = {} - size = winreg.QueryInfoKey(key)[1] - tz_res = None - - for i in range(size): - key_name, value, dtype = winreg.EnumValue(key, i) - if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: - # If it's a DWORD (32-bit integer), it's stored as unsigned - convert - # that to a proper signed integer - if value & (1 << 31): - value = value - (1 << 32) - elif dtype == winreg.REG_SZ: - # If it's a reference to the tzres DLL, load the actual string - if value.startswith('@tzres'): - tz_res = tz_res or tzres() - value = tz_res.name_from_string(value) - - value = value.rstrip('\x00') # Remove trailing nulls - - dout[key_name] = value - - return dout diff --git a/utill/dateutil/tzwin.py b/utill/dateutil/tzwin.py deleted file mode 100644 index cebc673..0000000 --- a/utill/dateutil/tzwin.py +++ /dev/null @@ -1,2 +0,0 @@ -# tzwin has moved to dateutil.tz.win -from .tz.win import * diff --git a/utill/dateutil/utils.py b/utill/dateutil/utils.py deleted file mode 100644 index ebcce6a..0000000 --- a/utill/dateutil/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers general convenience and utility functions for dealing with -datetimes. - -.. 
versionadded:: 2.7.0 -""" -from __future__ import unicode_literals - -from datetime import datetime, time - - -def today(tzinfo=None): - """ - Returns a :py:class:`datetime` representing the current day at midnight - - :param tzinfo: - The time zone to attach (also used to determine the current day). - - :return: - A :py:class:`datetime.datetime` object representing the current day - at midnight. - """ - - dt = datetime.now(tzinfo) - return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) - - -def default_tzinfo(dt, tzinfo): - """ - Sets the the ``tzinfo`` parameter on naive datetimes only - - This is useful for example when you are provided a datetime that may have - either an implicit or explicit time zone, such as when parsing a time zone - string. - - .. doctest:: - - >>> from dateutil.tz import tzoffset - >>> from dateutil.parser import parse - >>> from dateutil.utils import default_tzinfo - >>> dflt_tz = tzoffset("EST", -18000) - >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) - 2014-01-01 12:30:00+00:00 - >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) - 2014-01-01 12:30:00-05:00 - - :param dt: - The datetime on which to replace the time zone - - :param tzinfo: - The :py:class:`datetime.tzinfo` subclass instance to assign to - ``dt`` if (and only if) it is naive. - - :return: - Returns an aware :py:class:`datetime.datetime`. - """ - if dt.tzinfo is not None: - return dt - else: - return dt.replace(tzinfo=tzinfo) - - -def within_delta(dt1, dt2, delta): - """ - Useful for comparing two datetimes that may a negilible difference - to be considered equal. 
- """ - delta = abs(delta) - difference = dt1 - dt2 - return -delta <= difference <= delta diff --git a/utill/dateutil/zoneinfo/__init__.py b/utill/dateutil/zoneinfo/__init__.py deleted file mode 100644 index 34f11ad..0000000 --- a/utill/dateutil/zoneinfo/__init__.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -import warnings -import json - -from tarfile import TarFile -from pkgutil import get_data -from io import BytesIO - -from dateutil.tz import tzfile as _tzfile - -__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] - -ZONEFILENAME = "dateutil-zoneinfo.tar.gz" -METADATA_FN = 'METADATA' - - -class tzfile(_tzfile): - def __reduce__(self): - return (gettz, (self._filename,)) - - -def getzoneinfofile_stream(): - try: - return BytesIO(get_data(__name__, ZONEFILENAME)) - except IOError as e: # TODO switch to FileNotFoundError? - warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) - return None - - -class ZoneInfoFile(object): - def __init__(self, zonefile_stream=None): - if zonefile_stream is not None: - with TarFile.open(fileobj=zonefile_stream) as tf: - self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name) - for zf in tf.getmembers() - if zf.isfile() and zf.name != METADATA_FN} - # deal with links: They'll point to their parent object. Less - # waste of memory - links = {zl.name: self.zones[zl.linkname] - for zl in tf.getmembers() if - zl.islnk() or zl.issym()} - self.zones.update(links) - try: - metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) - metadata_str = metadata_json.read().decode('UTF-8') - self.metadata = json.loads(metadata_str) - except KeyError: - # no metadata in tar file - self.metadata = None - else: - self.zones = {} - self.metadata = None - - def get(self, name, default=None): - """ - Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method - for retrieving zones from the zone dictionary. - - :param name: - The name of the zone to retrieve. 
(Generally IANA zone names) - - :param default: - The value to return in the event of a missing key. - - .. versionadded:: 2.6.0 - - """ - return self.zones.get(name, default) - - -# The current API has gettz as a module function, although in fact it taps into -# a stateful class. So as a workaround for now, without changing the API, we -# will create a new "global" class instance the first time a user requests a -# timezone. Ugly, but adheres to the api. -# -# TODO: Remove after deprecation period. -_CLASS_ZONE_INSTANCE = [] - - -def get_zonefile_instance(new_instance=False): - """ - This is a convenience function which provides a :class:`ZoneInfoFile` - instance using the data provided by the ``dateutil`` package. By default, it - caches a single instance of the ZoneInfoFile object and returns that. - - :param new_instance: - If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and - used as the cached instance for the next call. Otherwise, new instances - are created only as necessary. - - :return: - Returns a :class:`ZoneInfoFile` object. - - .. versionadded:: 2.6 - """ - if new_instance: - zif = None - else: - zif = getattr(get_zonefile_instance, '_cached_instance', None) - - if zif is None: - zif = ZoneInfoFile(getzoneinfofile_stream()) - - get_zonefile_instance._cached_instance = zif - - return zif - - -def gettz(name): - """ - This retrieves a time zone from the local zoneinfo tarball that is packaged - with dateutil. - - :param name: - An IANA-style time zone name, as found in the zoneinfo file. - - :return: - Returns a :class:`dateutil.tz.tzfile` time zone object. - - .. warning:: - It is generally inadvisable to use this function, and it is only - provided for API compatibility with earlier versions. This is *not* - equivalent to ``dateutil.tz.gettz()``, which selects an appropriate - time zone based on the inputs, favoring system zoneinfo. 
This is ONLY - for accessing the dateutil-specific zoneinfo (which may be out of - date compared to the system zoneinfo). - - .. deprecated:: 2.6 - If you need to use a specific zoneinfofile over the system zoneinfo, - instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call - :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. - - Use :func:`get_zonefile_instance` to retrieve an instance of the - dateutil-provided zoneinfo. - """ - warnings.warn("zoneinfo.gettz() will be removed in future versions, " - "to use the dateutil-provided zoneinfo files, instantiate a " - "ZoneInfoFile object and use ZoneInfoFile.zones.get() " - "instead. See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].zones.get(name) - - -def gettz_db_metadata(): - """ Get the zonefile metadata - - See `zonefile_metadata`_ - - :returns: - A dictionary with the database metadata - - .. deprecated:: 2.6 - See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, - query the attribute ``zoneinfo.ZoneInfoFile.metadata``. - """ - warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " - "versions, to use the dateutil-provided zoneinfo files, " - "ZoneInfoFile object and query the 'metadata' attribute " - "instead. 
See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/utill/dateutil/zoneinfo/rebuild.py b/utill/dateutil/zoneinfo/rebuild.py deleted file mode 100644 index 78f0d1a..0000000 --- a/utill/dateutil/zoneinfo/rebuild.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -import os -import tempfile -import shutil -import json -from subprocess import check_call -from tarfile import TarFile - -from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME - - -def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): - """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* - - filename is the timezone tarball from ``ftp.iana.org/tz``. - - """ - tmpdir = tempfile.mkdtemp() - zonedir = os.path.join(tmpdir, "zoneinfo") - moduledir = os.path.dirname(__file__) - try: - with TarFile.open(filename) as tf: - for name in zonegroups: - tf.extract(name, tmpdir) - filepaths = [os.path.join(tmpdir, n) for n in zonegroups] - try: - check_call(["zic", "-d", zonedir] + filepaths) - except OSError as e: - _print_on_nosuchfile(e) - raise - # write metadata file - with open(os.path.join(zonedir, METADATA_FN), 'w') as f: - json.dump(metadata, f, indent=4, sort_keys=True) - target = os.path.join(moduledir, ZONEFILENAME) - with TarFile.open(target, "w:%s" % format) as tf: - for entry in os.listdir(zonedir): - entrypath = os.path.join(zonedir, entry) - tf.add(entrypath, entry) - finally: - shutil.rmtree(tmpdir) - - -def _print_on_nosuchfile(e): - """Print helpful troubleshooting message - - e is an exception raised by subprocess.check_call() - - """ - if e.errno == 2: - logging.error( - "Could not find zic. 
Perhaps you need to install " - "libc-bin or some other package that provides it, " - "or it's not in your PATH?") diff --git a/utill/db/model.py b/utill/db/model.py deleted file mode 100644 index 4bfdc47..0000000 --- a/utill/db/model.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -from .mysql import mysql -from .sqlite import sqlite -from kcweb import config -from kcweb import common -class model: - table=None - fields={} - __db=None - config=config.database - def __init__(self): - if not self.table: - self.table=self.__class__.__name__ - self.__db=common.M(self.table,self.config) - def create_table(self): - "创建表" - sqlist=[] - for k in self.fields.keys(): - sqlist.append(k+" "+self.fields[k]) - # print(self.table) - sqls="create table "+self.table+" (" - for k in sqlist: - sqls=sqls+k+", " - sqls=sqls[:-2]+")" - # print(sqls) - self.__db.execute(sqls) - def find(self): - return self.__db.find() - def select(self): - lists=self.__db.select() - # print(lists) - return lists - def insert(self,data): - return self.__db.insert(data) - def update(self,data): - return self.__db.update(data) - def startTrans(self): - "开启事务,仅对 update方法、delete方法、install方法有效" - self.__db.startTrans() - def commit(self): - """事务提交 - - 增删改后的任务进行提交 - """ - self.__db.commit() - def rollback(self): - """事务回滚 - - 增删改后的任务进行撤销 - """ - self.__db.rollback() - def where(self,where = None,*wheres): - """设置过滤条件 - - 传入方式: - "id",2 表示id='2' - - "id","in",2,3,4,5,6,...表示 id in (2,3,4,5,6,...) - - "id","or",2,3,4,5,6,...表示 id=2 or id=3 or id=4... 
- - [("id","gt",6000),"and",("name","like","%超")] 表示 ( id > "6000" and name LIKE "%超" ) - - "id","eq",1 表示 id = '1' - - eq 等于 - neq 不等于 - gt 大于 - egt 大于等于 - lt 小于 - elt 小于等于 - like LIKE - """ - self.__db.where(where,*wheres) - return self - def field(self,field = "*"): - """设置过滤显示条件 - - 参数 field:str 字符串 - """ - self.__db.field(field) - return self - __limit=[] - def limit(self,offset, length = None): - """设置查询数量 - - 参数 offset:int 起始位置 - - 参数 length:int 查询数量 - """ - self.__db.limit(offset, length) - return self - def order(self,strs=None,*strs1): - """设置排序查询 - - 传入方式: - - "id desc" - - "id",'name','appkey','asc' - - "id",'name','appkey' 不包含asc或desc的情况下 默认是desc - - ['id','taskid',{"task_id":"desc"}] - """ - self.__db.order(strs=None,*strs1) - return self - __distinct=None - def distinct(self,bools=None): - "用于返回唯一不同的值,配合field方法使用生效,消除所有重复的记录,并只获取唯一一次记录。" - self.__db.distinct(bools) - return self - def deltableall(self): - "删除当前数据库所有表格 mysql有效" - if self.conf['type']=='mysql': - a=self.__db.execute("SELECT concat('DROP TABLE IF EXISTS ', table_name, ';') FROM information_schema.tables WHERE table_schema = 'core1';") - for k in a: - self.__db.execute(k["concat('DROP TABLE IF EXISTS ', table_name, ';')"]) - - - - -class dbtype: - conf=model.config - def int(LEN=16,DEFAULT=False,NULL=False,UNIQUE=False,PRI=False,A_L=False): - # print(dbtype.conf['type']) - if dbtype.conf['type']=='mysql': - strs="INT("+str(LEN)+")" - if DEFAULT: - strs=strs+" DEFAULT "+str(DEFAULT) - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - if UNIQUE: - strs=strs+" UNIQUE" - if PRI: - strs=strs+" PRIMARY KEY" - if A_L: - strs=strs+" AUTO_INCREMENT" - else: - strs="INTEGER" - if DEFAULT: - strs=strs+" DEFAULT "+str(DEFAULT) - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - if UNIQUE: - strs=strs+" UNIQUE" - if PRI: - strs=strs+" PRIMARY KEY" - if A_L: - strs=strs+" AUTOINCREMENT" - return strs - def 
varchar(LEN=32,DEFAULT=False,NULL=False,UNIQUE=False,INDEX=False,FULLTEXT=False): - strs="VARCHAR("+str(LEN)+")" - if DEFAULT: - strs=strs+" DEFAULT "+str(DEFAULT) - elif DEFAULT=='': - strs=strs+" DEFAULT ''" - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - if UNIQUE: - strs=strs+" UNIQUE" - if INDEX: - strs=strs+" INDEX" - if FULLTEXT: - strs=strs+" FULLTEXT" - return strs - def text(NULL=False): - if dbtype.conf['type']=='mysql': - strs="TEXT CHARACTER SET utf8 COLLATE utf8_general_ci" - else: - strs="TEXT" - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - return strs - def char(LEN=16,DEFAULT=False,NULL=False,UNIQUE=False,INDEX=False): - strs=" CHAR("+str(LEN)+")" - if DEFAULT: - strs=strs+" DEFAULT "+str(DEFAULT) - elif DEFAULT=='': - strs=strs+" DEFAULT ''" - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - if UNIQUE: - strs=strs+" UNIQUE" - if INDEX: - strs=strs+" INDEX" - return strs - def decimat(LEN="10,2",DEFAULT=False,NULL=False,UNIQUE=False,INDEX=False): - "小数类型" - strs="DECIMAL("+str(LEN)+")" - if DEFAULT: - strs=strs+" DEFAULT "+str(DEFAULT) - elif DEFAULT=='': - strs=strs+" DEFAULT ''" - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - if UNIQUE: - strs=strs+" UNIQUE" - if INDEX: - strs=strs+" INDEX" - return strs - def date(NULL=False): - strs=" DATE" - if NULL: - strs=strs+" NULL" - else: - strs=strs+" NOT NULL" - return strs \ No newline at end of file diff --git a/utill/db/mongodb.py b/utill/db/mongodb.py deleted file mode 100644 index 960eaa4..0000000 --- a/utill/db/mongodb.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -import pymongo,re -from bson.objectid import ObjectId -from kcweb.config import mongo as mongodb -class mongo: - __config=mongodb - __clientobj=None - __dbobj=None - __tabobj=None - def __setconn(self): - if not self.__clientobj: - if self.__config['retryWrites']: - strs='mongodb://'+self.__config['host']+':'+self.__config['port']+'/' - else: - 
strs='mongodb://'+self.__config['host']+':'+self.__config['port']+'/?retryWrites=false' - self.__clientobj = pymongo.MongoClient(strs) - self.__dbobj = self.__clientobj[self.__config['db']] - if self.__config['user'] and self.__config['password']: - self.__dbobj.authenticate(self.__config['user'],self.__config['password']) #账号密码认证 - self.__tabobj=self.__dbobj[self.__table] - def connect(self,config): - """设置mongo链接信息 - - 参数 config 参考配置信息格式 可以设置数据库名(以字符串形式) - - 返回 mongodb对象 - """ - if config: - if isinstance(config,dict): - if "host" in config: - self.__config['host']=config['host'] - if "port" in config: - self.__config['port']=config['port'] - if "user" in config: - self.__config['user']=config['user'] - if "password" in config: - self.__config['password']=config['password'] - if "db" in config: - self.__config['db']=config['db'] - elif isinstance(config,str): - self.__config['db']=config - else: - print("config类型错误,设置连接不生效") - return self - def getobj(self): - "获取mongodb链接实例" - self.__setconn() - return self.__tabobj - def select(self,id=None): - """查询所有文档 - - 返回 文档列表 - """ - self.__setconn() - if id: - self.where('_id',id) - lists=[] - if self.__field: - arr=self.__tabobj.find(self.__where,self.__field) - else: - arr=self.__tabobj.find(self.__where) - if self.__limit: - if self.__limit[1]: - arr.limit(self.__limit[1]) - arr.skip(self.__limit[0]) - else: - arr.limit(self.__limit[0]) - for k in arr: - try: k['_id'] - except: pass - else: k['_id']=str(k['_id']) - lists.append(k) - return lists - def find(self,id=None): - """查询一条文档 - - 返回 文档内容 - """ - self.__setconn() - if id: - self.where('_id',id) - if self.__field: - arr = self.__tabobj.find_one(self.__where,self.__field) - else: - arr = self.__tabobj.find_one(self.__where) - try: arr['_id'] - except: pass - else: arr['_id']=str(arr['_id']) - return arr - def countlist(self): - """查询文档数量和所有文档 - - 返回 文档数量,文档列表 - """ - self.__setconn() - lists=[] - if self.__field: - 
arr=self.__tabobj.find(self.__where,self.__field) - else: - arr=self.__tabobj.find(self.__where) - if self.__limit: - if self.__limit[1]: - arr.limit(self.__limit[1]) - arr.skip(self.__limit[0]) - else: - arr.limit(self.__limit[0]) - for k in arr: - try: k['_id'] - except: pass - else: k['_id']=str(k['_id']) - lists.append(k) - return arr.count(),lists - def count(self): - """查询文档数量 - - 返回 文档数量 - """ - self.__setconn() - count=self.__tabobj.find(self.__where,{}).count() - return count - def update(self,data,multi=True): - """文档更新 - - 参数 data 要更新的内容 格式:{"name":"测试","age":20} - - multi 默认True 是否全部更新 - """ - #{ "count" : { $gt : 3 } } , { $set : { "test2" : "OK"} } - self.__setconn() - # print(self.__where) - # print({"$set":data}) - ar=self.__tabobj.update(self.__where,{"$set":data},multi=multi) - return ar - if ar: - return ar['nModified'] - else: - return 0 - def delete(self,id=None): - """文档删除 删除条件是where函数 - """ - self.__setconn() - if id: - self.where('_id',id) - if self.__where: - # print(self.__where) - # exit() - bo=self.__tabobj.remove(self.__where) - if bo: - return bo['n'] - else: - return 0 - else: - return 0 - def deleteAll(self,id=None): - """删所有文档除 - """ - self.__setconn() - bo=self.__tabobj.remove({}) - if bo: - return bo['n'] - else: - return 0 - def insert(self,dicts): - """插入文档 单条插入或多条插入 - - 参数 dicts 要插入的内容 单条格式:{"name":"测试","age":20} 。 多条格式:[{"name":"测试","age":20},{"name":"测试","age":20}] - - 返回插入的数量 - - """ - self.__setconn() - co=0 - if isinstance(dicts,dict): - if self.__tabobj.insert_one(dicts): - co=1 - elif isinstance(dicts,list): - lens=len(dicts) - if lens>100: - raise RuntimeError('列表数量超过最大限制100') - if self.__tabobj.insert_many(dicts): - co=lens - return co - __table="" - def table(self,table): - """设置集合名 - - 参数 table:str 表名 - """ - self.__table=table - return self - __where={} - def where(self,where = None,*wheres): - """设置过滤条件 - - 参数 where:str 字符串 或 列表 - - 传入方式: - - "id",2 表示id='2' - - "id","in",2,3,4 ...表示 id=2 or id=3 or id=4 ... 
- - "id","or",2,3,4 ...表示 id=2 or id=3 or id=4 ... - - "id","neq",1 表示 id 不等于 '1' - - eq 等于 - neq 不等于 - gt 大于 - egt 大于等于 - lt 小于 - elt 小于等于 - like LIKE - """ - # print("wheres",wheres) {'comments':re.compile('abc')} - if isinstance(where,dict): - self.__where=where - elif isinstance(where,list): - # import re [("name","eq",'冯坤'),"and",("aa","like",'%wfweaf')] - # print(re.compile('abc')) - #{"likes": {$gt:50}, "name": "冯坤","title": "MongoDB 教程"} - #{"likes":'dav', $or: [{"by": "菜鸟教程"},{"title": "MongoDB 教程"}]} - zd={} - t='' - for k in where: - if isinstance(k,tuple): - if k[1]=='eq': - if t=='or': - zd['$or'].append({k[0]:k[2]}) - else: - zd[k[0]]=k[2] - elif k[1]=='like': - if t=='or': - zd['$or'].append({k[0]:re.compile(re.sub('%','',k[2]))}) - else: - zd[k[0]]=re.compile(re.sub('%','',k[2])) - else: - if t=='or': - zd['$or'].append({k[0]:{'$'+k[1]:k[2]}}) - else: - n=self.__operator(k[1]) - zd[k[0]]={n:k[2]} - elif isinstance(k,str): - if k=='or': - t=k - zd['$or']=[] - self.__where=zd - # print(zd) - # exit() - elif isinstance(where,str) and len(wheres)==1: - wheres=list(wheres) - if where=='_id': - wheres[0]=ObjectId(wheres[0]) - self.__where[where]=wheres[0] - elif isinstance(where,str) and len(wheres)==2: - wheres=list(wheres) - if where=='_id': - wheres[1]=ObjectId(wheres[1]) - if wheres[0] == 'eq': - self.__where[where]=wheres[1] - elif wheres[0]=='like': - self.__where[where]=re.compile(re.sub('%','',wheres[1])) - else: - n=self.__operator(wheres[0]) - self.__where[where]={n:wheres[1]} - elif isinstance(where,str) and isinstance(wheres,tuple): - #{$or: [{key1: value1}, {key2:value2}]} - # self.__where={'$or': [{where: wheres[0]}, {where:wheres[1]}]} - # print(wheres) - lists=[] - for k in wheres: - lists.append({where:k}) - self.__where={'$or': lists} - # print(self.__where) - return self - __field={} - def field(self,field = "*"): - """设置过滤显示条件 - - 参数 field:str 字符串 - """ - if field and field!='*': - field=field.split(",") - zd={} - for f in field: - 
zd[f]=1 - self.__field=zd - return self - __limit=[] - def limit(self,offset, length = None): - """设置查询数量 - - 参数 offset:int 起始位置 - - 参数 length:int 查询数量 - """ - if length==None: - length=offset - offset=0 - elif offset > 0: - offset=offset*length-length - self.__limit=[offset,length] - return self - # def order(self,k): - # pass - def __operator(self,strs): - """运算符转换 - 参数 strs 待转的字符串 - 返回 已转换的运算符 - - 符号定义 - eq 等于 - neq 不等于 - gt 大于 - egt 大于等于 - lt 小于 - elt 小于等于 - """ - strss=strs.upper() - if strss == 'NEQ': - k='$ne' - elif strss == 'GT': - k='$gt' - elif strss == 'EGT': - k='$gte' - elif strss == 'LT': - k='$lt' - elif strss == 'ELT': - k='$lte' - else: - k=strss - return k \ No newline at end of file diff --git a/utill/db/mysql.py b/utill/db/mysql.py deleted file mode 100644 index 8bfaae3..0000000 --- a/utill/db/mysql.py +++ /dev/null @@ -1,1020 +0,0 @@ -# -*- coding: utf-8 -*- -from .pymysql import connect,escape_string -# import config.conf as config -import kcweb.config as config -import time,traceback,decimal,random -dbconfig=config.database -class mysql: - """数据库实例""" - __config=dbconfig - __conn={} #数据库链接对象 - __cursor=None #游标对象 - __errorcount=dbconfig['break'] #允许最大链接错误次数 - __errorcounts=0 #默认链接错误次数 - __dbObjcount=dbconfig['dbObjcount'] #数据库链接实例数量 - __sql='' - __sqls='' - __masteridentifier='' # 主服务器标识 - __slaveidentifier='' # 从服务器标识 - def __del__(self): - if not self.__config['pattern'] and self.__conn: - try: - self.__conn.close() - except Exception as e: - print("关闭失败",e) - __dbcount=1 - def __setdbcount(self): - "设置数据库配置总数量" - if isinstance(self.__config['host'],str): - self.__config['host']=[self.__config['host']] - if isinstance(self.__config['port'],str): - self.__config['port']=[self.__config['port']] - if isinstance(self.__config['user'],str): - self.__config['user']=[self.__config['user']] - if isinstance(self.__config['password'],str): - self.__config['password']=[self.__config['password']] - if isinstance(self.__config['db'],str): - 
self.__config['db']=[self.__config['db']] - host=len(self.__config['host']) - port=len(self.__config['port']) - user=len(self.__config['user']) - password=len(self.__config['password']) - db=len(self.__config['db']) - lists=[host,port,user,password,db] - lists.sort() - self.__dbcount=lists[0] - def __closeconn(self,identifier): - "长链接模式下,关闭链接池的链接" - if self.__config['pattern']: - if identifier in mysql.__conn: - for k in mysql.__conn[identifier]: - # print(identifier) - try: - k['obj'].close() - print(k,"关闭成功") - except: - print(k,"关闭错误") - mysql.__conn[identifier]=[] - __dbobjident=None #集中式(单一服务器)并且长连接模式下随机服务器链接标识 和 分布式(主从服务器)模式下随机服务器链接标识 - def __connects(self,typess="DQL"): - """设置数据库链接 - - 参数 typess :数据查询语言DQL,数据操纵语言DML,数据定义语言DDL,数据控制语言DCL - """ - - try: - if self.__config['deploy']==0: # 集中式(单一服务器) - if self.__config['pattern']: # 长连接情况下 - self.__masteridentifier=self.__config['host'][0]+str(self.__config['port'][0])+self.__config['db'][0] # 服务器标识 - if self.__masteridentifier not in mysql.__conn or len(mysql.__conn[self.__masteridentifier])<1: - i=0 - masterlistsdb=[] - while iself.__config['dbObjcount'] * self.__dbcount: #长连接情况下如果错误次数超过数据实例数量 关闭使用连接进行重连接 - self.__patternerrorcount=0 - if self.__config['deploy'] == 1: #分布式(主从服务器) 情况下 - print("数据库连接失效,关闭主从连接池后重新连接") - self.__closeconn(self.__masteridentifier) - self.__closeconn(self.__slaveidentifier) - time.sleep(10) - # mysql.__conn=[] #父类数据库实例 - self.__connects(typess) - self.__execute(typess) - else: - print("数据库连接失效,关闭主连接池后重新连接") - self.__closeconn(self.__masteridentifier) - time.sleep(10) - # mysql.__conn=[] #父类数据库实例 - self.__connects(typess) - self.__execute(typess) - else: - self.__patternerrorcount=self.__patternerrorcount+1 - self.__execute(typess) - else: - self.__conn[bs][self.__dbobjident]['error']=self.__conn[bs][self.__dbobjident]['error']+1 #当前数据库连接实例异常错误数量 - if self.__conn[bs][self.__dbobjident]['error'] > 2: - try: - mysql.__conn[bs][self.__dbobjident]['obj'].close() #关闭当前实例 - except Exception 
as e: - print("关闭异常",e) - # self.__conn[bs].pos(self.__dbobjident) #从列表中删除 - # if errorcodes == 2013: - #创建一个新的数据库实例 - if types=='master': - s=random.randint(0,self.__config['master_num']-1) - else: - s=random.randint(self.__dbcount-self.__config['master_num']-1,self.__dbcount-1) - obj=connect(host=self.__config['host'][s], port=self.__config['port'][s], user=self.__config['user'][s], password=self.__config['password'][s], db=self.__config['db'][s], charset=self.__config['charset']) - mysql.__conn[bs][self.__dbobjident]['obj']=obj - mysql.__conn[bs][self.__dbobjident]['error']=0 - print("已重新创建一个新的数据库实例",mysql.__conn) - self.__execute(typess) - else: # 短连接情况下 - print("服务器正在被关闭,关闭当前连接后重试") - try: - mysql.__conn.close() #关闭当前实例 - except Exception as e: - print("关闭异常",e) - # mysql.__conn=[] #父类数据库实例 - self.__connects(typess) - self.__execute(typess) - else: - raise Exception(e) - else: - self.__patternerrorcount=0 - return res - - def query(self,sql): - """执行sql语句 注:只支持单一服务器模式 - - 参数 sql 字符串 - - 返回 列表 或 数字 - """ - self.__sql=sql - res=self.__execute('DQL') - description=self.__cursor.description #获取字段 - result = self.__cursor.fetchall() #获取查询结果 - # print(result) - self.__cursor.close() - if description is None: - return res - else: - lists=[] - data_dict=[] - for field in description:#获取字段 - data_dict.append(field[0]) - for k in result: - i=0 - dicts={} - for j in k: - dicts[data_dict[i]]=j - i=i+1 - lists.append(dicts) - return lists - def execute(self,sql): - """执行sql语句 注:只支持单一服务器模式 - - 参数 sql 字符串 - - 返回 列表 或 数字 - """ - self.__sql=sql - res=self.__execute('DML') - description=self.__cursor.description #获取字段 - result = self.__cursor.fetchall() #获取查询结果 - # print(result) - self.__cursor.close() - if description is None: - return res - else: - lists=[] - data_dict=[] - for field in description:#获取字段 - data_dict.append(field[0]) - for k in result: - i=0 - dicts={} - for j in k: - dicts[data_dict[i]]=j - i=i+1 - lists.append(dicts) - return lists - - - def 
select(self,id=None): - """select查询 - - 返回 list(列表) - """ - if id : - self.__where="id=%d" % id - self.__setsql() - if self.__buildSql: - self.__sqls="("+self.__sql+")" - self.__None() - return self.__sqls - - self.__execute() - description=self.__cursor.description #获取字段 - result = self.__cursor.fetchall() #获取查询结果 - # print(result) - self.__cursor.close() - lists=[] - keys =[] - for field in description:#获取字段 - keys.append(field[0]) - key_number = len(keys) - for row in result: - item = dict() - for q in range(key_number): - k=row[q] - if type(row[q])==decimal.Decimal: - k=float(row[q]) - item[keys[q]] = k - lists.append(item) - return lists - def find(self,id=None): - """查询一条记录 - - 返回 字典 - """ - if id : - self.__where="id=%s" % id - self.limit(1) - self.__setsql() - if self.__buildSql: - self.__sqls="("+self.__sql+")" - self.__None() - return self.__sqls - self.__execute() - description=self.__cursor.description #获取字段 - result = self.__cursor.fetchall() #获取查询结果 - # print(result) - self.__cursor.close() - - item = dict() - keys =[] - for field in description:#获取字段 - keys.append(field[0]) - key_number = len(keys) - for row in result: - for q in range(key_number): - k=row[q] - if type(row[q])==decimal.Decimal: - k=float(row[q]) - item[keys[q]] = k - return item - - def count(self,field="*"): - """查询数量 - - 返回 int 数字 - """ - self.__field=field - self.__setsql('count') - if self.__buildSql: - self.__sqls="("+self.__sql+")" - return self.__sql - self.__execute() - result = self.__cursor.fetchall() #获取查询结果 - self.__cursor.close() - cou=int(result[0][0]) - return cou - def max(self,field): - """查询某字段的最大值 - - 返回 int 数字 - """ - self.__field=field - self.__setsql('max') - if self.__buildSql: - self.__sqls="("+self.__sql+")" - return self.__sql - self.__execute() - result = self.__cursor.fetchall() #获取查询结果 - self.__cursor.close() - cou=int(result[0][0]) - return cou - def min(self,field): - """查询某字段的最小值 - - 返回 int 数字 - """ - self.__field=field - self.__setsql('min') - if 
self.__buildSql: - self.__sqls="("+self.__sql+")" - return self.__sql - self.__execute() - result = self.__cursor.fetchall() #获取查询结果 - self.__cursor.close() - cou=int(result[0][0]) - return cou - def avg(self,field): - """查询某字段的平均值 - - 返回 int 数字 - """ - self.__field=field - self.__setsql('avg') - if self.__buildSql: - self.__sqls="("+self.__sql+")" - return self.__sql - self.__execute() - result = self.__cursor.fetchall() #获取查询结果 - self.__cursor.close() - cou=int(result[0][0]) - return cou - def sum(self,field): - """查询某字段之和 - - 返回 int 数字 - """ - self.__field=field - self.__setsql('sum') - if self.__buildSql: - self.__sqls="("+self.__sql+")" - return self.__sql - self.__execute() - result = self.__cursor.fetchall() #获取查询结果 - self.__cursor.close() - cou=int(result[0][0]) - return cou - - - def update(self,data,affair=False): - """数据表更新 - - 参数 data 要更新的内容 格式:{"name":"测试","age":20} - - 参数 affair 是否开启事务 True表示手动提交事务 False表示自动提交事务 - """ - self.__setsql('update',data) - res=self.__execute('DML') - if affair==False and self.__startTrans==False: - if not self.__config['pattern']: - self.__conn.commit() - else: - self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() - self.__cursor.close() - return res - def delete(self,affair=False): - """数据表删除 - - 参数 affair 是否开启事务 True表示手动提交事务 False表示自动提交事务 - """ - self.__setsql('delete') - if self.__where: - res=self.__execute('DML') - else: - return 0 - if affair==False and self.__startTrans==False: - if not self.__config['pattern']: - self.__conn.commit() - else: - self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() - self.__cursor.close() - return res - def insert(self,dicts,affair=False): - """插入数据库 单条插入或多条插入 - - 参数 dicts 要插入的内容 单条格式:{"name":"测试","age":20} 。 多条格式:[{"name":"测试","age":20},{"name":"测试","age":20}] - - 参数 affair 是否开启事务 True表示手动提交事务 False表示自动提交事务 - - 返回插入的数量 - """ - self.__setsql('insert',dicts) - res=self.__execute('DML') - if affair==False and self.__startTrans==False: - if not 
self.__config['pattern']: - self.__conn.commit() - else: - self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() - self.__cursor.close() - return res - - __startTrans=False - def startTrans(self): - "开启事务,仅对 update方法、delete方法、install方法有效" - self.__startTrans=True - def commit(self): - """事务提交 - - 增删改后的任务进行提交 - """ - if not self.__config['pattern']: - self.__conn.commit() - else: - self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() - - def rollback(self): - """事务回滚 - - 增删改后的任务进行撤销 - """ - if not self.__config['pattern']: - self.__conn.rollback() - else: - self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].rollback() - def getsql(self): - """得到生成的sql语句""" - return self.__sql - __buildSql=None - def buildSql(self): - """构造子查询""" - self.__buildSql=True - return self - def __None(self): - "清除所有赋值条件" - self.__lock=None - self.__distinct=None - self.__join=None - self.__joinstr='' - self.__alias=None - self.__having=None - self.__group=None - self.__group1=None - self.__order=None - self.__order1=None - self.__limit=None - self.__field="*" - self.__where=None - self.__wheres=() - self.__table=None - self.__buildSql=None - self.__table=None - - __where=None - __wheres=() - def where(self,where = None,*wheres): - """设置过滤条件 - - 传入方式: - "id",2 表示id='2' - - "id","in",2,3,4,5,6,...表示 id in (2,3,4,5,6,...) - - "id","in",[2,3,4,5,6,...]表示 id in (2,3,4,5,6,...) 
- - - [("id","gt",6000),"and",("name","like","%超")] 表示 ( id > "6000" and name LIKE "%超" ) - - "id","eq",1 表示 id = '1' - - eq 等于 - neq 不等于 - gt 大于 - egt 大于等于 - lt 小于 - elt 小于等于 - like LIKE - """ - self.__where=where - self.__wheres=wheres - return self - __field='*' - def field(self,field = "*"): - """设置过滤显示条件 - - 参数 field:str 字符串 - """ - self.__field=field - return self - __limit=[] - def limit(self,offset, length = None): - """设置查询数量 - - 参数 offset:int 起始位置 - - 参数 length:int 查询数量 - """ - self.__limit=[offset,length] - return self - def page(self,pagenow=1, length = 20): - """设置分页查询 - - 参数 pagenow:int 页码 - - 参数 length:int 查询数量 - """ - offset=(pagenow-1)*length - self.__limit=[offset,length] - return self - __order=None - __order1=None - def order(self,strs=None,*strs1): - """设置排序查询 - - 传入方式: - - "id desc" - - "id",'name','appkey','asc' - - "id",'name','appkey' 不包含asc或desc的情况下 默认是desc - - ['id','taskid',{"task_id":"desc"}] - """ - self.__order=strs - self.__order1=strs1 - return self - __group=None - __group1=None - def group(self,strs=None,*strs1): - """设置分组查询 - - 传入方式: - - "id,name" - - "id","name" - """ - self.__group=strs - self.__group1=strs1 - return self - __having=None - def having(self,strs=None): - """用于配合group方法完成从分组的结果中筛选(通常是聚合条件)数据 - - 参数 strs:string 如:"count(time)>3" - """ - self.__having=strs - return self - __alias=None - def alias(self,strs=None): - """用于设置当前数据表的别名,便于使用其他的连贯操作例如join方法等。 - - 参数 strs:string 默认当前表作为别名 - """ - if strs: - self.__alias=strs - else: - self.__alias=self.__table - return self - __join=None - __joinstr='' - def join(self,strs,on=None,types='INNER'): - """用于根据两个或多个表中的列之间的关系,从这些表中查询数据 - - 参数 strs string 如:"test t1" test表设置别名t1 - - 参数 on string 如:"t1.id=t2.pid" 设置连接条件 - - 参数 types 支持INNER、LEFT、RIGHT、FULL 默认INNER - - """ - joinstr='' - if strs and on: - joinstr=joinstr+types+" JOIN "+strs+" ON "+on+" " - if joinstr: - self.__joinstr=self.__joinstr+joinstr - return self - __distinct=None - def distinct(self,bools=None): - 
"用于返回唯一不同的值,配合field方法使用生效,来消除所有重复的记录,并只获取唯一一次记录。" - self.__distinct=bools - return self - __lock=None - def lock(self,strs=None): - """用于数据库的锁机制,在查询或者执行操作的时候使用 - - 排他锁 (FOR UPDATE) - - 共享锁 (lock in share mode) - - 参数 strs 如:True表示自动在生成的SQL语句最后加上FOR UPDATE, - - - """ - self.__lock=strs - return self - - # __cache=[] - # def cache(self,endtime,tag=None): - # """设置查询缓存 - - # 参数 endtime:int 缓存数据 0永久 - - # 参数 tag:int 缓存标签 - # """ - # self.__cache=[endtime,tag] - # return self - def __setsql(self,types=None,data = {}): - """生成sql语句""" - if types==None: - self.__sql="SELECT" - if self.__distinct and self.__field: - self.__sql=self.__sql+" DISTINCT" - if self.__alias: - self.__sql=self.__sql+" %s FROM %s %s" % (self.__field,self.__table,self.__alias) - else: - self.__sql=self.__sql+" %s FROM %s" % (self.__field,self.__table) - elif types=='count': - self.__sql="SELECT COUNT(%s) FROM %s" % (self.__field,self.__table) - elif types=='max': - self.__sql="SELECT MAX(%s) FROM %s" % (self.__field,self.__table) - elif types=='min': - self.__sql="SELECT MIN(%s) FROM %s" % (self.__field,self.__table) - elif types=='avg': - self.__sql="SELECT AVG(%s) FROM %s" % (self.__field,self.__table) - elif types=='sum': - self.__sql="SELECT SUM(%s) FROM %s" % (self.__field,self.__table) - elif types=='update': - strs='' - for k in data: - if isinstance(data[k],str): - strs=strs+" %s = '%s' ," % (k,escape_string(data[k])) - else: - strs=strs+" %s = %s ," % (k,data[k]) - strs=strs[:-1] - self.__sql="UPDATE %s SET %s" % (self.__table,strs) - elif types=='delete': - self.__sql="DELETE FROM %s" % self.__table - elif types=='insert': - if isinstance(data,dict): - strs='' - val='' - for k in data: - strs=strs+"%s," % k - if isinstance(data[k],str): - val=val+"'%s'," % escape_string(data[k]) - else: - val=val+"%s," % data[k] - strs=strs[:-1] - val=val[:-1] - self.__sql="INSERT INTO %s (%s) VALUES (%s)" % (self.__table,strs,val) - elif isinstance(data,list): - strs='' - val='(' - for k in data[0]: - 
strs=strs+" , "+k - for k in data: - for j in k: - if isinstance(k[j],str): - val=val+"'"+str(k[j])+"'," - else: - val=val+str(k[j])+"," - val=val[:-1] - val=val+"),(" - val=val[:-2] - self.__sql="INSERT INTO "+self.__table+" ("+strs[3:]+") VALUES "+val - if self.__joinstr: - self.__sql=self.__sql+" "+self.__joinstr - if self.__where: - if isinstance(self.__where,str): - if self.__wheres: - if len(self.__wheres) == 2: - if isinstance(self.__wheres[1],list): - self.__sql=self.__sql + " WHERE %s %s (" % (self.__where,self.__operator(self.__wheres[0])) - for k in self.__wheres[1]: - self.__sql=self.__sql+str(k)+"," - self.__sql=self.__sql[:-1]+")" - else: - self.__sql=self.__sql + " WHERE %s %s '%s'" % (self.__where,self.__operator(self.__wheres[0]),self.__wheres[1]) - elif len(self.__wheres) > 2: - if self.__wheres[0]=='in': - strs=str(self.__wheres[1]) - i=0 - for k in self.__wheres: - if i > 1: - strs=strs+","+str(k) - i=i+1 - self.__sql=self.__sql + " WHERE %s in (%s)" % (self.__where,strs) - else: - self.__sql=self.__sql + " WHERE %s = '%s'" % (self.__where,self.__wheres[0]) - else: - self.__sql=self.__sql + " WHERE %s" % self.__where - elif isinstance(self.__where,list): - self.__sql=self.__sql + " WHERE %s" % self.__listTrans() - else: - print("参数where类型错误") - if self.__order: - s='' - if isinstance(self.__order,list): - for strs in self.__order: - if isinstance(strs,str): - s=s+strs+"," - else: - pass - for key in strs: - s=s+key+" "+strs[key] - s=s+"," - s=s[:-1] - if isinstance(self.__order,str): - if self.__order1: - if len(self.__order1) > 1: - if self.__order1[len(self.__order1)-1] == 'desc' or self.__order1[len(self.__order1)-1] == 'asc': - i=0 - while iLL", hash_pass) - hash_message_n = struct.unpack(">LL", hash_message) - - rand_st = RandStruct_323( - hash_pass_n[0] ^ hash_message_n[0], hash_pass_n[1] ^ hash_message_n[1] - ) - outbuf = io.BytesIO() - for _ in range(min(SCRAMBLE_LENGTH_323, len(message))): - outbuf.write(int2byte(int(rand_st.my_rnd() * 
31) + 64)) - extra = int2byte(int(rand_st.my_rnd() * 31)) - out = outbuf.getvalue() - outbuf = io.BytesIO() - for c in out: - outbuf.write(int2byte(byte2int(c) ^ byte2int(extra))) - return outbuf.getvalue() - - -def _hash_password_323(password): - nr = 1345345333 - add = 7 - nr2 = 0x12345671 - - # x in py3 is numbers, p27 is chars - for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]: - nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF - nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF - add = (add + c) & 0xFFFFFFFF - - r1 = nr & ((1 << 31) - 1) # kill sign bits - r2 = nr2 & ((1 << 31) - 1) - return struct.pack(">LL", r1, r2) - - -# sha256_password - - -def _roundtrip(conn, send_data): - conn.write_packet(send_data) - pkt = conn._read_packet() - pkt.check_error() - return pkt - - -def _xor_password(password, salt): - password_bytes = bytearray(password) - salt = bytearray(salt) # for PY2 compat. - salt_len = len(salt) - for i in range(len(password_bytes)): - password_bytes[i] ^= salt[i % salt_len] - return bytes(password_bytes) - - -def sha2_rsa_encrypt(password, salt, public_key): - """Encrypt password with salt and public_key. - - Used for sha256_password and caching_sha2_password. 
- """ - if not _have_cryptography: - raise RuntimeError("cryptography is required for sha256_password or caching_sha2_password") - message = _xor_password(password + b'\0', salt) - rsa_key = serialization.load_pem_public_key(public_key, default_backend()) - return rsa_key.encrypt( - message, - padding.OAEP( - mgf=padding.MGF1(algorithm=hashes.SHA1()), - algorithm=hashes.SHA1(), - label=None, - ), - ) - - -def sha256_password_auth(conn, pkt): - if conn._secure: - if DEBUG: - print("sha256: Sending plain password") - data = conn.password + b'\0' - return _roundtrip(conn, data) - - if pkt.is_auth_switch_request(): - conn.salt = pkt.read_all() - if not conn.server_public_key and conn.password: - # Request server public key - if DEBUG: - print("sha256: Requesting server public key") - pkt = _roundtrip(conn, b'\1') - - if pkt.is_extra_auth_data(): - conn.server_public_key = pkt._data[1:] - if DEBUG: - print("Received public key:\n", conn.server_public_key.decode('ascii')) - - if conn.password: - if not conn.server_public_key: - raise OperationalError("Couldn't receive server's public key") - - data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key) - else: - data = b'' - - return _roundtrip(conn, data) - - -def scramble_caching_sha2(password, nonce): - # (bytes, bytes) -> bytes - """Scramble algorithm used in cached_sha2_password fast path. 
- - XOR(SHA256(password), SHA256(SHA256(SHA256(password)), nonce)) - """ - if not password: - return b'' - - p1 = hashlib.sha256(password).digest() - p2 = hashlib.sha256(p1).digest() - p3 = hashlib.sha256(p2 + nonce).digest() - - res = bytearray(p1) - if PY2: - p3 = bytearray(p3) - for i in range(len(p3)): - res[i] ^= p3[i] - - return bytes(res) - - -def caching_sha2_password_auth(conn, pkt): - # No password fast path - if not conn.password: - return _roundtrip(conn, b'') - - if pkt.is_auth_switch_request(): - # Try from fast auth - if DEBUG: - print("caching sha2: Trying fast path") - conn.salt = pkt.read_all() - scrambled = scramble_caching_sha2(conn.password, conn.salt) - pkt = _roundtrip(conn, scrambled) - # else: fast auth is tried in initial handshake - - if not pkt.is_extra_auth_data(): - raise OperationalError( - "caching sha2: Unknown packet for fast auth: %s" % pkt._data[:1] - ) - - # magic numbers: - # 2 - request public key - # 3 - fast auth succeeded - # 4 - need full auth - - pkt.advance(1) - n = pkt.read_uint8() - - if n == 3: - if DEBUG: - print("caching sha2: succeeded by fast path.") - pkt = conn._read_packet() - pkt.check_error() # pkt must be OK packet - return pkt - - if n != 4: - raise OperationalError("caching sha2: Unknwon result for fast auth: %s" % n) - - if DEBUG: - print("caching sha2: Trying full auth...") - - if conn._secure: - if DEBUG: - print("caching sha2: Sending plain password via secure connection") - return _roundtrip(conn, conn.password + b'\0') - - if not conn.server_public_key: - pkt = _roundtrip(conn, b'\x02') # Request public key - if not pkt.is_extra_auth_data(): - raise OperationalError( - "caching sha2: Unknown packet for public key: %s" % pkt._data[:1] - ) - - conn.server_public_key = pkt._data[1:] - if DEBUG: - print(conn.server_public_key.decode('ascii')) - - data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key) - pkt = _roundtrip(conn, data) diff --git a/utill/db/pymysql/_compat.py 
b/utill/db/pymysql/_compat.py deleted file mode 100644 index 252789e..0000000 --- a/utill/db/pymysql/_compat.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys - -PY2 = sys.version_info[0] == 2 -PYPY = hasattr(sys, 'pypy_translation_info') -JYTHON = sys.platform.startswith('java') -IRONPYTHON = sys.platform == 'cli' -CPYTHON = not PYPY and not JYTHON and not IRONPYTHON - -if PY2: - import __builtin__ - range_type = xrange - text_type = unicode - long_type = long - str_type = basestring - unichr = __builtin__.unichr -else: - range_type = range - text_type = str - long_type = int - str_type = str - unichr = chr diff --git a/utill/db/pymysql/_socketio.py b/utill/db/pymysql/_socketio.py deleted file mode 100644 index 6a11d42..0000000 --- a/utill/db/pymysql/_socketio.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -SocketIO imported from socket module in Python 3. - -Copyright (c) 2001-2013 Python Software Foundation; All Rights Reserved. -""" - -from socket import * -import io -import errno - -__all__ = ['SocketIO'] - -EINTR = errno.EINTR -_blocking_errnos = (errno.EAGAIN, errno.EWOULDBLOCK) - -class SocketIO(io.RawIOBase): - - """Raw I/O implementation for stream sockets. - - This class supports the makefile() method on sockets. It provides - the raw I/O interface on top of a socket object. - """ - - # One might wonder why not let FileIO do the job instead. 
There are two - # main reasons why FileIO is not adapted: - # - it wouldn't work under Windows (where you can't used read() and - # write() on a socket handle) - # - it wouldn't work with socket timeouts (FileIO would ignore the - # timeout and consider the socket non-blocking) - - # XXX More docs - - def __init__(self, sock, mode): - if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): - raise ValueError("invalid mode: %r" % mode) - io.RawIOBase.__init__(self) - self._sock = sock - if "b" not in mode: - mode += "b" - self._mode = mode - self._reading = "r" in mode - self._writing = "w" in mode - self._timeout_occurred = False - - def readinto(self, b): - """Read up to len(b) bytes into the writable buffer *b* and return - the number of bytes read. If the socket is non-blocking and no bytes - are available, None is returned. - - If *b* is non-empty, a 0 return value indicates that the connection - was shutdown at the other end. - """ - self._checkClosed() - self._checkReadable() - if self._timeout_occurred: - raise IOError("cannot read from timed out object") - while True: - try: - return self._sock.recv_into(b) - except timeout: - self._timeout_occurred = True - raise - except error as e: - n = e.args[0] - if n == EINTR: - continue - if n in _blocking_errnos: - return None - raise - - def write(self, b): - """Write the given bytes or bytearray object *b* to the socket - and return the number of bytes written. This can be less than - len(b) if not all data could be written. If the socket is - non-blocking and no bytes could be written None is returned. - """ - self._checkClosed() - self._checkWritable() - try: - return self._sock.send(b) - except error as e: - # XXX what about EINTR? - if e.args[0] in _blocking_errnos: - return None - raise - - def readable(self): - """True if the SocketIO is open for reading. 
- """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._reading - - def writable(self): - """True if the SocketIO is open for writing. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._writing - - def seekable(self): - """True if the SocketIO is open for seeking. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return super().seekable() - - def fileno(self): - """Return the file descriptor of the underlying socket. - """ - self._checkClosed() - return self._sock.fileno() - - @property - def name(self): - if not self.closed: - return self.fileno() - else: - return -1 - - @property - def mode(self): - return self._mode - - def close(self): - """Close the SocketIO object. This doesn't close the underlying - socket, except if all references to it have disappeared. - """ - if self.closed: - return - io.RawIOBase.close(self) - self._sock._decref_socketios() - self._sock = None - diff --git a/utill/db/pymysql/charset.py b/utill/db/pymysql/charset.py deleted file mode 100644 index 07d8063..0000000 --- a/utill/db/pymysql/charset.py +++ /dev/null @@ -1,212 +0,0 @@ -MBLENGTH = { - 8:1, - 33:3, - 88:2, - 91:2 - } - - -class Charset(object): - def __init__(self, id, name, collation, is_default): - self.id, self.name, self.collation = id, name, collation - self.is_default = is_default == 'Yes' - - def __repr__(self): - return "Charset(id=%s, name=%r, collation=%r)" % ( - self.id, self.name, self.collation) - - @property - def encoding(self): - name = self.name - if name in ('utf8mb4', 'utf8mb3'): - return 'utf8' - return name - - @property - def is_binary(self): - return self.id == 63 - - -class Charsets: - def __init__(self): - self._by_id = {} - self._by_name = {} - - def add(self, c): - self._by_id[c.id] = c - if c.is_default: - self._by_name[c.name] = c - - def by_id(self, id): - return self._by_id[id] - - def by_name(self, name): - return 
self._by_name.get(name.lower()) - -_charsets = Charsets() -""" -Generated with: - -mysql -N -s -e "select id, character_set_name, collation_name, is_default -from information_schema.collations order by id;" | python -c "import sys -for l in sys.stdin.readlines(): - id, name, collation, is_default = l.split(chr(9)) - print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \ - % (id, name, collation, is_default.strip()) -" - -""" -_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes')) -_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', '')) -_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes')) -_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes')) -_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', '')) -_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes')) -_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes')) -_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes')) -_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes')) -_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes')) -_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes')) -_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes')) -_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes')) -_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', '')) -_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', '')) -_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes')) -_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes')) -_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes')) -_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', '')) -_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', '')) -_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes')) -_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', '')) -_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes')) -_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes')) -_charsets.add(Charset(26, 'cp1250', 
'cp1250_general_ci', 'Yes')) -_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', '')) -_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes')) -_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', '')) -_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes')) -_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', '')) -_charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes')) -_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes')) -_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', '')) -_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes')) -_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes')) -_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes')) -_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes')) -_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes')) -_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes')) -_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', '')) -_charsets.add(Charset(43, 'macce', 'macce_bin', '')) -_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', '')) -_charsets.add(Charset(45, 'utf8mb4', 'utf8mb4_general_ci', 'Yes')) -_charsets.add(Charset(46, 'utf8mb4', 'utf8mb4_bin', '')) -_charsets.add(Charset(47, 'latin1', 'latin1_bin', '')) -_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', '')) -_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', '')) -_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', '')) -_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes')) -_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', '')) -_charsets.add(Charset(53, 'macroman', 'macroman_bin', '')) -_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes')) -_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', '')) -_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes')) -_charsets.add(Charset(63, 'binary', 'binary', 'Yes')) -_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', '')) 
-_charsets.add(Charset(65, 'ascii', 'ascii_bin', '')) -_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', '')) -_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', '')) -_charsets.add(Charset(68, 'cp866', 'cp866_bin', '')) -_charsets.add(Charset(69, 'dec8', 'dec8_bin', '')) -_charsets.add(Charset(70, 'greek', 'greek_bin', '')) -_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', '')) -_charsets.add(Charset(72, 'hp8', 'hp8_bin', '')) -_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', '')) -_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', '')) -_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', '')) -_charsets.add(Charset(76, 'utf8', 'utf8_tolower_ci', '')) -_charsets.add(Charset(77, 'latin2', 'latin2_bin', '')) -_charsets.add(Charset(78, 'latin5', 'latin5_bin', '')) -_charsets.add(Charset(79, 'latin7', 'latin7_bin', '')) -_charsets.add(Charset(80, 'cp850', 'cp850_bin', '')) -_charsets.add(Charset(81, 'cp852', 'cp852_bin', '')) -_charsets.add(Charset(82, 'swe7', 'swe7_bin', '')) -_charsets.add(Charset(83, 'utf8', 'utf8_bin', '')) -_charsets.add(Charset(84, 'big5', 'big5_bin', '')) -_charsets.add(Charset(85, 'euckr', 'euckr_bin', '')) -_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', '')) -_charsets.add(Charset(87, 'gbk', 'gbk_bin', '')) -_charsets.add(Charset(88, 'sjis', 'sjis_bin', '')) -_charsets.add(Charset(89, 'tis620', 'tis620_bin', '')) -_charsets.add(Charset(91, 'ujis', 'ujis_bin', '')) -_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes')) -_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', '')) -_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', '')) -_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes')) -_charsets.add(Charset(96, 'cp932', 'cp932_bin', '')) -_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes')) -_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', '')) -_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', '')) -_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', '')) -_charsets.add(Charset(193, 
'utf8', 'utf8_icelandic_ci', '')) -_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', '')) -_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', '')) -_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', '')) -_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', '')) -_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', '')) -_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', '')) -_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', '')) -_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', '')) -_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', '')) -_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', '')) -_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', '')) -_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', '')) -_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', '')) -_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', '')) -_charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', '')) -_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', '')) -_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', '')) -_charsets.add(Charset(211, 'utf8', 'utf8_sinhala_ci', '')) -_charsets.add(Charset(212, 'utf8', 'utf8_german2_ci', '')) -_charsets.add(Charset(213, 'utf8', 'utf8_croatian_ci', '')) -_charsets.add(Charset(214, 'utf8', 'utf8_unicode_520_ci', '')) -_charsets.add(Charset(215, 'utf8', 'utf8_vietnamese_ci', '')) -_charsets.add(Charset(223, 'utf8', 'utf8_general_mysql500_ci', '')) -_charsets.add(Charset(224, 'utf8mb4', 'utf8mb4_unicode_ci', '')) -_charsets.add(Charset(225, 'utf8mb4', 'utf8mb4_icelandic_ci', '')) -_charsets.add(Charset(226, 'utf8mb4', 'utf8mb4_latvian_ci', '')) -_charsets.add(Charset(227, 'utf8mb4', 'utf8mb4_romanian_ci', '')) -_charsets.add(Charset(228, 'utf8mb4', 'utf8mb4_slovenian_ci', '')) -_charsets.add(Charset(229, 'utf8mb4', 'utf8mb4_polish_ci', '')) -_charsets.add(Charset(230, 'utf8mb4', 'utf8mb4_estonian_ci', '')) -_charsets.add(Charset(231, 'utf8mb4', 'utf8mb4_spanish_ci', '')) -_charsets.add(Charset(232, 
'utf8mb4', 'utf8mb4_swedish_ci', '')) -_charsets.add(Charset(233, 'utf8mb4', 'utf8mb4_turkish_ci', '')) -_charsets.add(Charset(234, 'utf8mb4', 'utf8mb4_czech_ci', '')) -_charsets.add(Charset(235, 'utf8mb4', 'utf8mb4_danish_ci', '')) -_charsets.add(Charset(236, 'utf8mb4', 'utf8mb4_lithuanian_ci', '')) -_charsets.add(Charset(237, 'utf8mb4', 'utf8mb4_slovak_ci', '')) -_charsets.add(Charset(238, 'utf8mb4', 'utf8mb4_spanish2_ci', '')) -_charsets.add(Charset(239, 'utf8mb4', 'utf8mb4_roman_ci', '')) -_charsets.add(Charset(240, 'utf8mb4', 'utf8mb4_persian_ci', '')) -_charsets.add(Charset(241, 'utf8mb4', 'utf8mb4_esperanto_ci', '')) -_charsets.add(Charset(242, 'utf8mb4', 'utf8mb4_hungarian_ci', '')) -_charsets.add(Charset(243, 'utf8mb4', 'utf8mb4_sinhala_ci', '')) -_charsets.add(Charset(244, 'utf8mb4', 'utf8mb4_german2_ci', '')) -_charsets.add(Charset(245, 'utf8mb4', 'utf8mb4_croatian_ci', '')) -_charsets.add(Charset(246, 'utf8mb4', 'utf8mb4_unicode_520_ci', '')) -_charsets.add(Charset(247, 'utf8mb4', 'utf8mb4_vietnamese_ci', '')) -_charsets.add(Charset(248, 'gb18030', 'gb18030_chinese_ci', 'Yes')) -_charsets.add(Charset(249, 'gb18030', 'gb18030_bin', '')) -_charsets.add(Charset(250, 'gb18030', 'gb18030_unicode_520_ci', '')) -_charsets.add(Charset(255, 'utf8mb4', 'utf8mb4_0900_ai_ci', '')) - -charset_by_name = _charsets.by_name -charset_by_id = _charsets.by_id - - -#TODO: remove this -def charset_to_encoding(name): - """Convert MySQL's charset name to Python's codec name""" - if name in ('utf8mb4', 'utf8mb3'): - return 'utf8' - return name diff --git a/utill/db/pymysql/connections.py b/utill/db/pymysql/connections.py deleted file mode 100644 index 2e4122b..0000000 --- a/utill/db/pymysql/connections.py +++ /dev/null @@ -1,1279 +0,0 @@ -# Python implementation of the MySQL client-server protocol -# http://dev.mysql.com/doc/internals/en/client-server-protocol.html -# Error codes: -# http://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html -from __future__ import 
print_function -from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON - -import errno -import io -import os -import socket -import struct -import sys -import traceback -import warnings - -from . import _auth - -from .charset import charset_by_name, charset_by_id -from .constants import CLIENT, COMMAND, CR, FIELD_TYPE, SERVER_STATUS -from . import converters -from .cursors import Cursor -from .optionfile import Parser -from .protocol import ( - dump_packet, MysqlPacket, FieldDescriptorPacket, OKPacketWrapper, - EOFPacketWrapper, LoadLocalPacketWrapper -) -from .util import byte2int, int2byte -from . import err, VERSION_STRING - -try: - import ssl - SSL_ENABLED = True -except ImportError: - ssl = None - SSL_ENABLED = False - -try: - import getpass - DEFAULT_USER = getpass.getuser() - del getpass -except (ImportError, KeyError): - # KeyError occurs when there's no entry in OS database for a current user. - DEFAULT_USER = None - -DEBUG = False - -_py_version = sys.version_info[:2] - -if PY2: - pass -elif _py_version < (3, 6): - # See http://bugs.python.org/issue24870 - _surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)] - - def _fast_surrogateescape(s): - return s.decode('latin1').translate(_surrogateescape_table) -else: - def _fast_surrogateescape(s): - return s.decode('ascii', 'surrogateescape') - -# socket.makefile() in Python 2 is not usable because very inefficient and -# bad behavior about timeout. -# XXX: ._socketio doesn't work under IronPython. -if PY2 and not IRONPYTHON: - # read method of file-like returned by sock.makefile() is very slow. - # So we copy io-based one from Python 3. - from ._socketio import SocketIO - - def _makefile(sock, mode): - return io.BufferedReader(SocketIO(sock, mode)) -else: - # socket.makefile in Python 3 is nice. 
- def _makefile(sock, mode): - return sock.makefile(mode) - - -TEXT_TYPES = { - FIELD_TYPE.BIT, - FIELD_TYPE.BLOB, - FIELD_TYPE.LONG_BLOB, - FIELD_TYPE.MEDIUM_BLOB, - FIELD_TYPE.STRING, - FIELD_TYPE.TINY_BLOB, - FIELD_TYPE.VAR_STRING, - FIELD_TYPE.VARCHAR, - FIELD_TYPE.GEOMETRY, -} - - -DEFAULT_CHARSET = 'utf8mb4' - -MAX_PACKET_LEN = 2**24-1 - - -def pack_int24(n): - return struct.pack('`_ in the - specification. - """ - - _sock = None - _auth_plugin_name = '' - _closed = False - _secure = False - - def __init__(self, host=None, user=None, password="", - database=None, port=0, unix_socket=None, - charset='', sql_mode=None, - read_default_file=None, conv=None, use_unicode=None, - client_flag=0, cursorclass=Cursor, init_command=None, - connect_timeout=10, ssl=None, read_default_group=None, - compress=None, named_pipe=None, - autocommit=False, db=None, passwd=None, local_infile=False, - max_allowed_packet=16*1024*1024, defer_connect=False, - auth_plugin_map=None, read_timeout=None, write_timeout=None, - bind_address=None, binary_prefix=False, program_name=None, - server_public_key=None): - if use_unicode is None and sys.version_info[0] > 2: - use_unicode = True - - if db is not None and database is None: - database = db - if passwd is not None and not password: - password = passwd - - if compress or named_pipe: - raise NotImplementedError("compress and named_pipe arguments are not supported") - - self._local_infile = bool(local_infile) - if self._local_infile: - client_flag |= CLIENT.LOCAL_FILES - - if read_default_group and not read_default_file: - if sys.platform.startswith("win"): - read_default_file = "c:\\my.ini" - else: - read_default_file = "/etc/my.cnf" - - if read_default_file: - if not read_default_group: - read_default_group = "client" - - cfg = Parser() - cfg.read(os.path.expanduser(read_default_file)) - - def _config(key, arg): - if arg: - return arg - try: - return cfg.get(read_default_group, key) - except Exception: - return arg - - user = 
_config("user", user) - password = _config("password", password) - host = _config("host", host) - database = _config("database", database) - unix_socket = _config("socket", unix_socket) - port = int(_config("port", port)) - bind_address = _config("bind-address", bind_address) - charset = _config("default-character-set", charset) - if not ssl: - ssl = {} - if isinstance(ssl, dict): - for key in ["ca", "capath", "cert", "key", "cipher"]: - value = _config("ssl-" + key, ssl.get(key)) - if value: - ssl[key] = value - - self.ssl = False - if ssl: - if not SSL_ENABLED: - raise NotImplementedError("ssl module not found") - self.ssl = True - client_flag |= CLIENT.SSL - self.ctx = self._create_ssl_ctx(ssl) - - self.host = host or "localhost" - self.port = port or 3306 - self.user = user or DEFAULT_USER - self.password = password or b"" - if isinstance(self.password, text_type): - self.password = self.password.encode('latin1') - self.db = database - self.unix_socket = unix_socket - self.bind_address = bind_address - if not (0 < connect_timeout <= 31536000): - raise ValueError("connect_timeout should be >0 and <=31536000") - self.connect_timeout = connect_timeout or None - if read_timeout is not None and read_timeout <= 0: - raise ValueError("read_timeout should be >= 0") - self._read_timeout = read_timeout - if write_timeout is not None and write_timeout <= 0: - raise ValueError("write_timeout should be >= 0") - self._write_timeout = write_timeout - if charset: - self.charset = charset - self.use_unicode = True - else: - self.charset = DEFAULT_CHARSET - self.use_unicode = False - - if use_unicode is not None: - self.use_unicode = use_unicode - - self.encoding = charset_by_name(self.charset).encoding - - client_flag |= CLIENT.CAPABILITIES - if self.db: - client_flag |= CLIENT.CONNECT_WITH_DB - - self.client_flag = client_flag - - self.cursorclass = cursorclass - - self._result = None - self._affected_rows = 0 - self.host_info = "Not connected" - - # specified autocommit mode. 
None means use server default. - self.autocommit_mode = autocommit - - if conv is None: - conv = converters.conversions - - # Need for MySQLdb compatibility. - self.encoders = {k: v for (k, v) in conv.items() if type(k) is not int} - self.decoders = {k: v for (k, v) in conv.items() if type(k) is int} - self.sql_mode = sql_mode - self.init_command = init_command - self.max_allowed_packet = max_allowed_packet - self._auth_plugin_map = auth_plugin_map or {} - self._binary_prefix = binary_prefix - self.server_public_key = server_public_key - - self._connect_attrs = { - '_client_name': 'pymysql', - '_pid': str(os.getpid()), - '_client_version': VERSION_STRING, - } - - if program_name: - self._connect_attrs["program_name"] = program_name - - if defer_connect: - self._sock = None - else: - self.connect() - - def _create_ssl_ctx(self, sslp): - if isinstance(sslp, ssl.SSLContext): - return sslp - ca = sslp.get('ca') - capath = sslp.get('capath') - hasnoca = ca is None and capath is None - ctx = ssl.create_default_context(cafile=ca, capath=capath) - ctx.check_hostname = not hasnoca and sslp.get('check_hostname', True) - ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED - if 'cert' in sslp: - ctx.load_cert_chain(sslp['cert'], keyfile=sslp.get('key')) - if 'cipher' in sslp: - ctx.set_ciphers(sslp['cipher']) - ctx.options |= ssl.OP_NO_SSLv2 - ctx.options |= ssl.OP_NO_SSLv3 - return ctx - - def close(self): - """ - Send the quit message and close the socket. - - See `Connection.close() `_ - in the specification. - - :raise Error: If the connection is already closed. - """ - if self._closed: - raise err.Error("Already closed") - self._closed = True - if self._sock is None: - return - send_data = struct.pack('`_ - in the specification. - """ - self._execute_command(COMMAND.COM_QUERY, "COMMIT") - self._read_ok_packet() - - def rollback(self): - """ - Roll back the current transaction. - - See `Connection.rollback() `_ - in the specification. 
- """ - self._execute_command(COMMAND.COM_QUERY, "ROLLBACK") - self._read_ok_packet() - - def show_warnings(self): - """Send the "SHOW WARNINGS" SQL command.""" - self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS") - result = MySQLResult(self) - result.read() - return result.rows - - def select_db(self, db): - """ - Set current db. - - :param db: The name of the db. - """ - self._execute_command(COMMAND.COM_INIT_DB, db) - self._read_ok_packet() - - def escape(self, obj, mapping=None): - """Escape whatever value you pass to it. - - Non-standard, for internal use; do not use this in your applications. - """ - if isinstance(obj, str_type): - return "'" + self.escape_string(obj) + "'" - if isinstance(obj, (bytes, bytearray)): - ret = self._quote_bytes(obj) - if self._binary_prefix: - ret = "_binary" + ret - return ret - return converters.escape_item(obj, self.charset, mapping=mapping) - - def literal(self, obj): - """Alias for escape() - - Non-standard, for internal use; do not use this in your applications. - """ - return self.escape(obj, self.encoders) - - def escape_string(self, s): - if (self.server_status & - SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): - return s.replace("'", "''") - return converters.escape_string(s) - - def _quote_bytes(self, s): - if (self.server_status & - SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): - return "'%s'" % (_fast_surrogateescape(s.replace(b"'", b"''")),) - return converters.escape_bytes(s) - - def cursor(self, cursor=None): - """ - Create a new cursor to execute queries with. - - :param cursor: The type of cursor to create; one of :py:class:`Cursor`, - :py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`. - None means use Cursor. 
- """ - if cursor: - return cursor(self) - return self.cursorclass(self) - - def __enter__(self): - """Context manager that returns a Cursor""" - warnings.warn( - "Context manager API of Connection object is deprecated; Use conn.begin()", - DeprecationWarning) - return self.cursor() - - def __exit__(self, exc, value, traceback): - """On successful exit, commit. On exception, rollback""" - if exc: - self.rollback() - else: - self.commit() - - # The following methods are INTERNAL USE ONLY (called from Cursor) - def query(self, sql, unbuffered=False): - # if DEBUG: - # print("DEBUG: sending query:", sql) - if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON): - if PY2: - sql = sql.encode(self.encoding) - else: - sql = sql.encode(self.encoding, 'surrogateescape') - self._execute_command(COMMAND.COM_QUERY, sql) - self._affected_rows = self._read_query_result(unbuffered=unbuffered) - return self._affected_rows - - def next_result(self, unbuffered=False): - self._affected_rows = self._read_query_result(unbuffered=unbuffered) - return self._affected_rows - - def affected_rows(self): - return self._affected_rows - - def kill(self, thread_id): - arg = struct.pack('= 5: - self.client_flag |= CLIENT.MULTI_RESULTS - - if self.user is None: - raise ValueError("Did not specify a username") - - charset_id = charset_by_name(self.charset).id - if isinstance(self.user, text_type): - self.user = self.user.encode(self.encoding) - - data_init = struct.pack('=5.0) - data += authresp + b'\0' - - if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB: - if isinstance(self.db, text_type): - self.db = self.db.encode(self.encoding) - data += self.db + b'\0' - - if self.server_capabilities & CLIENT.PLUGIN_AUTH: - data += (plugin_name or b'') + b'\0' - - if self.server_capabilities & CLIENT.CONNECT_ATTRS: - connect_attrs = b'' - for k, v in self._connect_attrs.items(): - k = k.encode('utf-8') - connect_attrs += struct.pack('B', len(k)) + k - v = v.encode('utf-8') - 
connect_attrs += struct.pack('B', len(v)) + v - data += struct.pack('B', len(connect_attrs)) + connect_attrs - - self.write_packet(data) - auth_packet = self._read_packet() - - # if authentication method isn't accepted the first byte - # will have the octet 254 - if auth_packet.is_auth_switch_request(): - if DEBUG: print("received auth switch") - # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest - auth_packet.read_uint8() # 0xfe packet identifier - plugin_name = auth_packet.read_string() - if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None: - auth_packet = self._process_auth(plugin_name, auth_packet) - else: - # send legacy handshake - data = _auth.scramble_old_password(self.password, self.salt) + b'\0' - self.write_packet(data) - auth_packet = self._read_packet() - elif auth_packet.is_extra_auth_data(): - if DEBUG: - print("received extra data") - # https://dev.mysql.com/doc/internals/en/successful-authentication.html - if self._auth_plugin_name == "caching_sha2_password": - auth_packet = _auth.caching_sha2_password_auth(self, auth_packet) - elif self._auth_plugin_name == "sha256_password": - auth_packet = _auth.sha256_password_auth(self, auth_packet) - else: - raise err.OperationalError("Received extra packet for auth method %r", self._auth_plugin_name) - - if DEBUG: print("Succeed to auth") - - def _process_auth(self, plugin_name, auth_packet): - handler = self._get_auth_plugin_handler(plugin_name) - if handler: - try: - return handler.authenticate(auth_packet) - except AttributeError: - if plugin_name != b'dialog': - raise err.OperationalError(2059, "Authentication plugin '%s'" - " not loaded: - %r missing authenticate method" % (plugin_name, type(handler))) - if plugin_name == b"caching_sha2_password": - return _auth.caching_sha2_password_auth(self, auth_packet) - elif plugin_name == b"sha256_password": - return _auth.sha256_password_auth(self, auth_packet) - elif plugin_name == 
b"mysql_native_password": - data = _auth.scramble_native_password(self.password, auth_packet.read_all()) - elif plugin_name == b"mysql_old_password": - data = _auth.scramble_old_password(self.password, auth_packet.read_all()) + b'\0' - elif plugin_name == b"mysql_clear_password": - # https://dev.mysql.com/doc/internals/en/clear-text-authentication.html - data = self.password + b'\0' - elif plugin_name == b"dialog": - pkt = auth_packet - while True: - flag = pkt.read_uint8() - echo = (flag & 0x06) == 0x02 - last = (flag & 0x01) == 0x01 - prompt = pkt.read_all() - - if prompt == b"Password: ": - self.write_packet(self.password + b'\0') - elif handler: - resp = 'no response - TypeError within plugin.prompt method' - try: - resp = handler.prompt(echo, prompt) - self.write_packet(resp + b'\0') - except AttributeError: - raise err.OperationalError(2059, "Authentication plugin '%s'" \ - " not loaded: - %r missing prompt method" % (plugin_name, handler)) - except TypeError: - raise err.OperationalError(2061, "Authentication plugin '%s'" \ - " %r didn't respond with string. 
Returned '%r' to prompt %r" % (plugin_name, handler, resp, prompt)) - else: - raise err.OperationalError(2059, "Authentication plugin '%s' (%r) not configured" % (plugin_name, handler)) - pkt = self._read_packet() - pkt.check_error() - if pkt.is_ok_packet() or last: - break - return pkt - else: - raise err.OperationalError(2059, "Authentication plugin '%s' not configured" % plugin_name) - - self.write_packet(data) - pkt = self._read_packet() - pkt.check_error() - return pkt - - def _get_auth_plugin_handler(self, plugin_name): - plugin_class = self._auth_plugin_map.get(plugin_name) - if not plugin_class and isinstance(plugin_name, bytes): - plugin_class = self._auth_plugin_map.get(plugin_name.decode('ascii')) - if plugin_class: - try: - handler = plugin_class(self) - except TypeError: - raise err.OperationalError(2059, "Authentication plugin '%s'" - " not loaded: - %r cannot be constructed with connection object" % (plugin_name, plugin_class)) - else: - handler = None - return handler - - # _mysql support - def thread_id(self): - return self.server_thread_id[0] - - def character_set_name(self): - return self.charset - - def get_host_info(self): - return self.host_info - - def get_proto_info(self): - return self.protocol_version - - def _get_server_information(self): - i = 0 - packet = self._read_packet() - data = packet.get_all_data() - - self.protocol_version = byte2int(data[i:i+1]) - i += 1 - - server_end = data.find(b'\0', i) - self.server_version = data[i:server_end].decode('latin1') - i = server_end + 1 - - self.server_thread_id = struct.unpack('= i + 6: - lang, stat, cap_h, salt_len = struct.unpack('= i + salt_len: - # salt_len includes auth_plugin_data_part_1 and filler - self.salt += data[i:i+salt_len] - i += salt_len - - i+=1 - # AUTH PLUGIN NAME may appear here. - if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i: - # Due to Bug#59453 the auth-plugin-name is missing the terminating - # NUL-char in versions prior to 5.5.10 and 5.6.2. 
- # ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake - # didn't use version checks as mariadb is corrected and reports - # earlier than those two. - server_end = data.find(b'\0', i) - if server_end < 0: # pragma: no cover - very specific upstream bug - # not found \0 and last field so take it all - self._auth_plugin_name = data[i:].decode('utf-8') - else: - self._auth_plugin_name = data[i:server_end].decode('utf-8') - - def get_server_info(self): - return self.server_version - - Warning = err.Warning - Error = err.Error - InterfaceError = err.InterfaceError - DatabaseError = err.DatabaseError - DataError = err.DataError - OperationalError = err.OperationalError - IntegrityError = err.IntegrityError - InternalError = err.InternalError - ProgrammingError = err.ProgrammingError - NotSupportedError = err.NotSupportedError - - -class MySQLResult(object): - - def __init__(self, connection): - """ - :type connection: Connection - """ - self.connection = connection - self.affected_rows = None - self.insert_id = None - self.server_status = None - self.warning_count = 0 - self.message = None - self.field_count = 0 - self.description = None - self.rows = None - self.has_next = None - self.unbuffered_active = False - - def __del__(self): - if self.unbuffered_active: - self._finish_unbuffered_query() - - def read(self): - try: - first_packet = self.connection._read_packet() - - if first_packet.is_ok_packet(): - self._read_ok_packet(first_packet) - elif first_packet.is_load_local_packet(): - self._read_load_local_packet(first_packet) - else: - self._read_result_packet(first_packet) - finally: - self.connection = None - - def init_unbuffered_query(self): - """ - :raise OperationalError: If the connection to the MySQL server is lost. 
- :raise InternalError: - """ - self.unbuffered_active = True - first_packet = self.connection._read_packet() - - if first_packet.is_ok_packet(): - self._read_ok_packet(first_packet) - self.unbuffered_active = False - self.connection = None - elif first_packet.is_load_local_packet(): - self._read_load_local_packet(first_packet) - self.unbuffered_active = False - self.connection = None - else: - self.field_count = first_packet.read_length_encoded_integer() - self._get_descriptions() - - # Apparently, MySQLdb picks this number because it's the maximum - # value of a 64bit unsigned integer. Since we're emulating MySQLdb, - # we set it to this instead of None, which would be preferred. - self.affected_rows = 18446744073709551615 - - def _read_ok_packet(self, first_packet): - ok_packet = OKPacketWrapper(first_packet) - self.affected_rows = ok_packet.affected_rows - self.insert_id = ok_packet.insert_id - self.server_status = ok_packet.server_status - self.warning_count = ok_packet.warning_count - self.message = ok_packet.message - self.has_next = ok_packet.has_next - - def _read_load_local_packet(self, first_packet): - if not self.connection._local_infile: - raise RuntimeError( - "**WARN**: Received LOAD_LOCAL packet but local_infile option is false.") - load_packet = LoadLocalPacketWrapper(first_packet) - sender = LoadLocalFile(load_packet.filename, self.connection) - try: - sender.send_data() - except: - self.connection._read_packet() # skip ok packet - raise - - ok_packet = self.connection._read_packet() - if not ok_packet.is_ok_packet(): # pragma: no cover - upstream induced protocol error - raise err.OperationalError(2014, "Commands Out of Sync") - self._read_ok_packet(ok_packet) - - def _check_packet_is_eof(self, packet): - if not packet.is_eof_packet(): - return False - #TODO: Support CLIENT.DEPRECATE_EOF - # 1) Add DEPRECATE_EOF to CAPABILITIES - # 2) Mask CAPABILITIES with server_capabilities - # 3) if server_capabilities & CLIENT.DEPRECATE_EOF: use 
OKPacketWrapper instead of EOFPacketWrapper - wp = EOFPacketWrapper(packet) - self.warning_count = wp.warning_count - self.has_next = wp.has_next - return True - - def _read_result_packet(self, first_packet): - self.field_count = first_packet.read_length_encoded_integer() - self._get_descriptions() - self._read_rowdata_packet() - - def _read_rowdata_packet_unbuffered(self): - # Check if in an active query - if not self.unbuffered_active: - return - - # EOF - packet = self.connection._read_packet() - if self._check_packet_is_eof(packet): - self.unbuffered_active = False - self.connection = None - self.rows = None - return - - row = self._read_row_from_packet(packet) - self.affected_rows = 1 - self.rows = (row,) # rows should tuple of row for MySQL-python compatibility. - return row - - def _finish_unbuffered_query(self): - # After much reading on the MySQL protocol, it appears that there is, - # in fact, no way to stop MySQL from sending all the data after - # executing a query, so we just spin, and wait for an EOF packet. - while self.unbuffered_active: - packet = self.connection._read_packet() - if self._check_packet_is_eof(packet): - self.unbuffered_active = False - self.connection = None # release reference to kill cyclic reference. - - def _read_rowdata_packet(self): - """Read a rowdata packet for each data row in the result set.""" - rows = [] - while True: - packet = self.connection._read_packet() - if self._check_packet_is_eof(packet): - self.connection = None # release reference to kill cyclic reference. 
- break - rows.append(self._read_row_from_packet(packet)) - - self.affected_rows = len(rows) - self.rows = tuple(rows) - - def _read_row_from_packet(self, packet): - row = [] - for encoding, converter in self.converters: - try: - data = packet.read_length_coded_string() - except IndexError: - # No more columns in this row - # See https://github.com/PyMySQL/PyMySQL/pull/434 - break - if data is not None: - if encoding is not None: - data = data.decode(encoding) - if DEBUG: print("DEBUG: DATA = ", data) - if converter is not None: - data = converter(data) - row.append(data) - return tuple(row) - - def _get_descriptions(self): - """Read a column descriptor packet for each column in the result.""" - self.fields = [] - self.converters = [] - use_unicode = self.connection.use_unicode - conn_encoding = self.connection.encoding - description = [] - - for i in range_type(self.field_count): - field = self.connection._read_packet(FieldDescriptorPacket) - self.fields.append(field) - description.append(field.description()) - field_type = field.type_code - if use_unicode: - if field_type == FIELD_TYPE.JSON: - # When SELECT from JSON column: charset = binary - # When SELECT CAST(... AS JSON): charset = connection encoding - # This behavior is different from TEXT / BLOB. - # We should decode result by connection encoding regardless charsetnr. - # See https://github.com/PyMySQL/PyMySQL/issues/488 - encoding = conn_encoding # SELECT CAST(... AS JSON) - elif field_type in TEXT_TYPES: - if field.charsetnr == 63: # binary - # TEXTs with charset=binary means BINARY types. 
- encoding = None - else: - encoding = conn_encoding - else: - # Integers, Dates and Times, and other basic data is encoded in ascii - encoding = 'ascii' - else: - encoding = None - converter = self.connection.decoders.get(field_type) - if converter is converters.through: - converter = None - if DEBUG: print("DEBUG: field={}, converter={}".format(field, converter)) - self.converters.append((encoding, converter)) - - eof_packet = self.connection._read_packet() - assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' - self.description = tuple(description) - - -class LoadLocalFile(object): - def __init__(self, filename, connection): - self.filename = filename - self.connection = connection - - def send_data(self): - """Send data packets from the local file to the server""" - if not self.connection._sock: - raise err.InterfaceError("(0, '')") - conn = self.connection - - try: - with open(self.filename, 'rb') as open_file: - packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough - while True: - chunk = open_file.read(packet_size) - if not chunk: - break - conn.write_packet(chunk) - except IOError: - raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename)) - finally: - # send the empty packet to signify we are done sending data - conn.write_packet(b'') diff --git a/utill/db/pymysql/constants/CLIENT.py b/utill/db/pymysql/constants/CLIENT.py deleted file mode 100644 index b42f152..0000000 --- a/utill/db/pymysql/constants/CLIENT.py +++ /dev/null @@ -1,31 +0,0 @@ -# https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags -LONG_PASSWORD = 1 -FOUND_ROWS = 1 << 1 -LONG_FLAG = 1 << 2 -CONNECT_WITH_DB = 1 << 3 -NO_SCHEMA = 1 << 4 -COMPRESS = 1 << 5 -ODBC = 1 << 6 -LOCAL_FILES = 1 << 7 -IGNORE_SPACE = 1 << 8 -PROTOCOL_41 = 1 << 9 -INTERACTIVE = 1 << 10 -SSL = 1 << 11 -IGNORE_SIGPIPE = 1 << 12 -TRANSACTIONS = 1 << 13 -SECURE_CONNECTION = 1 << 15 -MULTI_STATEMENTS = 1 << 16 
-MULTI_RESULTS = 1 << 17 -PS_MULTI_RESULTS = 1 << 18 -PLUGIN_AUTH = 1 << 19 -CONNECT_ATTRS = 1 << 20 -PLUGIN_AUTH_LENENC_CLIENT_DATA = 1 << 21 -CAPABILITIES = ( - LONG_PASSWORD | LONG_FLAG | PROTOCOL_41 | TRANSACTIONS - | SECURE_CONNECTION | MULTI_RESULTS - | PLUGIN_AUTH | PLUGIN_AUTH_LENENC_CLIENT_DATA | CONNECT_ATTRS) - -# Not done yet -HANDLE_EXPIRED_PASSWORDS = 1 << 22 -SESSION_TRACK = 1 << 23 -DEPRECATE_EOF = 1 << 24 diff --git a/utill/db/pymysql/constants/COMMAND.py b/utill/db/pymysql/constants/COMMAND.py deleted file mode 100644 index 1da2755..0000000 --- a/utill/db/pymysql/constants/COMMAND.py +++ /dev/null @@ -1,33 +0,0 @@ - -COM_SLEEP = 0x00 -COM_QUIT = 0x01 -COM_INIT_DB = 0x02 -COM_QUERY = 0x03 -COM_FIELD_LIST = 0x04 -COM_CREATE_DB = 0x05 -COM_DROP_DB = 0x06 -COM_REFRESH = 0x07 -COM_SHUTDOWN = 0x08 -COM_STATISTICS = 0x09 -COM_PROCESS_INFO = 0x0a -COM_CONNECT = 0x0b -COM_PROCESS_KILL = 0x0c -COM_DEBUG = 0x0d -COM_PING = 0x0e -COM_TIME = 0x0f -COM_DELAYED_INSERT = 0x10 -COM_CHANGE_USER = 0x11 -COM_BINLOG_DUMP = 0x12 -COM_TABLE_DUMP = 0x13 -COM_CONNECT_OUT = 0x14 -COM_REGISTER_SLAVE = 0x15 -COM_STMT_PREPARE = 0x16 -COM_STMT_EXECUTE = 0x17 -COM_STMT_SEND_LONG_DATA = 0x18 -COM_STMT_CLOSE = 0x19 -COM_STMT_RESET = 0x1a -COM_SET_OPTION = 0x1b -COM_STMT_FETCH = 0x1c -COM_DAEMON = 0x1d -COM_BINLOG_DUMP_GTID = 0x1e -COM_END = 0x1f diff --git a/utill/db/pymysql/constants/CR.py b/utill/db/pymysql/constants/CR.py deleted file mode 100644 index 48ca956..0000000 --- a/utill/db/pymysql/constants/CR.py +++ /dev/null @@ -1,68 +0,0 @@ -# flake8: noqa -# errmsg.h -CR_ERROR_FIRST = 2000 -CR_UNKNOWN_ERROR = 2000 -CR_SOCKET_CREATE_ERROR = 2001 -CR_CONNECTION_ERROR = 2002 -CR_CONN_HOST_ERROR = 2003 -CR_IPSOCK_ERROR = 2004 -CR_UNKNOWN_HOST = 2005 -CR_SERVER_GONE_ERROR = 2006 -CR_VERSION_ERROR = 2007 -CR_OUT_OF_MEMORY = 2008 -CR_WRONG_HOST_INFO = 2009 -CR_LOCALHOST_CONNECTION = 2010 -CR_TCP_CONNECTION = 2011 -CR_SERVER_HANDSHAKE_ERR = 2012 -CR_SERVER_LOST = 2013 
-CR_COMMANDS_OUT_OF_SYNC = 2014 -CR_NAMEDPIPE_CONNECTION = 2015 -CR_NAMEDPIPEWAIT_ERROR = 2016 -CR_NAMEDPIPEOPEN_ERROR = 2017 -CR_NAMEDPIPESETSTATE_ERROR = 2018 -CR_CANT_READ_CHARSET = 2019 -CR_NET_PACKET_TOO_LARGE = 2020 -CR_EMBEDDED_CONNECTION = 2021 -CR_PROBE_SLAVE_STATUS = 2022 -CR_PROBE_SLAVE_HOSTS = 2023 -CR_PROBE_SLAVE_CONNECT = 2024 -CR_PROBE_MASTER_CONNECT = 2025 -CR_SSL_CONNECTION_ERROR = 2026 -CR_MALFORMED_PACKET = 2027 -CR_WRONG_LICENSE = 2028 - -CR_NULL_POINTER = 2029 -CR_NO_PREPARE_STMT = 2030 -CR_PARAMS_NOT_BOUND = 2031 -CR_DATA_TRUNCATED = 2032 -CR_NO_PARAMETERS_EXISTS = 2033 -CR_INVALID_PARAMETER_NO = 2034 -CR_INVALID_BUFFER_USE = 2035 -CR_UNSUPPORTED_PARAM_TYPE = 2036 - -CR_SHARED_MEMORY_CONNECTION = 2037 -CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038 -CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039 -CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040 -CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041 -CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042 -CR_SHARED_MEMORY_MAP_ERROR = 2043 -CR_SHARED_MEMORY_EVENT_ERROR = 2044 -CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045 -CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046 -CR_CONN_UNKNOW_PROTOCOL = 2047 -CR_INVALID_CONN_HANDLE = 2048 -CR_SECURE_AUTH = 2049 -CR_FETCH_CANCELED = 2050 -CR_NO_DATA = 2051 -CR_NO_STMT_METADATA = 2052 -CR_NO_RESULT_SET = 2053 -CR_NOT_IMPLEMENTED = 2054 -CR_SERVER_LOST_EXTENDED = 2055 -CR_STMT_CLOSED = 2056 -CR_NEW_STMT_METADATA = 2057 -CR_ALREADY_CONNECTED = 2058 -CR_AUTH_PLUGIN_CANNOT_LOAD = 2059 -CR_DUPLICATE_CONNECTION_ATTR = 2060 -CR_AUTH_PLUGIN_ERR = 2061 -CR_ERROR_LAST = 2061 diff --git a/utill/db/pymysql/constants/ER.py b/utill/db/pymysql/constants/ER.py deleted file mode 100644 index 79b88af..0000000 --- a/utill/db/pymysql/constants/ER.py +++ /dev/null @@ -1,475 +0,0 @@ - -ERROR_FIRST = 1000 -HASHCHK = 1000 -NISAMCHK = 1001 -NO = 1002 -YES = 1003 -CANT_CREATE_FILE = 1004 -CANT_CREATE_TABLE = 1005 -CANT_CREATE_DB = 1006 -DB_CREATE_EXISTS = 1007 -DB_DROP_EXISTS = 1008 -DB_DROP_DELETE = 1009 
-DB_DROP_RMDIR = 1010 -CANT_DELETE_FILE = 1011 -CANT_FIND_SYSTEM_REC = 1012 -CANT_GET_STAT = 1013 -CANT_GET_WD = 1014 -CANT_LOCK = 1015 -CANT_OPEN_FILE = 1016 -FILE_NOT_FOUND = 1017 -CANT_READ_DIR = 1018 -CANT_SET_WD = 1019 -CHECKREAD = 1020 -DISK_FULL = 1021 -DUP_KEY = 1022 -ERROR_ON_CLOSE = 1023 -ERROR_ON_READ = 1024 -ERROR_ON_RENAME = 1025 -ERROR_ON_WRITE = 1026 -FILE_USED = 1027 -FILSORT_ABORT = 1028 -FORM_NOT_FOUND = 1029 -GET_ERRNO = 1030 -ILLEGAL_HA = 1031 -KEY_NOT_FOUND = 1032 -NOT_FORM_FILE = 1033 -NOT_KEYFILE = 1034 -OLD_KEYFILE = 1035 -OPEN_AS_READONLY = 1036 -OUTOFMEMORY = 1037 -OUT_OF_SORTMEMORY = 1038 -UNEXPECTED_EOF = 1039 -CON_COUNT_ERROR = 1040 -OUT_OF_RESOURCES = 1041 -BAD_HOST_ERROR = 1042 -HANDSHAKE_ERROR = 1043 -DBACCESS_DENIED_ERROR = 1044 -ACCESS_DENIED_ERROR = 1045 -NO_DB_ERROR = 1046 -UNKNOWN_COM_ERROR = 1047 -BAD_NULL_ERROR = 1048 -BAD_DB_ERROR = 1049 -TABLE_EXISTS_ERROR = 1050 -BAD_TABLE_ERROR = 1051 -NON_UNIQ_ERROR = 1052 -SERVER_SHUTDOWN = 1053 -BAD_FIELD_ERROR = 1054 -WRONG_FIELD_WITH_GROUP = 1055 -WRONG_GROUP_FIELD = 1056 -WRONG_SUM_SELECT = 1057 -WRONG_VALUE_COUNT = 1058 -TOO_LONG_IDENT = 1059 -DUP_FIELDNAME = 1060 -DUP_KEYNAME = 1061 -DUP_ENTRY = 1062 -WRONG_FIELD_SPEC = 1063 -PARSE_ERROR = 1064 -EMPTY_QUERY = 1065 -NONUNIQ_TABLE = 1066 -INVALID_DEFAULT = 1067 -MULTIPLE_PRI_KEY = 1068 -TOO_MANY_KEYS = 1069 -TOO_MANY_KEY_PARTS = 1070 -TOO_LONG_KEY = 1071 -KEY_COLUMN_DOES_NOT_EXITS = 1072 -BLOB_USED_AS_KEY = 1073 -TOO_BIG_FIELDLENGTH = 1074 -WRONG_AUTO_KEY = 1075 -READY = 1076 -NORMAL_SHUTDOWN = 1077 -GOT_SIGNAL = 1078 -SHUTDOWN_COMPLETE = 1079 -FORCING_CLOSE = 1080 -IPSOCK_ERROR = 1081 -NO_SUCH_INDEX = 1082 -WRONG_FIELD_TERMINATORS = 1083 -BLOBS_AND_NO_TERMINATED = 1084 -TEXTFILE_NOT_READABLE = 1085 -FILE_EXISTS_ERROR = 1086 -LOAD_INFO = 1087 -ALTER_INFO = 1088 -WRONG_SUB_KEY = 1089 -CANT_REMOVE_ALL_FIELDS = 1090 -CANT_DROP_FIELD_OR_KEY = 1091 -INSERT_INFO = 1092 -UPDATE_TABLE_USED = 1093 -NO_SUCH_THREAD = 1094 -KILL_DENIED_ERROR = 
1095 -NO_TABLES_USED = 1096 -TOO_BIG_SET = 1097 -NO_UNIQUE_LOGFILE = 1098 -TABLE_NOT_LOCKED_FOR_WRITE = 1099 -TABLE_NOT_LOCKED = 1100 -BLOB_CANT_HAVE_DEFAULT = 1101 -WRONG_DB_NAME = 1102 -WRONG_TABLE_NAME = 1103 -TOO_BIG_SELECT = 1104 -UNKNOWN_ERROR = 1105 -UNKNOWN_PROCEDURE = 1106 -WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 -WRONG_PARAMETERS_TO_PROCEDURE = 1108 -UNKNOWN_TABLE = 1109 -FIELD_SPECIFIED_TWICE = 1110 -INVALID_GROUP_FUNC_USE = 1111 -UNSUPPORTED_EXTENSION = 1112 -TABLE_MUST_HAVE_COLUMNS = 1113 -RECORD_FILE_FULL = 1114 -UNKNOWN_CHARACTER_SET = 1115 -TOO_MANY_TABLES = 1116 -TOO_MANY_FIELDS = 1117 -TOO_BIG_ROWSIZE = 1118 -STACK_OVERRUN = 1119 -WRONG_OUTER_JOIN = 1120 -NULL_COLUMN_IN_INDEX = 1121 -CANT_FIND_UDF = 1122 -CANT_INITIALIZE_UDF = 1123 -UDF_NO_PATHS = 1124 -UDF_EXISTS = 1125 -CANT_OPEN_LIBRARY = 1126 -CANT_FIND_DL_ENTRY = 1127 -FUNCTION_NOT_DEFINED = 1128 -HOST_IS_BLOCKED = 1129 -HOST_NOT_PRIVILEGED = 1130 -PASSWORD_ANONYMOUS_USER = 1131 -PASSWORD_NOT_ALLOWED = 1132 -PASSWORD_NO_MATCH = 1133 -UPDATE_INFO = 1134 -CANT_CREATE_THREAD = 1135 -WRONG_VALUE_COUNT_ON_ROW = 1136 -CANT_REOPEN_TABLE = 1137 -INVALID_USE_OF_NULL = 1138 -REGEXP_ERROR = 1139 -MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 -NONEXISTING_GRANT = 1141 -TABLEACCESS_DENIED_ERROR = 1142 -COLUMNACCESS_DENIED_ERROR = 1143 -ILLEGAL_GRANT_FOR_TABLE = 1144 -GRANT_WRONG_HOST_OR_USER = 1145 -NO_SUCH_TABLE = 1146 -NONEXISTING_TABLE_GRANT = 1147 -NOT_ALLOWED_COMMAND = 1148 -SYNTAX_ERROR = 1149 -DELAYED_CANT_CHANGE_LOCK = 1150 -TOO_MANY_DELAYED_THREADS = 1151 -ABORTING_CONNECTION = 1152 -NET_PACKET_TOO_LARGE = 1153 -NET_READ_ERROR_FROM_PIPE = 1154 -NET_FCNTL_ERROR = 1155 -NET_PACKETS_OUT_OF_ORDER = 1156 -NET_UNCOMPRESS_ERROR = 1157 -NET_READ_ERROR = 1158 -NET_READ_INTERRUPTED = 1159 -NET_ERROR_ON_WRITE = 1160 -NET_WRITE_INTERRUPTED = 1161 -TOO_LONG_STRING = 1162 -TABLE_CANT_HANDLE_BLOB = 1163 -TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 -DELAYED_INSERT_TABLE_LOCKED = 1165 -WRONG_COLUMN_NAME = 1166 -WRONG_KEY_COLUMN = 
1167 -WRONG_MRG_TABLE = 1168 -DUP_UNIQUE = 1169 -BLOB_KEY_WITHOUT_LENGTH = 1170 -PRIMARY_CANT_HAVE_NULL = 1171 -TOO_MANY_ROWS = 1172 -REQUIRES_PRIMARY_KEY = 1173 -NO_RAID_COMPILED = 1174 -UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 -KEY_DOES_NOT_EXITS = 1176 -CHECK_NO_SUCH_TABLE = 1177 -CHECK_NOT_IMPLEMENTED = 1178 -CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 -ERROR_DURING_COMMIT = 1180 -ERROR_DURING_ROLLBACK = 1181 -ERROR_DURING_FLUSH_LOGS = 1182 -ERROR_DURING_CHECKPOINT = 1183 -NEW_ABORTING_CONNECTION = 1184 -DUMP_NOT_IMPLEMENTED = 1185 -FLUSH_MASTER_BINLOG_CLOSED = 1186 -INDEX_REBUILD = 1187 -MASTER = 1188 -MASTER_NET_READ = 1189 -MASTER_NET_WRITE = 1190 -FT_MATCHING_KEY_NOT_FOUND = 1191 -LOCK_OR_ACTIVE_TRANSACTION = 1192 -UNKNOWN_SYSTEM_VARIABLE = 1193 -CRASHED_ON_USAGE = 1194 -CRASHED_ON_REPAIR = 1195 -WARNING_NOT_COMPLETE_ROLLBACK = 1196 -TRANS_CACHE_FULL = 1197 -SLAVE_MUST_STOP = 1198 -SLAVE_NOT_RUNNING = 1199 -BAD_SLAVE = 1200 -MASTER_INFO = 1201 -SLAVE_THREAD = 1202 -TOO_MANY_USER_CONNECTIONS = 1203 -SET_CONSTANTS_ONLY = 1204 -LOCK_WAIT_TIMEOUT = 1205 -LOCK_TABLE_FULL = 1206 -READ_ONLY_TRANSACTION = 1207 -DROP_DB_WITH_READ_LOCK = 1208 -CREATE_DB_WITH_READ_LOCK = 1209 -WRONG_ARGUMENTS = 1210 -NO_PERMISSION_TO_CREATE_USER = 1211 -UNION_TABLES_IN_DIFFERENT_DIR = 1212 -LOCK_DEADLOCK = 1213 -TABLE_CANT_HANDLE_FT = 1214 -CANNOT_ADD_FOREIGN = 1215 -NO_REFERENCED_ROW = 1216 -ROW_IS_REFERENCED = 1217 -CONNECT_TO_MASTER = 1218 -QUERY_ON_MASTER = 1219 -ERROR_WHEN_EXECUTING_COMMAND = 1220 -WRONG_USAGE = 1221 -WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 -CANT_UPDATE_WITH_READLOCK = 1223 -MIXING_NOT_ALLOWED = 1224 -DUP_ARGUMENT = 1225 -USER_LIMIT_REACHED = 1226 -SPECIFIC_ACCESS_DENIED_ERROR = 1227 -LOCAL_VARIABLE = 1228 -GLOBAL_VARIABLE = 1229 -NO_DEFAULT = 1230 -WRONG_VALUE_FOR_VAR = 1231 -WRONG_TYPE_FOR_VAR = 1232 -VAR_CANT_BE_READ = 1233 -CANT_USE_OPTION_HERE = 1234 -NOT_SUPPORTED_YET = 1235 -MASTER_FATAL_ERROR_READING_BINLOG = 1236 -SLAVE_IGNORED_TABLE = 1237 
-INCORRECT_GLOBAL_LOCAL_VAR = 1238 -WRONG_FK_DEF = 1239 -KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 -OPERAND_COLUMNS = 1241 -SUBQUERY_NO_1_ROW = 1242 -UNKNOWN_STMT_HANDLER = 1243 -CORRUPT_HELP_DB = 1244 -CYCLIC_REFERENCE = 1245 -AUTO_CONVERT = 1246 -ILLEGAL_REFERENCE = 1247 -DERIVED_MUST_HAVE_ALIAS = 1248 -SELECT_REDUCED = 1249 -TABLENAME_NOT_ALLOWED_HERE = 1250 -NOT_SUPPORTED_AUTH_MODE = 1251 -SPATIAL_CANT_HAVE_NULL = 1252 -COLLATION_CHARSET_MISMATCH = 1253 -SLAVE_WAS_RUNNING = 1254 -SLAVE_WAS_NOT_RUNNING = 1255 -TOO_BIG_FOR_UNCOMPRESS = 1256 -ZLIB_Z_MEM_ERROR = 1257 -ZLIB_Z_BUF_ERROR = 1258 -ZLIB_Z_DATA_ERROR = 1259 -CUT_VALUE_GROUP_CONCAT = 1260 -WARN_TOO_FEW_RECORDS = 1261 -WARN_TOO_MANY_RECORDS = 1262 -WARN_NULL_TO_NOTNULL = 1263 -WARN_DATA_OUT_OF_RANGE = 1264 -WARN_DATA_TRUNCATED = 1265 -WARN_USING_OTHER_HANDLER = 1266 -CANT_AGGREGATE_2COLLATIONS = 1267 -DROP_USER = 1268 -REVOKE_GRANTS = 1269 -CANT_AGGREGATE_3COLLATIONS = 1270 -CANT_AGGREGATE_NCOLLATIONS = 1271 -VARIABLE_IS_NOT_STRUCT = 1272 -UNKNOWN_COLLATION = 1273 -SLAVE_IGNORED_SSL_PARAMS = 1274 -SERVER_IS_IN_SECURE_AUTH_MODE = 1275 -WARN_FIELD_RESOLVED = 1276 -BAD_SLAVE_UNTIL_COND = 1277 -MISSING_SKIP_SLAVE = 1278 -UNTIL_COND_IGNORED = 1279 -WRONG_NAME_FOR_INDEX = 1280 -WRONG_NAME_FOR_CATALOG = 1281 -WARN_QC_RESIZE = 1282 -BAD_FT_COLUMN = 1283 -UNKNOWN_KEY_CACHE = 1284 -WARN_HOSTNAME_WONT_WORK = 1285 -UNKNOWN_STORAGE_ENGINE = 1286 -WARN_DEPRECATED_SYNTAX = 1287 -NON_UPDATABLE_TABLE = 1288 -FEATURE_DISABLED = 1289 -OPTION_PREVENTS_STATEMENT = 1290 -DUPLICATED_VALUE_IN_TYPE = 1291 -TRUNCATED_WRONG_VALUE = 1292 -TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 -INVALID_ON_UPDATE = 1294 -UNSUPPORTED_PS = 1295 -GET_ERRMSG = 1296 -GET_TEMPORARY_ERRMSG = 1297 -UNKNOWN_TIME_ZONE = 1298 -WARN_INVALID_TIMESTAMP = 1299 -INVALID_CHARACTER_STRING = 1300 -WARN_ALLOWED_PACKET_OVERFLOWED = 1301 -CONFLICTING_DECLARATIONS = 1302 -SP_NO_RECURSIVE_CREATE = 1303 -SP_ALREADY_EXISTS = 1304 -SP_DOES_NOT_EXIST = 1305 -SP_DROP_FAILED = 1306 
-SP_STORE_FAILED = 1307 -SP_LILABEL_MISMATCH = 1308 -SP_LABEL_REDEFINE = 1309 -SP_LABEL_MISMATCH = 1310 -SP_UNINIT_VAR = 1311 -SP_BADSELECT = 1312 -SP_BADRETURN = 1313 -SP_BADSTATEMENT = 1314 -UPDATE_LOG_DEPRECATED_IGNORED = 1315 -UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 -QUERY_INTERRUPTED = 1317 -SP_WRONG_NO_OF_ARGS = 1318 -SP_COND_MISMATCH = 1319 -SP_NORETURN = 1320 -SP_NORETURNEND = 1321 -SP_BAD_CURSOR_QUERY = 1322 -SP_BAD_CURSOR_SELECT = 1323 -SP_CURSOR_MISMATCH = 1324 -SP_CURSOR_ALREADY_OPEN = 1325 -SP_CURSOR_NOT_OPEN = 1326 -SP_UNDECLARED_VAR = 1327 -SP_WRONG_NO_OF_FETCH_ARGS = 1328 -SP_FETCH_NO_DATA = 1329 -SP_DUP_PARAM = 1330 -SP_DUP_VAR = 1331 -SP_DUP_COND = 1332 -SP_DUP_CURS = 1333 -SP_CANT_ALTER = 1334 -SP_SUBSELECT_NYI = 1335 -STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 -SP_VARCOND_AFTER_CURSHNDLR = 1337 -SP_CURSOR_AFTER_HANDLER = 1338 -SP_CASE_NOT_FOUND = 1339 -FPARSER_TOO_BIG_FILE = 1340 -FPARSER_BAD_HEADER = 1341 -FPARSER_EOF_IN_COMMENT = 1342 -FPARSER_ERROR_IN_PARAMETER = 1343 -FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 -VIEW_NO_EXPLAIN = 1345 -FRM_UNKNOWN_TYPE = 1346 -WRONG_OBJECT = 1347 -NONUPDATEABLE_COLUMN = 1348 -VIEW_SELECT_DERIVED = 1349 -VIEW_SELECT_CLAUSE = 1350 -VIEW_SELECT_VARIABLE = 1351 -VIEW_SELECT_TMPTABLE = 1352 -VIEW_WRONG_LIST = 1353 -WARN_VIEW_MERGE = 1354 -WARN_VIEW_WITHOUT_KEY = 1355 -VIEW_INVALID = 1356 -SP_NO_DROP_SP = 1357 -SP_GOTO_IN_HNDLR = 1358 -TRG_ALREADY_EXISTS = 1359 -TRG_DOES_NOT_EXIST = 1360 -TRG_ON_VIEW_OR_TEMP_TABLE = 1361 -TRG_CANT_CHANGE_ROW = 1362 -TRG_NO_SUCH_ROW_IN_TRG = 1363 -NO_DEFAULT_FOR_FIELD = 1364 -DIVISION_BY_ZERO = 1365 -TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 -ILLEGAL_VALUE_FOR_TYPE = 1367 -VIEW_NONUPD_CHECK = 1368 -VIEW_CHECK_FAILED = 1369 -PROCACCESS_DENIED_ERROR = 1370 -RELAY_LOG_FAIL = 1371 -PASSWD_LENGTH = 1372 -UNKNOWN_TARGET_BINLOG = 1373 -IO_ERR_LOG_INDEX_READ = 1374 -BINLOG_PURGE_PROHIBITED = 1375 -FSEEK_FAIL = 1376 -BINLOG_PURGE_FATAL_ERR = 1377 -LOG_IN_USE = 1378 -LOG_PURGE_UNKNOWN_ERR = 1379 
-RELAY_LOG_INIT = 1380 -NO_BINARY_LOGGING = 1381 -RESERVED_SYNTAX = 1382 -WSAS_FAILED = 1383 -DIFF_GROUPS_PROC = 1384 -NO_GROUP_FOR_PROC = 1385 -ORDER_WITH_PROC = 1386 -LOGGING_PROHIBIT_CHANGING_OF = 1387 -NO_FILE_MAPPING = 1388 -WRONG_MAGIC = 1389 -PS_MANY_PARAM = 1390 -KEY_PART_0 = 1391 -VIEW_CHECKSUM = 1392 -VIEW_MULTIUPDATE = 1393 -VIEW_NO_INSERT_FIELD_LIST = 1394 -VIEW_DELETE_MERGE_VIEW = 1395 -CANNOT_USER = 1396 -XAER_NOTA = 1397 -XAER_INVAL = 1398 -XAER_RMFAIL = 1399 -XAER_OUTSIDE = 1400 -XAER_RMERR = 1401 -XA_RBROLLBACK = 1402 -NONEXISTING_PROC_GRANT = 1403 -PROC_AUTO_GRANT_FAIL = 1404 -PROC_AUTO_REVOKE_FAIL = 1405 -DATA_TOO_LONG = 1406 -SP_BAD_SQLSTATE = 1407 -STARTUP = 1408 -LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 -CANT_CREATE_USER_WITH_GRANT = 1410 -WRONG_VALUE_FOR_TYPE = 1411 -TABLE_DEF_CHANGED = 1412 -SP_DUP_HANDLER = 1413 -SP_NOT_VAR_ARG = 1414 -SP_NO_RETSET = 1415 -CANT_CREATE_GEOMETRY_OBJECT = 1416 -FAILED_ROUTINE_BREAK_BINLOG = 1417 -BINLOG_UNSAFE_ROUTINE = 1418 -BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 -EXEC_STMT_WITH_OPEN_CURSOR = 1420 -STMT_HAS_NO_OPEN_CURSOR = 1421 -COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 -NO_DEFAULT_FOR_VIEW_FIELD = 1423 -SP_NO_RECURSION = 1424 -TOO_BIG_SCALE = 1425 -TOO_BIG_PRECISION = 1426 -M_BIGGER_THAN_D = 1427 -WRONG_LOCK_OF_SYSTEM_TABLE = 1428 -CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 -QUERY_ON_FOREIGN_DATA_SOURCE = 1430 -FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 -FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 -FOREIGN_DATA_STRING_INVALID = 1433 -CANT_CREATE_FEDERATED_TABLE = 1434 -TRG_IN_WRONG_SCHEMA = 1435 -STACK_OVERRUN_NEED_MORE = 1436 -TOO_LONG_BODY = 1437 -WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 -TOO_BIG_DISPLAYWIDTH = 1439 -XAER_DUPID = 1440 -DATETIME_FUNCTION_OVERFLOW = 1441 -CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 -VIEW_PREVENT_UPDATE = 1443 -PS_NO_RECURSION = 1444 -SP_CANT_SET_AUTOCOMMIT = 1445 -MALFORMED_DEFINER = 1446 -VIEW_FRM_NO_USER = 1447 -VIEW_OTHER_USER = 1448 -NO_SUCH_USER = 1449 -FORBID_SCHEMA_CHANGE = 
1450 -ROW_IS_REFERENCED_2 = 1451 -NO_REFERENCED_ROW_2 = 1452 -SP_BAD_VAR_SHADOW = 1453 -TRG_NO_DEFINER = 1454 -OLD_FILE_FORMAT = 1455 -SP_RECURSION_LIMIT = 1456 -SP_PROC_TABLE_CORRUPT = 1457 -SP_WRONG_NAME = 1458 -TABLE_NEEDS_UPGRADE = 1459 -SP_NO_AGGREGATE = 1460 -MAX_PREPARED_STMT_COUNT_REACHED = 1461 -VIEW_RECURSIVE = 1462 -NON_GROUPING_FIELD_USED = 1463 -TABLE_CANT_HANDLE_SPKEYS = 1464 -NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 -USERNAME = 1466 -HOSTNAME = 1467 -WRONG_STRING_LENGTH = 1468 -ERROR_LAST = 1468 - -# https://github.com/PyMySQL/PyMySQL/issues/607 -CONSTRAINT_FAILED = 4025 diff --git a/utill/db/pymysql/constants/FIELD_TYPE.py b/utill/db/pymysql/constants/FIELD_TYPE.py deleted file mode 100644 index 51bd514..0000000 --- a/utill/db/pymysql/constants/FIELD_TYPE.py +++ /dev/null @@ -1,33 +0,0 @@ - - -DECIMAL = 0 -TINY = 1 -SHORT = 2 -LONG = 3 -FLOAT = 4 -DOUBLE = 5 -NULL = 6 -TIMESTAMP = 7 -LONGLONG = 8 -INT24 = 9 -DATE = 10 -TIME = 11 -DATETIME = 12 -YEAR = 13 -NEWDATE = 14 -VARCHAR = 15 -BIT = 16 -JSON = 245 -NEWDECIMAL = 246 -ENUM = 247 -SET = 248 -TINY_BLOB = 249 -MEDIUM_BLOB = 250 -LONG_BLOB = 251 -BLOB = 252 -VAR_STRING = 253 -STRING = 254 -GEOMETRY = 255 - -CHAR = TINY -INTERVAL = ENUM diff --git a/utill/db/pymysql/constants/FLAG.py b/utill/db/pymysql/constants/FLAG.py deleted file mode 100644 index f9ebfad..0000000 --- a/utill/db/pymysql/constants/FLAG.py +++ /dev/null @@ -1,15 +0,0 @@ -NOT_NULL = 1 -PRI_KEY = 2 -UNIQUE_KEY = 4 -MULTIPLE_KEY = 8 -BLOB = 16 -UNSIGNED = 32 -ZEROFILL = 64 -BINARY = 128 -ENUM = 256 -AUTO_INCREMENT = 512 -TIMESTAMP = 1024 -SET = 2048 -PART_KEY = 16384 -GROUP = 32767 -UNIQUE = 65536 diff --git a/utill/db/pymysql/constants/SERVER_STATUS.py b/utill/db/pymysql/constants/SERVER_STATUS.py deleted file mode 100644 index 6f5d566..0000000 --- a/utill/db/pymysql/constants/SERVER_STATUS.py +++ /dev/null @@ -1,11 +0,0 @@ - -SERVER_STATUS_IN_TRANS = 1 -SERVER_STATUS_AUTOCOMMIT = 2 -SERVER_MORE_RESULTS_EXISTS = 8 
-SERVER_QUERY_NO_GOOD_INDEX_USED = 16 -SERVER_QUERY_NO_INDEX_USED = 32 -SERVER_STATUS_CURSOR_EXISTS = 64 -SERVER_STATUS_LAST_ROW_SENT = 128 -SERVER_STATUS_DB_DROPPED = 256 -SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512 -SERVER_STATUS_METADATA_CHANGED = 1024 diff --git a/utill/db/pymysql/constants/__init__.py b/utill/db/pymysql/constants/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/utill/db/pymysql/converters.py b/utill/db/pymysql/converters.py deleted file mode 100644 index ce2be06..0000000 --- a/utill/db/pymysql/converters.py +++ /dev/null @@ -1,411 +0,0 @@ -from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON, unichr - -import datetime -from decimal import Decimal -import re -import time - -from .constants import FIELD_TYPE, FLAG -from .charset import charset_by_id, charset_to_encoding - - -def escape_item(val, charset, mapping=None): - if mapping is None: - mapping = encoders - encoder = mapping.get(type(val)) - - # Fallback to default when no encoder found - if not encoder: - try: - encoder = mapping[text_type] - except KeyError: - raise TypeError("no default type converter defined") - - if encoder in (escape_dict, escape_sequence): - val = encoder(val, charset, mapping) - else: - val = encoder(val, mapping) - return val - -def escape_dict(val, charset, mapping=None): - n = {} - for k, v in val.items(): - quoted = escape_item(v, charset, mapping) - n[k] = quoted - return n - -def escape_sequence(val, charset, mapping=None): - n = [] - for item in val: - quoted = escape_item(item, charset, mapping) - n.append(quoted) - return "(" + ",".join(n) + ")" - -def escape_set(val, charset, mapping=None): - return ','.join([escape_item(x, charset, mapping) for x in val]) - -def escape_bool(value, mapping=None): - return str(int(value)) - -def escape_object(value, mapping=None): - return str(value) - -def escape_int(value, mapping=None): - return str(value) - -def escape_float(value, mapping=None): - return ('%.15g' % value) - 
-_escape_table = [unichr(x) for x in range(128)] -_escape_table[0] = u'\\0' -_escape_table[ord('\\')] = u'\\\\' -_escape_table[ord('\n')] = u'\\n' -_escape_table[ord('\r')] = u'\\r' -_escape_table[ord('\032')] = u'\\Z' -_escape_table[ord('"')] = u'\\"' -_escape_table[ord("'")] = u"\\'" - -def _escape_unicode(value, mapping=None): - """escapes *value* without adding quote. - - Value should be unicode - """ - return value.translate(_escape_table) - -if PY2: - def escape_string(value, mapping=None): - """escape_string escapes *value* but not surround it with quotes. - - Value should be bytes or unicode. - """ - if isinstance(value, unicode): - return _escape_unicode(value) - assert isinstance(value, (bytes, bytearray)) - value = value.replace('\\', '\\\\') - value = value.replace('\0', '\\0') - value = value.replace('\n', '\\n') - value = value.replace('\r', '\\r') - value = value.replace('\032', '\\Z') - value = value.replace("'", "\\'") - value = value.replace('"', '\\"') - return value - - def escape_bytes_prefixed(value, mapping=None): - assert isinstance(value, (bytes, bytearray)) - return b"_binary'%s'" % escape_string(value) - - def escape_bytes(value, mapping=None): - assert isinstance(value, (bytes, bytearray)) - return b"'%s'" % escape_string(value) - -else: - escape_string = _escape_unicode - - # On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow. - # (fixed in Python 3.6, http://bugs.python.org/issue24870) - # Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff. - # We can escape special chars and surrogateescape at once. 
- _escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)] - - def escape_bytes_prefixed(value, mapping=None): - return "_binary'%s'" % value.decode('latin1').translate(_escape_bytes_table) - - def escape_bytes(value, mapping=None): - return "'%s'" % value.decode('latin1').translate(_escape_bytes_table) - - -def escape_unicode(value, mapping=None): - return u"'%s'" % _escape_unicode(value) - -def escape_str(value, mapping=None): - return "'%s'" % escape_string(str(value), mapping) - -def escape_None(value, mapping=None): - return 'NULL' - -def escape_timedelta(obj, mapping=None): - seconds = int(obj.seconds) % 60 - minutes = int(obj.seconds // 60) % 60 - hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24 - if obj.microseconds: - fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'" - else: - fmt = "'{0:02d}:{1:02d}:{2:02d}'" - return fmt.format(hours, minutes, seconds, obj.microseconds) - -def escape_time(obj, mapping=None): - if obj.microsecond: - fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'" - else: - fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'" - return fmt.format(obj) - -def escape_datetime(obj, mapping=None): - if obj.microsecond: - fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'" - else: - fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'" - return fmt.format(obj) - -def escape_date(obj, mapping=None): - fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'" - return fmt.format(obj) - -def escape_struct_time(obj, mapping=None): - return escape_datetime(datetime.datetime(*obj[:6])) - -def _convert_second_fraction(s): - if not s: - return 0 - # Pad zeros to ensure the fraction length in microseconds - s = s.ljust(6, '0') - return int(s[:6]) - -DATETIME_RE = re.compile(r"(\d{1,4})-(\d{1,2})-(\d{1,2})[T ](\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") - - -def convert_datetime(obj): - """Returns a DATETIME or TIMESTAMP column value as a 
datetime object: - - >>> datetime_or_None('2007-02-25 23:06:20') - datetime.datetime(2007, 2, 25, 23, 6, 20) - >>> datetime_or_None('2007-02-25T23:06:20') - datetime.datetime(2007, 2, 25, 23, 6, 20) - - Illegal values are returned as None: - - >>> datetime_or_None('2007-02-31T23:06:20') is None - True - >>> datetime_or_None('0000-00-00 00:00:00') is None - True - - """ - if not PY2 and isinstance(obj, (bytes, bytearray)): - obj = obj.decode('ascii') - - m = DATETIME_RE.match(obj) - if not m: - return convert_date(obj) - - try: - groups = list(m.groups()) - groups[-1] = _convert_second_fraction(groups[-1]) - return datetime.datetime(*[ int(x) for x in groups ]) - except ValueError: - return convert_date(obj) - -TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") - - -def convert_timedelta(obj): - """Returns a TIME column as a timedelta object: - - >>> timedelta_or_None('25:06:17') - datetime.timedelta(1, 3977) - >>> timedelta_or_None('-25:06:17') - datetime.timedelta(-2, 83177) - - Illegal values are returned as None: - - >>> timedelta_or_None('random crap') is None - True - - Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but - can accept values as (+|-)DD HH:MM:SS. The latter format will not - be parsed correctly by this function. 
- """ - if not PY2 and isinstance(obj, (bytes, bytearray)): - obj = obj.decode('ascii') - - m = TIMEDELTA_RE.match(obj) - if not m: - return obj - - try: - groups = list(m.groups()) - groups[-1] = _convert_second_fraction(groups[-1]) - negate = -1 if groups[0] else 1 - hours, minutes, seconds, microseconds = groups[1:] - - tdelta = datetime.timedelta( - hours = int(hours), - minutes = int(minutes), - seconds = int(seconds), - microseconds = int(microseconds) - ) * negate - return tdelta - except ValueError: - return obj - -TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") - - -def convert_time(obj): - """Returns a TIME column as a time object: - - >>> time_or_None('15:06:17') - datetime.time(15, 6, 17) - - Illegal values are returned as None: - - >>> time_or_None('-25:06:17') is None - True - >>> time_or_None('random crap') is None - True - - Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but - can accept values as (+|-)DD HH:MM:SS. The latter format will not - be parsed correctly by this function. - - Also note that MySQL's TIME column corresponds more closely to - Python's timedelta and not time. However if you want TIME columns - to be treated as time-of-day and not a time offset, then you can - use set this function as the converter for FIELD_TYPE.TIME. 
- """ - if not PY2 and isinstance(obj, (bytes, bytearray)): - obj = obj.decode('ascii') - - m = TIME_RE.match(obj) - if not m: - return obj - - try: - groups = list(m.groups()) - groups[-1] = _convert_second_fraction(groups[-1]) - hours, minutes, seconds, microseconds = groups - return datetime.time(hour=int(hours), minute=int(minutes), - second=int(seconds), microsecond=int(microseconds)) - except ValueError: - return obj - - -def convert_date(obj): - """Returns a DATE column as a date object: - - >>> date_or_None('2007-02-26') - datetime.date(2007, 2, 26) - - Illegal values are returned as None: - - >>> date_or_None('2007-02-31') is None - True - >>> date_or_None('0000-00-00') is None - True - - """ - if not PY2 and isinstance(obj, (bytes, bytearray)): - obj = obj.decode('ascii') - try: - return datetime.date(*[ int(x) for x in obj.split('-', 2) ]) - except ValueError: - return obj - - -def convert_mysql_timestamp(timestamp): - """Convert a MySQL TIMESTAMP to a Timestamp object. - - MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME: - - >>> mysql_timestamp_converter('2007-02-25 22:32:17') - datetime.datetime(2007, 2, 25, 22, 32, 17) - - MySQL < 4.1 uses a big string of numbers: - - >>> mysql_timestamp_converter('20070225223217') - datetime.datetime(2007, 2, 25, 22, 32, 17) - - Illegal values are returned as None: - - >>> mysql_timestamp_converter('2007-02-31 22:32:17') is None - True - >>> mysql_timestamp_converter('00000000000000') is None - True - - """ - if not PY2 and isinstance(timestamp, (bytes, bytearray)): - timestamp = timestamp.decode('ascii') - if timestamp[4] == '-': - return convert_datetime(timestamp) - timestamp += "0"*(14-len(timestamp)) # padding - year, month, day, hour, minute, second = \ - int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \ - int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14]) - try: - return datetime.datetime(year, month, day, hour, minute, second) - except ValueError: - return timestamp 
- -def convert_set(s): - if isinstance(s, (bytes, bytearray)): - return set(s.split(b",")) - return set(s.split(",")) - - -def through(x): - return x - - -#def convert_bit(b): -# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes -# return struct.unpack(">Q", b)[0] -# -# the snippet above is right, but MySQLdb doesn't process bits, -# so we shouldn't either -convert_bit = through - - -encoders = { - bool: escape_bool, - int: escape_int, - long_type: escape_int, - float: escape_float, - str: escape_str, - text_type: escape_unicode, - tuple: escape_sequence, - list: escape_sequence, - set: escape_sequence, - frozenset: escape_sequence, - dict: escape_dict, - type(None): escape_None, - datetime.date: escape_date, - datetime.datetime: escape_datetime, - datetime.timedelta: escape_timedelta, - datetime.time: escape_time, - time.struct_time: escape_struct_time, - Decimal: escape_object, -} - -if not PY2 or JYTHON or IRONPYTHON: - encoders[bytes] = escape_bytes - -decoders = { - FIELD_TYPE.BIT: convert_bit, - FIELD_TYPE.TINY: int, - FIELD_TYPE.SHORT: int, - FIELD_TYPE.LONG: int, - FIELD_TYPE.FLOAT: float, - FIELD_TYPE.DOUBLE: float, - FIELD_TYPE.LONGLONG: int, - FIELD_TYPE.INT24: int, - FIELD_TYPE.YEAR: int, - FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp, - FIELD_TYPE.DATETIME: convert_datetime, - FIELD_TYPE.TIME: convert_timedelta, - FIELD_TYPE.DATE: convert_date, - FIELD_TYPE.SET: convert_set, - FIELD_TYPE.BLOB: through, - FIELD_TYPE.TINY_BLOB: through, - FIELD_TYPE.MEDIUM_BLOB: through, - FIELD_TYPE.LONG_BLOB: through, - FIELD_TYPE.STRING: through, - FIELD_TYPE.VAR_STRING: through, - FIELD_TYPE.VARCHAR: through, - FIELD_TYPE.DECIMAL: Decimal, - FIELD_TYPE.NEWDECIMAL: Decimal, -} - - -# for MySQLdb compatibility -conversions = encoders.copy() -conversions.update(decoders) -Thing2Literal = escape_str diff --git a/utill/db/pymysql/cursors.py b/utill/db/pymysql/cursors.py deleted file mode 100644 index a6d645d..0000000 --- a/utill/db/pymysql/cursors.py +++ /dev/null @@ -1,536 
+0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import print_function, absolute_import -from functools import partial -import re -import warnings - -from ._compat import range_type, text_type, PY2 -from . import err - - -#: Regular expression for :meth:`Cursor.executemany`. -#: executemany only suports simple bulk insert. -#: You can use it to load large dataset. -RE_INSERT_VALUES = re.compile( - r"\s*((?:INSERT|REPLACE)\b.+\bVALUES?\s*)" + - r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))" + - r"(\s*(?:ON DUPLICATE.*)?);?\s*\Z", - re.IGNORECASE | re.DOTALL) - - -class Cursor(object): - """ - This is the object you use to interact with the database. - - Do not create an instance of a Cursor yourself. Call - connections.Connection.cursor(). - - See `Cursor `_ in - the specification. - """ - - #: Max statement size which :meth:`executemany` generates. - #: - #: Max size of allowed statement is max_allowed_packet - packet_header_size. - #: Default value of max_allowed_packet is 1048576. - max_stmt_length = 1024000 - - _defer_warnings = False - - def __init__(self, connection): - self.connection = connection - self.description = None - self.rownumber = 0 - self.rowcount = -1 - self.arraysize = 1 - self._executed = None - self._result = None - self._rows = None - self._warnings_handled = False - - def close(self): - """ - Closing a cursor just exhausts all remaining data. 
- """ - conn = self.connection - if conn is None: - return - try: - while self.nextset(): - pass - finally: - self.connection = None - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - del exc_info - self.close() - - def _get_db(self): - if not self.connection: - raise err.ProgrammingError("Cursor closed") - return self.connection - - def _check_executed(self): - if not self._executed: - raise err.ProgrammingError("execute() first") - - def _conv_row(self, row): - return row - - def setinputsizes(self, *args): - """Does nothing, required by DB API.""" - - def setoutputsizes(self, *args): - """Does nothing, required by DB API.""" - - def _nextset(self, unbuffered=False): - """Get the next query set""" - conn = self._get_db() - current_result = self._result - # for unbuffered queries warnings are only available once whole result has been read - if unbuffered: - self._show_warnings() - if current_result is None or current_result is not conn._result: - return None - if not current_result.has_next: - return None - self._result = None - self._clear_result() - conn.next_result(unbuffered=unbuffered) - self._do_get_result() - return True - - def nextset(self): - return self._nextset(False) - - def _ensure_bytes(self, x, encoding=None): - if isinstance(x, text_type): - x = x.encode(encoding) - elif isinstance(x, (tuple, list)): - x = type(x)(self._ensure_bytes(v, encoding=encoding) for v in x) - return x - - def _escape_args(self, args, conn): - ensure_bytes = partial(self._ensure_bytes, encoding=conn.encoding) - - if isinstance(args, (tuple, list)): - if PY2: - args = tuple(map(ensure_bytes, args)) - return tuple(conn.literal(arg) for arg in args) - elif isinstance(args, dict): - if PY2: - args = {ensure_bytes(key): ensure_bytes(val) for - (key, val) in args.items()} - return {key: conn.literal(val) for (key, val) in args.items()} - else: - # If it's not a dictionary let's try escaping it anyways. 
- # Worst case it will throw a Value error - if PY2: - args = ensure_bytes(args) - return conn.escape(args) - - def mogrify(self, query, args=None): - """ - Returns the exact string that is sent to the database by calling the - execute() method. - - This method follows the extension to the DB API 2.0 followed by Psycopg. - """ - conn = self._get_db() - if PY2: # Use bytes on Python 2 always - query = self._ensure_bytes(query, encoding=conn.encoding) - - if args is not None: - query = query % self._escape_args(args, conn) - - return query - - def execute(self, query, args=None): - """Execute a query - - :param str query: Query to execute. - - :param args: parameters used with query. (optional) - :type args: tuple, list or dict - - :return: Number of affected rows - :rtype: int - - If args is a list or tuple, %s can be used as a placeholder in the query. - If args is a dict, %(name)s can be used as a placeholder in the query. - """ - while self.nextset(): - pass - - query = self.mogrify(query, args) - - result = self._query(query) - self._executed = query - return result - - def executemany(self, query, args): - # type: (str, list) -> int - """Run several data against one query - - :param query: query to execute on server - :param args: Sequence of sequences or mappings. It is used as parameter. - :return: Number of rows affected, if any. - - This method improves performance on multiple-row INSERT and - REPLACE. Otherwise it is equivalent to looping over args with - execute(). 
- """ - if not args: - return - - m = RE_INSERT_VALUES.match(query) - if m: - q_prefix = m.group(1) % () - q_values = m.group(2).rstrip() - q_postfix = m.group(3) or '' - assert q_values[0] == '(' and q_values[-1] == ')' - return self._do_execute_many(q_prefix, q_values, q_postfix, args, - self.max_stmt_length, - self._get_db().encoding) - - self.rowcount = sum(self.execute(query, arg) for arg in args) - return self.rowcount - - def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding): - conn = self._get_db() - escape = self._escape_args - if isinstance(prefix, text_type): - prefix = prefix.encode(encoding) - if PY2 and isinstance(values, text_type): - values = values.encode(encoding) - if isinstance(postfix, text_type): - postfix = postfix.encode(encoding) - sql = bytearray(prefix) - args = iter(args) - v = values % escape(next(args), conn) - if isinstance(v, text_type): - if PY2: - v = v.encode(encoding) - else: - v = v.encode(encoding, 'surrogateescape') - sql += v - rows = 0 - for arg in args: - v = values % escape(arg, conn) - if isinstance(v, text_type): - if PY2: - v = v.encode(encoding) - else: - v = v.encode(encoding, 'surrogateescape') - if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length: - rows += self.execute(sql + postfix) - sql = bytearray(prefix) - else: - sql += b',' - sql += v - rows += self.execute(sql + postfix) - self.rowcount = rows - return rows - - def callproc(self, procname, args=()): - """Execute stored procedure procname with args - - procname -- string, name of procedure to execute on server - - args -- Sequence of parameters to use with procedure - - Returns the original args. - - Compatibility warning: PEP-249 specifies that any modified - parameters must be returned. This is currently impossible - as they are only available by storing them in a server - variable and then retrieved by a query. 
Since stored - procedures return zero or more result sets, there is no - reliable way to get at OUT or INOUT parameters via callproc. - The server variables are named @_procname_n, where procname - is the parameter above and n is the position of the parameter - (from zero). Once all result sets generated by the procedure - have been fetched, you can issue a SELECT @_procname_0, ... - query using .execute() to get any OUT or INOUT values. - - Compatibility warning: The act of calling a stored procedure - itself creates an empty result set. This appears after any - result sets generated by the procedure. This is non-standard - behavior with respect to the DB-API. Be sure to use nextset() - to advance through all result sets; otherwise you may get - disconnected. - """ - conn = self._get_db() - if args: - fmt = '@_{0}_%d=%s'.format(procname) - self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg)) - for index, arg in enumerate(args))) - self.nextset() - - q = "CALL %s(%s)" % (procname, - ','.join(['@_%s_%d' % (procname, i) - for i in range_type(len(args))])) - self._query(q) - self._executed = q - return args - - def fetchone(self): - """Fetch the next row""" - self._check_executed() - if self._rows is None or self.rownumber >= len(self._rows): - return None - result = self._rows[self.rownumber] - self.rownumber += 1 - return result - - def fetchmany(self, size=None): - """Fetch several rows""" - self._check_executed() - if self._rows is None: - return () - end = self.rownumber + (size or self.arraysize) - result = self._rows[self.rownumber:end] - self.rownumber = min(end, len(self._rows)) - return result - - def fetchall(self): - """Fetch all the rows""" - self._check_executed() - if self._rows is None: - return () - if self.rownumber: - result = self._rows[self.rownumber:] - else: - result = self._rows - self.rownumber = len(self._rows) - return result - - def scroll(self, value, mode='relative'): - self._check_executed() - if mode == 'relative': - r = 
self.rownumber + value - elif mode == 'absolute': - r = value - else: - raise err.ProgrammingError("unknown scroll mode %s" % mode) - - if not (0 <= r < len(self._rows)): - raise IndexError("out of range") - self.rownumber = r - - def _query(self, q): - conn = self._get_db() - self._last_executed = q - self._clear_result() - conn.query(q) - self._do_get_result() - return self.rowcount - - def _clear_result(self): - self.rownumber = 0 - self._result = None - - self.rowcount = 0 - self.description = None - self.lastrowid = None - self._rows = None - - def _do_get_result(self): - conn = self._get_db() - - self._result = result = conn._result - - self.rowcount = result.affected_rows - self.description = result.description - self.lastrowid = result.insert_id - self._rows = result.rows - self._warnings_handled = False - - if not self._defer_warnings: - self._show_warnings() - - def _show_warnings(self): - if self._warnings_handled: - return - self._warnings_handled = True - if self._result and (self._result.has_next or not self._result.warning_count): - return - ws = self._get_db().show_warnings() - if ws is None: - return - for w in ws: - msg = w[-1] - if PY2: - if isinstance(msg, unicode): - msg = msg.encode('utf-8', 'replace') - warnings.warn(err.Warning(*w[1:3]), stacklevel=4) - - def __iter__(self): - return iter(self.fetchone, None) - - Warning = err.Warning - Error = err.Error - InterfaceError = err.InterfaceError - DatabaseError = err.DatabaseError - DataError = err.DataError - OperationalError = err.OperationalError - IntegrityError = err.IntegrityError - InternalError = err.InternalError - ProgrammingError = err.ProgrammingError - NotSupportedError = err.NotSupportedError - - -class DictCursorMixin(object): - # You can override this to use OrderedDict or other dict-like types. 
- dict_type = dict - - def _do_get_result(self): - super(DictCursorMixin, self)._do_get_result() - fields = [] - if self.description: - for f in self._result.fields: - name = f.name - if name in fields: - name = f.table_name + '.' + name - fields.append(name) - self._fields = fields - - if fields and self._rows: - self._rows = [self._conv_row(r) for r in self._rows] - - def _conv_row(self, row): - if row is None: - return None - return self.dict_type(zip(self._fields, row)) - - -class DictCursor(DictCursorMixin, Cursor): - """A cursor which returns results as a dictionary""" - - -class SSCursor(Cursor): - """ - Unbuffered Cursor, mainly useful for queries that return a lot of data, - or for connections to remote servers over a slow network. - - Instead of copying every row of data into a buffer, this will fetch - rows as needed. The upside of this is the client uses much less memory, - and rows are returned much faster when traveling over a slow network - or if the result set is very big. - - There are limitations, though. The MySQL protocol doesn't support - returning the total number of rows, so the only way to tell how many rows - there are is to iterate over every row returned. Also, it currently isn't - possible to scroll backwards, as only the current row is held in memory. 
- """ - - _defer_warnings = True - - def _conv_row(self, row): - return row - - def close(self): - conn = self.connection - if conn is None: - return - - if self._result is not None and self._result is conn._result: - self._result._finish_unbuffered_query() - - try: - while self.nextset(): - pass - finally: - self.connection = None - - __del__ = close - - def _query(self, q): - conn = self._get_db() - self._last_executed = q - self._clear_result() - conn.query(q, unbuffered=True) - self._do_get_result() - return self.rowcount - - def nextset(self): - return self._nextset(unbuffered=True) - - def read_next(self): - """Read next row""" - return self._conv_row(self._result._read_rowdata_packet_unbuffered()) - - def fetchone(self): - """Fetch next row""" - self._check_executed() - row = self.read_next() - if row is None: - self._show_warnings() - return None - self.rownumber += 1 - return row - - def fetchall(self): - """ - Fetch all, as per MySQLdb. Pretty useless for large queries, as - it is buffered. See fetchall_unbuffered(), if you want an unbuffered - generator version of this method. - """ - return list(self.fetchall_unbuffered()) - - def fetchall_unbuffered(self): - """ - Fetch all, implemented as a generator, which isn't to standard, - however, it doesn't make sense to return everything in a list, as that - would use ridiculous memory for large result sets. 
- """ - return iter(self.fetchone, None) - - def __iter__(self): - return self.fetchall_unbuffered() - - def fetchmany(self, size=None): - """Fetch many""" - self._check_executed() - if size is None: - size = self.arraysize - - rows = [] - for i in range_type(size): - row = self.read_next() - if row is None: - self._show_warnings() - break - rows.append(row) - self.rownumber += 1 - return rows - - def scroll(self, value, mode='relative'): - self._check_executed() - - if mode == 'relative': - if value < 0: - raise err.NotSupportedError( - "Backwards scrolling not supported by this cursor") - - for _ in range_type(value): - self.read_next() - self.rownumber += value - elif mode == 'absolute': - if value < self.rownumber: - raise err.NotSupportedError( - "Backwards scrolling not supported by this cursor") - - end = value - self.rownumber - for _ in range_type(end): - self.read_next() - self.rownumber = value - else: - raise err.ProgrammingError("unknown scroll mode %s" % mode) - - -class SSDictCursor(DictCursorMixin, SSCursor): - """An unbuffered cursor, which returns results as a dictionary""" diff --git a/utill/db/pymysql/err.py b/utill/db/pymysql/err.py deleted file mode 100644 index fbc6055..0000000 --- a/utill/db/pymysql/err.py +++ /dev/null @@ -1,109 +0,0 @@ -import struct - -from .constants import ER - - -class MySQLError(Exception): - """Exception related to operation with MySQL.""" - - -class Warning(Warning, MySQLError): - """Exception raised for important warnings like data truncations - while inserting, etc.""" - - -class Error(MySQLError): - """Exception that is the base class of all other error exceptions - (not Warning).""" - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database - interface rather than the database itself.""" - - -class DatabaseError(Error): - """Exception raised for errors that are related to the - database.""" - - -class DataError(DatabaseError): - """Exception raised for errors that are due 
to problems with the - processed data like division by zero, numeric value out of range, - etc.""" - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's - operation and not necessarily under the control of the programmer, - e.g. an unexpected disconnect occurs, the data source name is not - found, a transaction could not be processed, a memory allocation - error occurred during processing, etc.""" - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database - is affected, e.g. a foreign key check fails, duplicate key, - etc.""" - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal - error, e.g. the cursor is not valid anymore, the transaction is - out of sync, etc.""" - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors, e.g. table not found - or already exists, syntax error in the SQL statement, wrong number - of parameters specified, etc.""" - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used - which is not supported by the database, e.g. 
requesting a - .rollback() on a connection that does not support transaction or - has transactions turned off.""" - - -error_map = {} - - -def _map_error(exc, *errors): - for error in errors: - error_map[error] = exc - - -_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR, - ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME, - ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE, - ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION, - ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION, - ER.WRONG_DB_NAME, ER.WRONG_COLUMN_NAME, - ) -_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL, - ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL, - ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW) -_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW, - ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2, - ER.CANNOT_ADD_FOREIGN, ER.BAD_NULL_ERROR) -_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK, - ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE) -_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR, - ER.CON_COUNT_ERROR, ER.TABLEACCESS_DENIED_ERROR, - ER.COLUMNACCESS_DENIED_ERROR, ER.CONSTRAINT_FAILED, ER.LOCK_DEADLOCK) - - -del _map_error, ER - - -def raise_mysql_exception(data): - errno = struct.unpack('= 2 and value[0] == value[-1] == quote: - return value[1:-1] - return value - - def get(self, section, option): - value = configparser.RawConfigParser.get(self, section, option) - return self.__remove_quotes(value) diff --git a/utill/db/pymysql/protocol.py b/utill/db/pymysql/protocol.py deleted file mode 100644 index 8ccf7c4..0000000 --- a/utill/db/pymysql/protocol.py +++ /dev/null @@ -1,341 +0,0 @@ -# Python implementation of low level MySQL client-server protocol -# http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -from __future__ import print_function -from .charset import MBLENGTH -from ._compat import 
PY2, range_type -from .constants import FIELD_TYPE, SERVER_STATUS -from . import err -from .util import byte2int - -import struct -import sys - - -DEBUG = False - -NULL_COLUMN = 251 -UNSIGNED_CHAR_COLUMN = 251 -UNSIGNED_SHORT_COLUMN = 252 -UNSIGNED_INT24_COLUMN = 253 -UNSIGNED_INT64_COLUMN = 254 - - -def dump_packet(data): # pragma: no cover - def printable(data): - if 32 <= byte2int(data) < 127: - if isinstance(data, int): - return chr(data) - return data - return '.' - - try: - print("packet length:", len(data)) - for i in range(1, 7): - f = sys._getframe(i) - print("call[%d]: %s (line %d)" % (i, f.f_code.co_name, f.f_lineno)) - print("-" * 66) - except ValueError: - pass - dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)] - for d in dump_data: - print(' '.join("{:02X}".format(byte2int(x)) for x in d) + - ' ' * (16 - len(d)) + ' ' * 2 + - ''.join(printable(x) for x in d)) - print("-" * 66) - print() - - -class MysqlPacket(object): - """Representation of a MySQL response packet. - - Provides an interface for reading/parsing the packet results. - """ - __slots__ = ('_position', '_data') - - def __init__(self, data, encoding): - self._position = 0 - self._data = data - - def get_all_data(self): - return self._data - - def read(self, size): - """Read the first 'size' bytes in packet and advance cursor past them.""" - result = self._data[self._position:(self._position+size)] - if len(result) != size: - error = ('Result length not requested length:\n' - 'Expected=%s. Actual=%s. Position: %s. Data Length: %s' - % (size, len(result), self._position, len(self._data))) - if DEBUG: - print(error) - self.dump() - raise AssertionError(error) - self._position += size - return result - - def read_all(self): - """Read all remaining data in the packet. - - (Subsequent read() will return errors.) 
- """ - result = self._data[self._position:] - self._position = None # ensure no subsequent read() - return result - - def advance(self, length): - """Advance the cursor in data buffer 'length' bytes.""" - new_position = self._position + length - if new_position < 0 or new_position > len(self._data): - raise Exception('Invalid advance amount (%s) for cursor. ' - 'Position=%s' % (length, new_position)) - self._position = new_position - - def rewind(self, position=0): - """Set the position of the data buffer cursor to 'position'.""" - if position < 0 or position > len(self._data): - raise Exception("Invalid position to rewind cursor to: %s." % position) - self._position = position - - def get_bytes(self, position, length=1): - """Get 'length' bytes starting at 'position'. - - Position is start of payload (first four packet header bytes are not - included) starting at index '0'. - - No error checking is done. If requesting outside end of buffer - an empty string (or string shorter than 'length') may be returned! - """ - return self._data[position:(position+length)] - - if PY2: - def read_uint8(self): - result = ord(self._data[self._position]) - self._position += 1 - return result - else: - def read_uint8(self): - result = self._data[self._position] - self._position += 1 - return result - - def read_uint16(self): - result = struct.unpack_from('= 7 - - def is_eof_packet(self): - # http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet - # Caution: \xFE may be LengthEncodedInteger. - # If \xFE is LengthEncodedInteger header, 8bytes followed. 
- return self._data[0:1] == b'\xfe' and len(self._data) < 9 - - def is_auth_switch_request(self): - # http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest - return self._data[0:1] == b'\xfe' - - def is_extra_auth_data(self): - # https://dev.mysql.com/doc/internals/en/successful-authentication.html - return self._data[0:1] == b'\x01' - - def is_resultset_packet(self): - field_count = ord(self._data[0:1]) - return 1 <= field_count <= 250 - - def is_load_local_packet(self): - return self._data[0:1] == b'\xfb' - - def is_error_packet(self): - return self._data[0:1] == b'\xff' - - def check_error(self): - if self.is_error_packet(): - self.rewind() - self.advance(1) # field_count == error (we already know that) - errno = self.read_uint16() - if DEBUG: print("errno =", errno) - err.raise_mysql_exception(self._data) - - def dump(self): - dump_packet(self._data) - - -class FieldDescriptorPacket(MysqlPacket): - """A MysqlPacket that represents a specific column's metadata in the result. - - Parsing is automatically done and the results are exported via public - attributes on the class such as: db, table_name, name, length, type_code. - """ - - def __init__(self, data, encoding): - MysqlPacket.__init__(self, data, encoding) - self._parse_field_descriptor(encoding) - - def _parse_field_descriptor(self, encoding): - """Parse the 'Field Descriptor' (Metadata) packet. - - This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). 
- """ - self.catalog = self.read_length_coded_string() - self.db = self.read_length_coded_string() - self.table_name = self.read_length_coded_string().decode(encoding) - self.org_table = self.read_length_coded_string().decode(encoding) - self.name = self.read_length_coded_string().decode(encoding) - self.org_name = self.read_length_coded_string().decode(encoding) - self.charsetnr, self.length, self.type_code, self.flags, self.scale = ( - self.read_struct(' "6000" and name LIKE "%超" ) - - "id","eq",1 表示 id = '1' - - eq 等于 - neq 不等于 - gt 大于 - egt 大于等于 - lt 小于 - elt 小于等于 - like LIKE - """ - self.__where=where - self.__wheres=wheres - # print(len(self.__wheres)) - return self - __field='*' - def field(self,field = "*"): - """设置过滤显示条件 - - 参数 field:str 字符串 - """ - self.__field=field - return self - __limit=[] - def limit(self,offset, length = None): - """设置查询数量 - - 参数 offset:int 起始位置 - - 参数 length:int 查询数量 - """ - self.__limit=[offset,length] - return self - def page(self,pagenow=1, length = 20): - """设置分页查询 - - 参数 pagenow:int 页码 - - 参数 length:int 查询数量 - """ - offset=(pagenow-1)*length - self.__limit=[offset,length] - return self - __order=None - __order1=None - def order(self,strs=None,*strs1): - """设置排序查询 - - 传入方式: - - "id desc" - - "id",'name','appkey','asc' - - "id",'name','appkey' 不包含asc或desc的情况下 默认是desc - - ['id','taskid',{"task_id":"desc"}] - """ - self.__order=strs - self.__order1=strs1 - return self - __group=None - __group1=None - def group(self,strs=None,*strs1): - """设置分组查询 - - 传入方式: - - "id,name" - - "id","name" - """ - self.__group=strs - self.__group1=strs1 - return self - __having=None - def having(self,strs=None): - """用于配合group方法完成从分组的结果中筛选(通常是聚合条件)数据 - - 参数 strs:string 如:"count(time)>3" - """ - self.__having=strs - return self - - __alias=None - def alias(self,strs=None): - """用于设置当前数据表的别名,便于使用其他的连贯操作例如join方法等。 - - 参数 strs:string 默认当前表作为别名 - """ - if strs: - self.__alias=strs - else: - self.__alias=self.__table - return self - __join=None - 
__joinstr='' - def join(self,strs,on=None,types='INNER'): - """用于根据两个或多个表中的列之间的关系,从这些表中查询数据 - - 参数 strs string 如:"test t1" test表设置别名t1 - - 参数 on string 如:"t1.id=t2.pid" 设置连接条件 - - 参数 types 支持INNER、LEFT、RIGHT、FULL 默认INNER - - """ - joinstr='' - if strs and on: - joinstr=joinstr+types+" JOIN "+strs+" ON "+on+" " - if joinstr: - self.__joinstr=self.__joinstr+joinstr - return self - __distinct=None - def distinct(self,bools=None): - "用于返回唯一不同的值,配合field方法使用生效,消除所有重复的记录,并只获取唯一一次记录。" - self.__distinct=bools - return self - __lock=None - def lock(self,strs=None): - """用于数据库的锁机制,在查询或者执行操作的时候使用 (暂未实现) - - 排他锁 (Exclusive lock) - - 共享锁 (Shared lock) - - 参数 strs 如:True表示自动在生成的SQL语句最后加上FOR UPDATE, - - - """ - # self.__lock=strs - return self - def __setsql(self,types=None,data = {}): - """生成sql语句""" - if types==None: - self.__sql="SELECT" - if self.__distinct and self.__field: - self.__sql=self.__sql+" DISTINCT" - if self.__alias: - self.__sql=self.__sql+" %s FROM %s %s" % (self.__field,self.__table,self.__alias) - else: - self.__sql=self.__sql+" %s FROM %s" % (self.__field,self.__table) - elif types=='count': - self.__sql="SELECT COUNT(%s) FROM %s" % (self.__field,self.__table) - elif types=='max': - self.__sql="SELECT MAX(%s) FROM %s" % (self.__field,self.__table) - elif types=='min': - self.__sql="SELECT MIN(%s) FROM %s" % (self.__field,self.__table) - elif types=='avg': - self.__sql="SELECT AVG(%s) FROM %s" % (self.__field,self.__table) - elif types=='sum': - self.__sql="SELECT SUM(%s) FROM %s" % (self.__field,self.__table) - elif types=='update': - strs='' - for k in data: - if isinstance(data[k],str): - strs=strs+" %s = '%s' ," % (k,self.escape_string(data[k])) - else: - strs=strs+" %s = %s ," % (k,data[k]) - strs=strs[:-1] - self.__sql="UPDATE %s SET %s" % (self.__table,strs) - # print(self.__sql) - elif types=='delete': - self.__sql="DELETE FROM %s" % (self.__table) - elif types=='insert': - if isinstance(data,dict): - strs='' - val='' - for k in data: - strs=strs+"%s," 
% k - if isinstance(data[k],str): - val=val+"'%s'," % self.escape_string(data[k]) - else: - val=val+"%s," % data[k] - strs=strs[:-1] - val=val[:-1] - self.__sql="INSERT INTO "+str(self.__table)+" ("+strs+") VALUES ("+val+")" - # print(self.__sql) - elif isinstance(data,list): - strs='' - val='(' - for k in data[0]: - strs=strs+" , "+k - for k in data: - for j in k: - if isinstance(k[j],str): - val=val+"'"+str(k[j])+"'," - else: - val=val+str(k[j])+"," - val=val[:-1] - val=val+"),(" - val=val[:-2] - self.__sql="INSERT INTO "+str(self.__table)+" ("+strs[3:]+") VALUES "+val - - if self.__joinstr: - # print(self.__sql) - self.__sql=self.__sql+" "+self.__joinstr - if self.__where: - if isinstance(self.__where,str): - if self.__wheres: - if len(self.__wheres) == 2: - if isinstance(self.__wheres[1],list): - self.__sql=self.__sql + " WHERE %s %s (" % (self.__where,self.__operator(self.__wheres[0])) - for k in self.__wheres[1]: - self.__sql=self.__sql+str(k)+"," - self.__sql=self.__sql[:-1]+")" - else: - self.__sql=self.__sql + " WHERE %s %s '%s'" % (self.__where,self.__operator(self.__wheres[0]),self.__wheres[1]) - elif len(self.__wheres) > 2: - if self.__wheres[0]=='in': - strs=str(self.__wheres[1]) - i=0 - for k in self.__wheres: - if i > 1: - strs=strs+","+str(k) - i=i+1 - self.__sql=self.__sql + " WHERE %s in (%s)" % (self.__where,strs) - else: - self.__sql=self.__sql + " WHERE %s = '%s'" % (self.__where,self.__wheres[0]) - else: - self.__sql=self.__sql + " WHERE %s" % self.__where - elif isinstance(self.__where,list): - self.__sql=self.__sql + " WHERE %s" % self.__listTrans() - else: - print("参数where类型错误",type(self.__where),self.__where) - if self.__order: - s='' - if isinstance(self.__order,list): - for strs in self.__order: - if isinstance(strs,str): - s=s+strs+"," - else: - pass - for key in strs: - s=s+key+" "+strs[key] - s=s+"," - s=s[:-1] - if isinstance(self.__order,str): - if self.__order1: - if len(self.__order1) > 1: - if self.__order1[len(self.__order1)-1] 
== 'desc' or self.__order1[len(self.__order1)-1] == 'asc': - i=0 - while i 57 and - buf[0] == 0x50 and buf[1] == 0x4B and - buf[2] == 0x3 and buf[3] == 0x4 and - buf[30] == 0x6D and buf[31] == 0x69 and - buf[32] == 0x6D and buf[33] == 0x65 and - buf[34] == 0x74 and buf[35] == 0x79 and - buf[36] == 0x70 and buf[37] == 0x65 and - buf[38] == 0x61 and buf[39] == 0x70 and - buf[40] == 0x70 and buf[41] == 0x6C and - buf[42] == 0x69 and buf[43] == 0x63 and - buf[44] == 0x61 and buf[45] == 0x74 and - buf[46] == 0x69 and buf[47] == 0x6F and - buf[48] == 0x6E and buf[49] == 0x2F and - buf[50] == 0x65 and buf[51] == 0x70 and - buf[52] == 0x75 and buf[53] == 0x62 and - buf[54] == 0x2B and buf[55] == 0x7A and - buf[56] == 0x69 and buf[57] == 0x70) - - -class Zip(Type): - """ - Implements the Zip archive type matcher. - """ - MIME = 'application/zip' - EXTENSION = 'zip' - - def __init__(self): - super(Zip, self).__init__( - mime=Zip.MIME, - extension=Zip.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x50 and buf[1] == 0x4B and - (buf[2] == 0x3 or buf[2] == 0x5 or - buf[2] == 0x7) and - (buf[3] == 0x4 or buf[3] == 0x6 or - buf[3] == 0x8)) - - -class Tar(Type): - """ - Implements the Tar archive type matcher. - """ - MIME = 'application/x-tar' - EXTENSION = 'tar' - - def __init__(self): - super(Tar, self).__init__( - mime=Tar.MIME, - extension=Tar.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 261 and - buf[257] == 0x75 and - buf[258] == 0x73 and - buf[259] == 0x74 and - buf[260] == 0x61 and - buf[261] == 0x72) - - -class Rar(Type): - """ - Implements the RAR archive type matcher. 
- """ - MIME = 'application/x-rar-compressed' - EXTENSION = 'rar' - - def __init__(self): - super(Rar, self).__init__( - mime=Rar.MIME, - extension=Rar.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 6 and - buf[0] == 0x52 and - buf[1] == 0x61 and - buf[2] == 0x72 and - buf[3] == 0x21 and - buf[4] == 0x1A and - buf[5] == 0x7 and - (buf[6] == 0x0 or - buf[6] == 0x1)) - - -class Gz(Type): - """ - Implements the GZ archive type matcher. - """ - MIME = 'application/gzip' - EXTENSION = 'gz' - - def __init__(self): - super(Gz, self).__init__( - mime=Gz.MIME, - extension=Gz.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 2 and - buf[0] == 0x1F and - buf[1] == 0x8B and - buf[2] == 0x8) - - -class Bz2(Type): - """ - Implements the BZ2 archive type matcher. - """ - MIME = 'application/x-bzip2' - EXTENSION = 'bz2' - - def __init__(self): - super(Bz2, self).__init__( - mime=Bz2.MIME, - extension=Bz2.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 2 and - buf[0] == 0x42 and - buf[1] == 0x5A and - buf[2] == 0x68) - - -class SevenZ(Type): - """ - Implements the SevenZ (7z) archive type matcher. - """ - MIME = 'application/x-7z-compressed' - EXTENSION = '7z' - - def __init__(self): - super(SevenZ, self).__init__( - mime=SevenZ.MIME, - extension=SevenZ.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 5 and - buf[0] == 0x37 and - buf[1] == 0x7A and - buf[2] == 0xBC and - buf[3] == 0xAF and - buf[4] == 0x27 and - buf[5] == 0x1C) - - -class Pdf(Type): - """ - Implements the PDF archive type matcher. - """ - MIME = 'application/pdf' - EXTENSION = 'pdf' - - def __init__(self): - super(Pdf, self).__init__( - mime=Pdf.MIME, - extension=Pdf.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x25 and - buf[1] == 0x50 and - buf[2] == 0x44 and - buf[3] == 0x46) - - -class Exe(Type): - """ - Implements the EXE archive type matcher. 
- """ - MIME = 'application/x-msdownload' - EXTENSION = 'exe' - - def __init__(self): - super(Exe, self).__init__( - mime=Exe.MIME, - extension=Exe.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 1 and - buf[0] == 0x4D and - buf[1] == 0x5A) - - -class Swf(Type): - """ - Implements the SWF archive type matcher. - """ - MIME = 'application/x-shockwave-flash' - EXTENSION = 'swf' - - def __init__(self): - super(Swf, self).__init__( - mime=Swf.MIME, - extension=Swf.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 2 and - (buf[0] == 0x43 or - buf[0] == 0x46) and - buf[1] == 0x57 and - buf[2] == 0x53) - - -class Rtf(Type): - """ - Implements the RTF archive type matcher. - """ - MIME = 'application/rtf' - EXTENSION = 'rtf' - - def __init__(self): - super(Rtf, self).__init__( - mime=Rtf.MIME, - extension=Rtf.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 4 and - buf[0] == 0x7B and - buf[1] == 0x5C and - buf[2] == 0x72 and - buf[3] == 0x74 and - buf[4] == 0x66) - - -class Nes(Type): - """ - Implements the NES archive type matcher. - """ - MIME = 'application/x-nintendo-nes-rom' - EXTENSION = 'nes' - - def __init__(self): - super(Nes, self).__init__( - mime=Nes.MIME, - extension=Nes.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x4E and - buf[1] == 0x45 and - buf[2] == 0x53 and - buf[3] == 0x1A) - - -class Crx(Type): - """ - Implements the CRX archive type matcher. - """ - MIME = 'application/x-google-chrome-extension' - EXTENSION = 'crx' - - def __init__(self): - super(Crx, self).__init__( - mime=Crx.MIME, - extension=Crx.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x43 and - buf[1] == 0x72 and - buf[2] == 0x32 and - buf[3] == 0x34) - - -class Cab(Type): - """ - Implements the CAB archive type matcher. 
- """ - MIME = 'application/vnd.ms-cab-compressed' - EXTENSION = 'cab' - - def __init__(self): - super(Cab, self).__init__( - mime=Cab.MIME, - extension=Cab.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - ((buf[0] == 0x4D and - buf[1] == 0x53 and - buf[2] == 0x43 and - buf[3] == 0x46) or - (buf[0] == 0x49 and - buf[1] == 0x53 and - buf[2] == 0x63 and - buf[3] == 0x28))) - - -class Eot(Type): - """ - Implements the EOT archive type matcher. - """ - MIME = 'application/octet-stream' - EXTENSION = 'eot' - - def __init__(self): - super(Eot, self).__init__( - mime=Eot.MIME, - extension=Eot.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 35 and - buf[34] == 0x4C and - buf[35] == 0x50 and - ((buf[8] == 0x02 and - buf[9] == 0x00 and - buf[10] == 0x01) or - (buf[8] == 0x01 and - buf[9] == 0x00 and - buf[10] == 0x00) or - (buf[8] == 0x02 and - buf[9] == 0x00 and - buf[10] == 0x02))) - - -class Ps(Type): - """ - Implements the PS archive type matcher. - """ - MIME = 'application/postscript' - EXTENSION = 'ps' - - def __init__(self): - super(Ps, self).__init__( - mime=Ps.MIME, - extension=Ps.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 1 and - buf[0] == 0x25 and - buf[1] == 0x21) - - -class Xz(Type): - """ - Implements the XS archive type matcher. - """ - MIME = 'application/x-xz' - EXTENSION = 'xz' - - def __init__(self): - super(Xz, self).__init__( - mime=Xz.MIME, - extension=Xz.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 5 and - buf[0] == 0xFD and - buf[1] == 0x37 and - buf[2] == 0x7A and - buf[3] == 0x58 and - buf[4] == 0x5A and - buf[5] == 0x00) - - -class Sqlite(Type): - """ - Implements the Sqlite DB archive type matcher. 
- """ - MIME = 'application/x-sqlite3' - EXTENSION = 'sqlite' - - def __init__(self): - super(Sqlite, self).__init__( - mime=Sqlite.MIME, - extension=Sqlite.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x53 and - buf[1] == 0x51 and - buf[2] == 0x4C and - buf[3] == 0x69) - - -class Deb(Type): - """ - Implements the DEB archive type matcher. - """ - MIME = 'application/x-deb' - EXTENSION = 'deb' - - def __init__(self): - super(Deb, self).__init__( - mime=Deb.MIME, - extension=Deb.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 20 and - buf[0] == 0x21 and - buf[1] == 0x3C and - buf[2] == 0x61 and - buf[3] == 0x72 and - buf[4] == 0x63 and - buf[5] == 0x68 and - buf[6] == 0x3E and - buf[7] == 0x0A and - buf[8] == 0x64 and - buf[9] == 0x65 and - buf[10] == 0x62 and - buf[11] == 0x69 and - buf[12] == 0x61 and - buf[13] == 0x6E and - buf[14] == 0x2D and - buf[15] == 0x62 and - buf[16] == 0x69 and - buf[17] == 0x6E and - buf[18] == 0x61 and - buf[19] == 0x72 and - buf[20] == 0x79) - - -class Ar(Type): - """ - Implements the AR archive type matcher. - """ - MIME = 'application/x-unix-archive' - EXTENSION = 'ar' - - def __init__(self): - super(Ar, self).__init__( - mime=Ar.MIME, - extension=Ar.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 6 and - buf[0] == 0x21 and - buf[1] == 0x3C and - buf[2] == 0x61 and - buf[3] == 0x72 and - buf[4] == 0x63 and - buf[5] == 0x68 and - buf[6] == 0x3E) - - -class Z(Type): - """ - Implements the Z archive type matcher. - """ - MIME = 'application/x-compress' - EXTENSION = 'Z' - - def __init__(self): - super(Z, self).__init__( - mime=Z.MIME, - extension=Z.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 1 and - ((buf[0] == 0x1F and - buf[1] == 0xA0) or - (buf[0] == 0x1F and - buf[1] == 0x9D))) - - -class Lz(Type): - """ - Implements the Lz archive type matcher. 
- """ - MIME = 'application/x-lzip' - EXTENSION = 'lz' - - def __init__(self): - super(Lz, self).__init__( - mime=Lz.MIME, - extension=Lz.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x4C and - buf[1] == 0x5A and - buf[2] == 0x49 and - buf[3] == 0x50) diff --git a/utill/filetype/types/audio.py b/utill/filetype/types/audio.py deleted file mode 100644 index 5dafba5..0000000 --- a/utill/filetype/types/audio.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import absolute_import - -from .base import Type - - -class Midi(Type): - """ - Implements the Midi audio type matcher. - """ - MIME = 'audio/midi' - EXTENSION = 'midi' - - def __init__(self): - super(Midi, self).__init__( - mime=Midi.MIME, - extension=Midi.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x4D and - buf[1] == 0x54 and - buf[2] == 0x68 and - buf[3] == 0x64) - - -class Mp3(Type): - """ - Implements the MP3 audio type matcher. - """ - MIME = 'audio/mpeg' - EXTENSION = 'mp3' - - def __init__(self): - super(Mp3, self).__init__( - mime=Mp3.MIME, - extension=Mp3.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 2 and - ((buf[0] == 0x49 and - buf[1] == 0x44 and - buf[2] == 0x33) or - (buf[0] == 0xFF and - buf[1] == 0xfb))) - - -class M4a(Type): - """ - Implements the M4A audio type matcher. - """ - MIME = 'audio/m4a' - EXTENSION = 'm4a' - - def __init__(self): - super(M4a, self).__init__( - mime=M4a.MIME, - extension=M4a.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 10 and - ((buf[4] == 0x66 and - buf[5] == 0x74 and - buf[6] == 0x79 and - buf[7] == 0x70 and - buf[8] == 0x4D and - buf[9] == 0x34 and - buf[10] == 0x41) or - (buf[0] == 0x4D and - buf[1] == 0x34 and - buf[2] == 0x41 and - buf[3] == 0x20))) - - -class Ogg(Type): - """ - Implements the OGG audio type matcher. 
- """ - MIME = 'audio/ogg' - EXTENSION = 'ogg' - - def __init__(self): - super(Ogg, self).__init__( - mime=Ogg.MIME, - extension=Ogg.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x4F and - buf[1] == 0x67 and - buf[2] == 0x67 and - buf[3] == 0x53) - - -class Flac(Type): - """ - Implements the FLAC audio type matcher. - """ - MIME = 'audio/x-flac' - EXTENSION = 'flac' - - def __init__(self): - super(Flac, self).__init__( - mime=Flac.MIME, - extension=Flac.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x66 and - buf[1] == 0x4C and - buf[2] == 0x61 and - buf[3] == 0x43) - - -class Wav(Type): - """ - Implements the WAV audio type matcher. - """ - MIME = 'audio/x-wav' - EXTENSION = 'wav' - - def __init__(self): - super(Wav, self).__init__( - mime=Wav.MIME, - extension=Wav.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 11 and - buf[0] == 0x52 and - buf[1] == 0x49 and - buf[2] == 0x46 and - buf[3] == 0x46 and - buf[8] == 0x57 and - buf[9] == 0x41 and - buf[10] == 0x56 and - buf[11] == 0x45) - - -class Amr(Type): - """ - Implements the AMR audio type matcher. - """ - MIME = 'audio/amr' - EXTENSION = 'amr' - - def __init__(self): - super(Amr, self).__init__( - mime=Amr.MIME, - extension=Amr.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 11 and - buf[0] == 0x23 and - buf[1] == 0x21 and - buf[2] == 0x41 and - buf[3] == 0x4D and - buf[4] == 0x52 and - buf[5] == 0x0A) diff --git a/utill/filetype/types/base.py b/utill/filetype/types/base.py deleted file mode 100644 index 8213da1..0000000 --- a/utill/filetype/types/base.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- - - -class Type(object): - """ - Represents the file type object inherited by - specific file type matchers. - Provides convenient accessor and helper methods. 
- """ - def __init__(self, mime, extension): - self.__mime = mime - self.__extension = extension - - @property - def mime(self): - return self.__mime - - @property - def extension(self): - return self.__extension - - @property - def is_extension(self, extension): - return self.__extension is extension - - @property - def is_mime(self, mime): - return self.__mime is mime - - def match(self, buf): - raise NotImplementedError diff --git a/utill/filetype/types/font.py b/utill/filetype/types/font.py deleted file mode 100644 index bdecf39..0000000 --- a/utill/filetype/types/font.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import absolute_import - -from .base import Type - - -class Woff(Type): - """ - Implements the WOFF font type matcher. - """ - MIME = 'application/font-woff' - EXTENSION = 'woff' - - def __init__(self): - super(Woff, self).__init__( - mime=Woff.MIME, - extension=Woff.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 7 and - buf[0] == 0x77 and - buf[1] == 0x4F and - buf[2] == 0x46 and - buf[3] == 0x46 and - buf[4] == 0x00 and - buf[5] == 0x01 and - buf[6] == 0x00 and - buf[7] == 0x00) - - -class Woff2(Type): - """ - Implements the WOFF2 font type matcher. - """ - MIME = 'application/font-woff' - EXTENSION = 'woff2' - - def __init__(self): - super(Woff2, self).__init__( - mime=Woff2.MIME, - extension=Woff2.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 7 and - buf[0] == 0x77 and - buf[1] == 0x4F and - buf[2] == 0x46 and - buf[3] == 0x32 and - buf[4] == 0x00 and - buf[5] == 0x01 and - buf[6] == 0x00 and - buf[7] == 0x00) - - -class Ttf(Type): - """ - Implements the TTF font type matcher. 
- """ - MIME = 'application/font-sfnt' - EXTENSION = 'ttf' - - def __init__(self): - super(Ttf, self).__init__( - mime=Ttf.MIME, - extension=Ttf.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 4 and - buf[0] == 0x00 and - buf[1] == 0x01 and - buf[2] == 0x00 and - buf[3] == 0x00 and - buf[4] == 0x00) - - -class Otf(Type): - """ - Implements the OTF font type matcher. - """ - MIME = 'application/font-sfnt' - EXTENSION = 'otf' - - def __init__(self): - super(Otf, self).__init__( - mime=Otf.MIME, - extension=Otf.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 4 and - buf[0] == 0x4F and - buf[1] == 0x54 and - buf[2] == 0x54 and - buf[3] == 0x4F and - buf[4] == 0x00) diff --git a/utill/filetype/types/image.py b/utill/filetype/types/image.py deleted file mode 100644 index 1fd6e17..0000000 --- a/utill/filetype/types/image.py +++ /dev/null @@ -1,279 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import absolute_import - -from .base import Type -from .isobmff import IsoBmff - - -class Jpeg(Type): - """ - Implements the JPEG image type matcher. - """ - MIME = 'image/jpeg' - EXTENSION = 'jpg' - - def __init__(self): - super(Jpeg, self).__init__( - mime=Jpeg.MIME, - extension=Jpeg.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 2 and - buf[0] == 0xFF and - buf[1] == 0xD8 and - buf[2] == 0xFF) - - -class Jpx(Type): - """ - Implements the JPEG2000 image type matcher. - """ - - MIME = "image/jpx" - EXTENSION = "jpx" - - def __init__(self): - super(Jpx, self).__init__(mime=Jpx.MIME, extension=Jpx.EXTENSION) - - def match(self, buf): - return ( - len(buf) > 50 - and buf[0] == 0x00 - and buf[1] == 0x00 - and buf[2] == 0x00 - and buf[3] == 0x0C - and buf[16:24] == b"ftypjp2 " - ) - - -class Png(Type): - """ - Implements the PNG image type matcher. 
- """ - MIME = 'image/png' - EXTENSION = 'png' - - def __init__(self): - super(Png, self).__init__( - mime=Png.MIME, - extension=Png.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x89 and - buf[1] == 0x50 and - buf[2] == 0x4E and - buf[3] == 0x47) - - -class Gif(Type): - """ - Implements the GIF image type matcher. - """ - MIME = 'image/gif' - EXTENSION = 'gif' - - def __init__(self): - super(Gif, self).__init__( - mime=Gif.MIME, - extension=Gif.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 2 and - buf[0] == 0x47 and - buf[1] == 0x49 and - buf[2] == 0x46) - - -class Webp(Type): - """ - Implements the WEBP image type matcher. - """ - MIME = 'image/webp' - EXTENSION = 'webp' - - def __init__(self): - super(Webp, self).__init__( - mime=Webp.MIME, - extension=Webp.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 11 and - buf[8] == 0x57 and - buf[9] == 0x45 and - buf[10] == 0x42 and - buf[11] == 0x50) - - -class Cr2(Type): - """ - Implements the CR2 image type matcher. - """ - MIME = 'image/x-canon-cr2' - EXTENSION = 'cr2' - - def __init__(self): - super(Cr2, self).__init__( - mime=Cr2.MIME, - extension=Cr2.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 9 and - ((buf[0] == 0x49 and buf[1] == 0x49 and - buf[2] == 0x2A and buf[3] == 0x0) or - (buf[0] == 0x4D and buf[1] == 0x4D and - buf[2] == 0x0 and buf[3] == 0x2A)) and - buf[8] == 0x43 and buf[9] == 0x52) - - -class Tiff(Type): - """ - Implements the TIFF image type matcher. - """ - MIME = 'image/tiff' - EXTENSION = 'tif' - - def __init__(self): - super(Tiff, self).__init__( - mime=Tiff.MIME, - extension=Tiff.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 3 and - ((buf[0] == 0x49 and buf[1] == 0x49 and - buf[2] == 0x2A and buf[3] == 0x0) or - (buf[0] == 0x4D and buf[1] == 0x4D and - buf[2] == 0x0 and buf[3] == 0x2A))) - - -class Bmp(Type): - """ - Implements the BMP image type matcher. 
- """ - MIME = 'image/bmp' - EXTENSION = 'bmp' - - def __init__(self): - super(Bmp, self).__init__( - mime=Bmp.MIME, - extension=Bmp.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 1 and - buf[0] == 0x42 and - buf[1] == 0x4D) - - -class Jxr(Type): - """ - Implements the JXR image type matcher. - """ - MIME = 'image/vnd.ms-photo' - EXTENSION = 'jxr' - - def __init__(self): - super(Jxr, self).__init__( - mime=Jxr.MIME, - extension=Jxr.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 2 and - buf[0] == 0x49 and - buf[1] == 0x49 and - buf[2] == 0xBC) - - -class Psd(Type): - """ - Implements the PSD image type matcher. - """ - MIME = 'image/vnd.adobe.photoshop' - EXTENSION = 'psd' - - def __init__(self): - super(Psd, self).__init__( - mime=Psd.MIME, - extension=Psd.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x38 and - buf[1] == 0x42 and - buf[2] == 0x50 and - buf[3] == 0x53) - - -class Ico(Type): - """ - Implements the ICO image type matcher. - """ - MIME = 'image/x-icon' - EXTENSION = 'ico' - - def __init__(self): - super(Ico, self).__init__( - mime=Ico.MIME, - extension=Ico.EXTENSION, - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x00 and - buf[1] == 0x00 and - buf[2] == 0x01 and - buf[3] == 0x00) - - -class Heic(IsoBmff): - """ - Implements the HEIC image type matcher. 
- """ - MIME = 'image/heic' - EXTENSION = 'heic' - - def __init__(self): - super(Heic, self).__init__( - mime=Heic.MIME, - extension=Heic.EXTENSION - ) - - def match(self, buf): - if not self._is_isobmff(buf): - return False - - major_brand, minor_version, compatible_brands = self._get_ftyp(buf) - if major_brand == 'heic': - return True - if major_brand in ['mif1', 'msf1'] and 'heic' in compatible_brands: - return True - return False - - -class Dcm(Type): - - MIME = 'application/dicom' - EXTENSION = 'dcm' - OFFSET = 128 - - def __init__(self): - super(Dcm, self).__init__( - mime=Dcm.MIME, - extension=Dcm.EXTENSION - ) - - def match(self, buf): - return (len(buf) > Dcm.OFFSET + 4 and - buf[Dcm.OFFSET + 0] == 0x44 and - buf[Dcm.OFFSET + 1] == 0x49 and - buf[Dcm.OFFSET + 2] == 0x43 and - buf[Dcm.OFFSET + 3] == 0x4D) diff --git a/utill/filetype/types/isobmff.py b/utill/filetype/types/isobmff.py deleted file mode 100644 index 3d5a1fc..0000000 --- a/utill/filetype/types/isobmff.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -import codecs - -from .base import Type - - -class IsoBmff(Type): - """ - Implements the ISO-BMFF base type. 
- """ - def __init__(self, mime, extension): - super(IsoBmff, self).__init__( - mime=mime, - extension=extension - ) - - def _is_isobmff(self, buf): - if len(buf) < 16 or buf[4:8] != b'ftyp': - return False - if len(buf) < int(codecs.encode(buf[0:4], 'hex'), 16): - return False - return True - - def _get_ftyp(self, buf): - ftyp_len = int(codecs.encode(buf[0:4], 'hex'), 16) - major_brand = buf[8:12].decode() - minor_version = int(codecs.encode(buf[12:16], 'hex'), 16) - compatible_brands = [] - for i in range(16, ftyp_len, 4): - compatible_brands.append(buf[i:i+4].decode()) - - return major_brand, minor_version, compatible_brands diff --git a/utill/filetype/types/video.py b/utill/filetype/types/video.py deleted file mode 100644 index 9955397..0000000 --- a/utill/filetype/types/video.py +++ /dev/null @@ -1,216 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import absolute_import - -from .base import Type -from .isobmff import IsoBmff - - -class Mp4(IsoBmff): - """ - Implements the MP4 video type matcher. - """ - MIME = 'video/mp4' - EXTENSION = 'mp4' - - def __init__(self): - super(Mp4, self).__init__( - mime=Mp4.MIME, - extension=Mp4.EXTENSION - ) - - def match(self, buf): - if not self._is_isobmff(buf): - return False - - major_brand, minor_version, compatible_brands = self._get_ftyp(buf) - return major_brand in ['mp41', 'mp42'] - - -class M4v(Type): - """ - Implements the M4V video type matcher. - """ - MIME = 'video/x-m4v' - EXTENSION = 'm4v' - - def __init__(self): - super(M4v, self).__init__( - mime=M4v.MIME, - extension=M4v.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 10 and - buf[0] == 0x0 and buf[1] == 0x0 and - buf[2] == 0x0 and buf[3] == 0x1C and - buf[4] == 0x66 and buf[5] == 0x74 and - buf[6] == 0x79 and buf[7] == 0x70 and - buf[8] == 0x4D and buf[9] == 0x34 and - buf[10] == 0x56) - - -class Mkv(Type): - """ - Implements the MKV video type matcher. 
- """ - MIME = 'video/x-matroska' - EXTENSION = 'mkv' - - def __init__(self): - super(Mkv, self).__init__( - mime=Mkv.MIME, - extension=Mkv.EXTENSION - ) - - def match(self, buf): - return ((len(buf) > 15 and - buf[0] == 0x1A and buf[1] == 0x45 and - buf[2] == 0xDF and buf[3] == 0xA3 and - buf[4] == 0x93 and buf[5] == 0x42 and - buf[6] == 0x82 and buf[7] == 0x88 and - buf[8] == 0x6D and buf[9] == 0x61 and - buf[10] == 0x74 and buf[11] == 0x72 and - buf[12] == 0x6F and buf[13] == 0x73 and - buf[14] == 0x6B and buf[15] == 0x61) or - (len(buf) > 38 and - buf[31] == 0x6D and buf[32] == 0x61 and - buf[33] == 0x74 and buf[34] == 0x72 and - buf[35] == 0x6f and buf[36] == 0x73 and - buf[37] == 0x6B and buf[38] == 0x61)) - - -class Webm(Type): - """ - Implements the WebM video type matcher. - """ - MIME = 'video/webm' - EXTENSION = 'webm' - - def __init__(self): - super(Webm, self).__init__( - mime=Webm.MIME, - extension=Webm.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x1A and - buf[1] == 0x45 and - buf[2] == 0xDF and - buf[3] == 0xA3) - - -class Mov(IsoBmff): - """ - Implements the MOV video type matcher. - """ - MIME = 'video/quicktime' - EXTENSION = 'mov' - - def __init__(self): - super(Mov, self).__init__( - mime=Mov.MIME, - extension=Mov.EXTENSION - ) - - def match(self, buf): - if not self._is_isobmff(buf): - return False - - major_brand, minor_version, compatible_brands = self._get_ftyp(buf) - return major_brand == 'qt ' - - -class Avi(Type): - """ - Implements the AVI video type matcher. - """ - MIME = 'video/x-msvideo' - EXTENSION = 'avi' - - def __init__(self): - super(Avi, self).__init__( - mime=Avi.MIME, - extension=Avi.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 10 and - buf[0] == 0x52 and - buf[1] == 0x49 and - buf[2] == 0x46 and - buf[3] == 0x46 and - buf[8] == 0x41 and - buf[9] == 0x56 and - buf[10] == 0x49) - - -class Wmv(Type): - """ - Implements the WMV video type matcher. 
- """ - MIME = 'video/x-ms-wmv' - EXTENSION = 'wmv' - - def __init__(self): - super(Wmv, self).__init__( - mime=Wmv.MIME, - extension=Wmv.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 9 and - buf[0] == 0x30 and - buf[1] == 0x26 and - buf[2] == 0xB2 and - buf[3] == 0x75 and - buf[4] == 0x8E and - buf[5] == 0x66 and - buf[6] == 0xCF and - buf[7] == 0x11 and - buf[8] == 0xA6 and - buf[9] == 0xD9) - - -class Flv(Type): - """ - Implements the FLV video type matcher. - """ - MIME = 'video/x-flv' - EXTENSION = 'flv' - - def __init__(self): - super(Flv, self).__init__( - mime=Flv.MIME, - extension=Flv.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x46 and - buf[1] == 0x4C and - buf[2] == 0x56 and - buf[3] == 0x01) - - -class Mpeg(Type): - """ - Implements the MPEG video type matcher. - """ - MIME = 'video/mpeg' - EXTENSION = 'mpg' - - def __init__(self): - super(Mpeg, self).__init__( - mime=Mpeg.MIME, - extension=Mpeg.EXTENSION - ) - - def match(self, buf): - return (len(buf) > 3 and - buf[0] == 0x0 and - buf[1] == 0x0 and - buf[2] == 0x1 and - buf[3] >= 0xb0 and - buf[3] <= 0xbf) diff --git a/utill/filetype/utils.py b/utill/filetype/utils.py deleted file mode 100644 index e5f2a07..0000000 --- a/utill/filetype/utils.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- - -_NUM_SIGNATURE_BYTES = 262 - - -def get_signature_bytes(path): - """ - Reads file from disk and returns the first 262 bytes - of data representing the magic number header signature. - - Args: - path: path string to file. - - Returns: - First 262 bytes of the file content as bytearray type. - """ - with open(path, 'rb') as fp: - return bytearray(fp.read(_NUM_SIGNATURE_BYTES)) - - -def signature(array): - """ - Returns the first 262 bytes of the given bytearray - as part of the file header signature. - - Args: - array: bytearray to extract the header signature. - - Returns: - First 262 bytes of the file content as bytearray type. 
- """ - length = len(array) - index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length - - return array[:index] - - -def get_bytes(obj): - """ - Infers the input type and reads the first 262 bytes, - returning a sliced bytearray. - - Args: - obj: path to readable, file, bytes or bytearray. - - Returns: - First 262 bytes of the file content as bytearray type. - - Raises: - TypeError: if obj is not a supported type. - """ - try: - obj = obj.read(_NUM_SIGNATURE_BYTES) - except AttributeError: - # duck-typing as readable failed - we'll try the other options - pass - - kind = type(obj) - - if kind is bytearray: - return signature(obj) - - if kind is str: - return get_signature_bytes(obj) - - if kind is bytes: - return signature(obj) - - if kind is memoryview: - return signature(obj).tolist() - - raise TypeError('Unsupported type as file input: %s' % kind) diff --git a/utill/http.py b/utill/http.py deleted file mode 100644 index 7973038..0000000 --- a/utill/http.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -import requests,traceback -from requests.packages.urllib3.exceptions import InsecureRequestWarning -requests.packages.urllib3.disable_warnings(InsecureRequestWarning) -class Http: - "http请求类" - set_proxies=None #设置代理 - set_cookies={} #设置请求cookie - set_header={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'} #请求头 - set_timeout=10 #超时 20秒 - set_max_retries=2 #重试次数 (实际请求3次) - set_verify=False #SSL 证书的验证 sll证书路径 - set_encoding="utf-8" #设置text输出编码 - set_session=True #是否启用会话 - - get_header={} #获取响应头 - get_cookies={} #获取最后的响应cookie - get_cookie_str='' #获取最后的响应cookie 字符串 - get_text='' #获取body响应内容 - get_content='' #获取body响应二进制内容 - get_response='' #获取响应对象 - get_status_code=None #获取响应状态码 - - req=None - def gettext(self): - """得到响应text""" - return self.get_text - def openurl(self,url,method="GET",data=None,files=None): - """模拟浏览器请求 - - url : 目标地址 - - method :GET POST 
- - data:请求参数 - """ - if self.set_session: - if self.req is None: - self.req = requests.Session() - self.req.mount('http://', requests.adapters.HTTPAdapter(max_retries=self.set_max_retries)) - self.req.mount('https://', requests.adapters.HTTPAdapter(max_retries=self.set_max_retries)) - else: - if self.req is None: - self.req = requests - if self.set_cookies and isinstance(self.set_cookies,str): - self.cookieserTdict() - response=self.req.request(method, url,data=data,files=files,proxies=self.set_proxies,cookies=self.set_cookies,headers=self.set_header,timeout=self.set_timeout,verify=self.set_verify) - response.encoding=self.set_encoding - self.get_header=dict(response.headers) - cookie=requests.utils.dict_from_cookiejar(response.cookies) - if self.get_cookies and cookie: - self.get_cookies=self.__merge(self.get_cookies,cookie) - elif cookie: - self.get_cookies=cookie - if self.set_cookies: - self.get_cookies=self.__merge(self.set_cookies,self.get_cookies) - if self.get_cookies: - cookies='' - for key in self.get_cookies: - cookies=cookies+key+"="+self.get_cookies[key]+";" - self.get_cookie_str=cookies - self.get_text=response.text - self.get_content=response.content - self.get_response=response - self.get_status_code=int(response.status_code) - def __merge(self,dict1, dict2): - "合并两个字典" - C_dict = {} - for key,value in dict1.items(): - C_dict[key]=value - for key,value in dict2.items(): - C_dict[key]=value - return C_dict - def cookieserTdict(self): - "cookies字符串转换字典" - if isinstance(self.set_cookies,str): - cok={} - for line in self.set_cookies.split(";"): - lists=line.split("=",1) - # print(lists[]) - if lists[0]: - cok[lists[0]]=lists[1] - self.set_cookies=cok \ No newline at end of file diff --git a/utill/queues.py b/utill/queues.py deleted file mode 100644 index 5dd9d70..0000000 --- a/utill/queues.py +++ /dev/null @@ -1,101 +0,0 @@ -from queue import Queue -from .db import model -from .db import sqlite as kcwsqlite -import threading,time,os,hashlib,random 
-queuesdbpath=os.path.split(os.path.realpath(__file__))[0]+"/Queues" -class model_task(model.model): - "任务" - config={'type':'sqlite','db':queuesdbpath} - model.dbtype.conf=config - table="Queues" - fields={ - "id":model.dbtype.int(LEN=11,PRI=True,A_L=True), #设置id为自增主键 - "taskid":model.dbtype.varchar(LEN=32,DEFAULT=''), #设置id为自增主键 - "title":model.dbtype.varchar(LEN=1024,DEFAULT=''), #名称 - "describes":model.dbtype.varchar(LEN=2048,DEFAULT=''), #描述 - "code":model.dbtype.int(LEN=11,DEFAULT=2), #状态码 0成功 1失败 2等待中 3正在执行 4完成 - "msg":model.dbtype.text(), #状态描述 - "error":model.dbtype.text(), #异常信息 - "addtime":model.dbtype.int(LEN=11,DEFAULT=0) #添加时间 - } -class Queues(): - __globalqueue=None - def start(): - Queues.__globalqueue=Queue() - t=threading.Thread(target=Queues.__messagequeue) - t.daemon=True - t.start() - def __messagequeue(): - if not os.path.isfile(queuesdbpath): - t=model_task() - t.create_table() - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where(True).delete() - while True: - if not Queues.__globalqueue.empty(): - value=Queues.__globalqueue.get() - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":3,"msg":"正在执行","error":""}) - if value['args']: - try: - value['target'](*value['args']) - except Exception as e: - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":1,"msg":"失败","error":str(e)}) - else: - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":4,"msg":"执行完成"}) - else: - try: - value['target']() - except Exception as e: - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":1,"msg":"失败","error":str(e)}) - else: - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and 
code!=4").update({"code":4,"msg":"执行完成"}) - else: - time.sleep(1) - def insert(target,args=None,title="默认任务",describes="",msg='等待中'): #add_queue - """添加队列 - - target 方法名 必须 - - args 方法参数 非必须 如 - - title 任务名称 - - describes 任务描述 - - return taskid - """ - if not os.path.isfile(queuesdbpath): - t=model_task() - t.create_table() - ttt=int(time.time()) - print(ttt) - m = hashlib.md5() - m.update((str(ttt)+str(random.randint(100000,999999))).encode(encoding='utf-8')) - taskid=m.hexdigest() - task={"taskid":taskid,"title":title,"describes":describes,"code":2,"msg":msg,"error":"","addtime":ttt} - key={"target":target,"args":args,"task":task} - Queues.__globalqueue.put(key) - kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").insert(task) - return taskid - def getall(code=''): - """获取全部队列 - - code 1获取失败的任务 2获取等待中的任务 3获取正在执行中的任务 4获取执行完成的任务 - """ - if not os.path.isfile(queuesdbpath): - t=model_task() - t.create_table() - where=False - if code: - where="code="+code - # else: - # where="code!=4" - return kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").field("taskid,title,describes,code,msg,error,addtime").where(where).select() - def status(taskid): - """获取任务状态 - - taskid 任务id - """ - if not os.path.isfile(queuesdbpath): - t=model_task() - t.create_table() - return kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").field("taskid,title,describes,code,msg,error,addtime").where("taskid",taskid).find() \ No newline at end of file diff --git a/utill/redis.py b/utill/redis.py deleted file mode 100644 index d717e68..0000000 --- a/utill/redis.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -from kcweb.utill import rediss as red -from kcweb import config -import json -class redis: - "redis 注意:连接池链接模式下不支持动态配置" - __redisObj=None - __config=config.redis - def __connects(self): - """设置redis链接""" - if self.__config['pattern']: - if not self.__redisObj: - if self.__config['password']: - 
redis_pool=red.ConnectionPool(host=self.__config['host'],password=self.__config['password'],port=self.__config['port'],db=self.__config['db']) - else: - redis_pool=red.ConnectionPool(host=self.__config['host'],port=self.__config['port'],db=self.__config['db']) - self.__redisObj=red.Redis(connection_pool=redis_pool) - else: - if self.__config['password']: - self.__redisObj=red.Redis(host=self.__config['host'],password=self.__config['password'],port=self.__config['port'],db=self.__config['db']) - else: - self.__redisObj=red.Redis(host=self.__config['host'],port=self.__config['port'],db=self.__config['db']) - # print(self.__redisObj) - def __json_decode(self,strs): - """json字符串转python类型""" - try: - return json.loads(strs) - except Exception: - return {} - def __json_encode(self,strs): - """转成字符串""" - try: - return json.dumps(strs,ensure_ascii=False) - except Exception: - return "" - def getconfig(self): - return self.__config - def connect(self,config): - """设置redis链接信息 - - 参数 config 参考配置信息格式 - - 返回 redis - """ - if config: - if isinstance(config,dict): - if "host" in config: - self.__config['host']=config['host'] - if "port" in config: - self.__config['port']=config['port'] - if "password" in config: - self.__config['password']=config['password'] - if "db" in config: - self.__config['db']=config['db'] - return self - def redisObj(self): - "得到一个redis连接对象,执行更多高级操作" - self.__connects() - return self.__redisObj - def getstr(self,name): - """获取name的值 - - name,键 - 返回键“name”处的值,如果该键不存在,则返回“none” - """ - self.__connects() - return self.__redisObj.get(name) - def setstr(self,name,value,ex=None, px=None, nx=False, xx=False): - """ - name,键 - - value,值 只能是字符串 - - ex,过期时间(秒) - - px,过期时间(毫秒) - - nx,如果设置为True,则只有key不存在时,当前set操作才执行,同#setnx(key, value) - - xx,如果设置为True,则只有key存在时,当前set操作才执行 - """ - if not self.__redisObj: - self.__connects() - if not ex and not px: - if self.__config['ex']: - ex=self.__config['ex'] - return self.__redisObj.set(name, value, ex=ex, px=px, nx=nx, xx=xx) 
- def append(self,name,value): - """将字符串“value”追加到“name”处的值。如果``键`` 不存在,请使用值“name”创建它。 返回位于“name”的值的新长度。 - - name,键 - - value,值 只能是字符串 - """ - self.__connects() - return self.__redisObj.append(name,value) - - def set(self,name,value,ex=None, px=None, nx=False, xx=False): - """ - name,键 - - value,值 可以是字典 列表 或字符串 - - ex,过期时间(秒) - - px,过期时间(毫秒) - - nx,如果设置为True,则只有key不存在时,当前set操作才执行 - - xx,如果设置为True,则只有key存在时,当前set操作才执行 - """ - if not self.__redisObj: - self.__connects() - if not ex and not px: - if self.__config['ex']: - ex=self.__config['ex'] - value=self.__json_encode(value) - return self.__redisObj.set(name, value, ex=ex, px=px, nx=nx, xx=xx) - - def get(self,name): - """获取name的值 - - name,键 - 返回键“name”处的值,如果该键不存在,则返回“none” - """ - self.__connects() - value=self.__redisObj.get(name) - if value: - value=self.__json_decode(value) - return value - def delete(self,name): - """删除name的值 - - name,键 - - 返回 True,如果该键不存在,则返回 0 - """ - self.__connects() - return self.__redisObj.delete(name) - def rpush(self,name, *values): - "元素从list的右边加入 ,可以添加多个" - self.__connects() - # print(self.__config) - return self.__redisObj.rpush(name, *values) - def rpop(self,name): - "元素从list的右边移出" - self.__connects() - return self.__redisObj.rpop(name) - def rpoplpush(self,src, dst): - "元素从list的右边移出,并且从list的左边加入" - self.__connects() - return self.__redisObj.rpoplpush(src, dst) - def rpushx(self,name, value): - "当name存在时,元素才能从list的右边加入" - self.__connects() - return self.__redisObj.rpushx(name, value) - def lpush(self,name, *values): - "元素从list的左边加入,可以添加多个" - self.__connects() - return self.__redisObj.lpush(name, *values) - def lpop(self,name): - "元素从list的左边移出" - self.__connects() - return self.__redisObj.lpop(name) - def lpushxs(self,name): - "当name存在时,元素才能从list的左边加入" - self.__connects() - return self.__redisObj.lpushx(name) - def hset(self,name,key,value): - """在hash名称中将key设置为value如果HSET创建了新字段,则返回1,否则返回0 - - name,名 - - key,键 - - mapping,值 - """ - self.__connects() - return 
self.__redisObj.hset(name,key,value) - def hget(self,name,key): - "返回hash的name中的key值" - self.__connects() - return self.__redisObj.hget(name,key) - def hgetall(self,name): - "返回hash名称/值对的Python dict" - self.__connects() - return self.__redisObj.hgetall(name) - def hmset(self,name,mapping,ex=None): - """在hash的name中为每个键设置值 - name,键 - - mapping,值 - - ex,过期时间(秒) - - """ - self.__connects() - boot = self.__redisObj.hmset(name,mapping) - - if not ex: - if self.__config['ex']: - ex=self.__config['ex'] - if ex: - self.__redisObj.expire(name,ex) - return boot - def hmget(self,name, keys, *args): - "返回与“keys”顺序相同的值列表``" - self.__connects() - return self.__redisObj.hmget(name, keys, *args) - diff --git a/utill/rediss/__init__.py b/utill/rediss/__init__.py deleted file mode 100644 index 8c2be51..0000000 --- a/utill/rediss/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from .client import Redis, StrictRedis -from .connection import ( - BlockingConnectionPool, - ConnectionPool, - Connection, - SSLConnection, - UnixDomainSocketConnection -) -from .utils import from_url -from .exceptions import ( - AuthenticationError, - BusyLoadingError, - ConnectionError, - DataError, - InvalidResponse, - PubSubError, - ReadOnlyError, - RedisError, - ResponseError, - TimeoutError, - WatchError -) - - -def int_or_str(value): - try: - return int(value) - except ValueError: - return value - - -__version__ = '3.3.8' -VERSION = tuple(map(int_or_str, __version__.split('.'))) - -__all__ = [ - 'Redis', 'StrictRedis', 'ConnectionPool', 'BlockingConnectionPool', - 'Connection', 'SSLConnection', 'UnixDomainSocketConnection', 'from_url', - 'AuthenticationError', 'BusyLoadingError', 'ConnectionError', 'DataError', - 'InvalidResponse', 'PubSubError', 'ReadOnlyError', 'RedisError', - 'ResponseError', 'TimeoutError', 'WatchError' -] diff --git a/utill/rediss/_compat.py b/utill/rediss/_compat.py deleted file mode 100644 index d70af2a..0000000 --- a/utill/rediss/_compat.py +++ /dev/null @@ -1,138 +0,0 @@ 
-"""Internal module for Python 2 backwards compatibility.""" -import errno -import socket -import sys - -# For Python older than 3.5, retry EINTR. -if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and - sys.version_info[1] < 5): - # Adapted from https://bugs.python.org/review/23863/patch/14532/54418 - import time - - # Wrapper for handling interruptable system calls. - def _retryable_call(s, func, *args, **kwargs): - # Some modules (SSL) use the _fileobject wrapper directly and - # implement a smaller portion of the socket interface, thus we - # need to let them continue to do so. - timeout, deadline = None, 0.0 - attempted = False - try: - timeout = s.gettimeout() - except AttributeError: - pass - - if timeout: - deadline = time.time() + timeout - - try: - while True: - if attempted and timeout: - now = time.time() - if now >= deadline: - raise socket.error(errno.EWOULDBLOCK, "timed out") - else: - # Overwrite the timeout on the socket object - # to take into account elapsed time. - s.settimeout(deadline - now) - try: - attempted = True - return func(*args, **kwargs) - except socket.error as e: - if e.args[0] == errno.EINTR: - continue - raise - finally: - # Set the existing timeout back for future - # calls. 
- if timeout: - s.settimeout(timeout) - - def recv(sock, *args, **kwargs): - return _retryable_call(sock, sock.recv, *args, **kwargs) - - def recv_into(sock, *args, **kwargs): - return _retryable_call(sock, sock.recv_into, *args, **kwargs) - -else: # Python 3.5 and above automatically retry EINTR - def recv(sock, *args, **kwargs): - return sock.recv(*args, **kwargs) - - def recv_into(sock, *args, **kwargs): - return sock.recv_into(*args, **kwargs) - -if sys.version_info[0] < 3: - from urllib import unquote - from urlparse import parse_qs, urlparse - from itertools import imap, izip - from string import letters as ascii_letters - from Queue import Queue - - # special unicode handling for python2 to avoid UnicodeDecodeError - def safe_unicode(obj, *args): - """ return the unicode representation of obj """ - try: - return unicode(obj, *args) - except UnicodeDecodeError: - # obj is byte string - ascii_text = str(obj).encode('string_escape') - return unicode(ascii_text) - - def iteritems(x): - return x.iteritems() - - def iterkeys(x): - return x.iterkeys() - - def itervalues(x): - return x.itervalues() - - def nativestr(x): - return x if isinstance(x, str) else x.encode('utf-8', 'replace') - - def next(x): - return x.next() - - def byte_to_chr(x): - return x - - unichr = unichr - xrange = xrange - basestring = basestring - unicode = unicode - long = long - BlockingIOError = socket.error -else: - from urllib.parse import parse_qs, unquote, urlparse - from string import ascii_letters - from queue import Queue - - def iteritems(x): - return iter(x.items()) - - def iterkeys(x): - return iter(x.keys()) - - def itervalues(x): - return iter(x.values()) - - def byte_to_chr(x): - return chr(x) - - def nativestr(x): - return x if isinstance(x, str) else x.decode('utf-8', 'replace') - - next = next - unichr = chr - imap = map - izip = zip - xrange = range - basestring = str - unicode = str - safe_unicode = str - long = int - BlockingIOError = BlockingIOError - -try: # Python 3 - 
from queue import LifoQueue, Empty, Full -except ImportError: # Python 2 - from Queue import LifoQueue, Empty, Full diff --git a/utill/rediss/client.py b/utill/rediss/client.py deleted file mode 100644 index 675b0d0..0000000 --- a/utill/rediss/client.py +++ /dev/null @@ -1,3865 +0,0 @@ -from __future__ import unicode_literals -from itertools import chain -import datetime -import sys -import warnings -import time -import threading -import time as mod_time -import re -import hashlib -from ._compat import (basestring, imap, iteritems, iterkeys, - itervalues, izip, long, nativestr, safe_unicode) -from .connection import (ConnectionPool, UnixDomainSocketConnection, - SSLConnection) -from .lock import Lock -from .exceptions import ( - ConnectionError, - DataError, - ExecAbortError, - NoScriptError, - PubSubError, - RedisError, - ResponseError, - TimeoutError, - WatchError, -) - -SYM_EMPTY = b'' -EMPTY_RESPONSE = 'EMPTY_RESPONSE' - - -def list_or_args(keys, args): - # returns a single new list combining keys and args - try: - iter(keys) - # a string or bytes instance can be iterated, but indicates - # keys wasn't passed as a list - if isinstance(keys, (basestring, bytes)): - keys = [keys] - else: - keys = list(keys) - except TypeError: - keys = [keys] - if args: - keys.extend(args) - return keys - - -def timestamp_to_datetime(response): - "Converts a unix timestamp to a Python datetime object" - if not response: - return None - try: - response = int(response) - except ValueError: - return None - return datetime.datetime.fromtimestamp(response) - - -def string_keys_to_dict(key_string, callback): - return dict.fromkeys(key_string.split(), callback) - - -def dict_merge(*dicts): - merged = {} - for d in dicts: - merged.update(d) - return merged - - -class CaseInsensitiveDict(dict): - "Case insensitive dict implementation. Assumes string keys only." 
- - def __init__(self, data): - for k, v in iteritems(data): - self[k.upper()] = v - - def __contains__(self, k): - return super(CaseInsensitiveDict, self).__contains__(k.upper()) - - def __delitem__(self, k): - super(CaseInsensitiveDict, self).__delitem__(k.upper()) - - def __getitem__(self, k): - return super(CaseInsensitiveDict, self).__getitem__(k.upper()) - - def get(self, k, default=None): - return super(CaseInsensitiveDict, self).get(k.upper(), default) - - def __setitem__(self, k, v): - super(CaseInsensitiveDict, self).__setitem__(k.upper(), v) - - def update(self, data): - data = CaseInsensitiveDict(data) - super(CaseInsensitiveDict, self).update(data) - - -def parse_debug_object(response): - "Parse the results of Redis's DEBUG OBJECT command into a Python dict" - # The 'type' of the object is the first item in the response, but isn't - # prefixed with a name - response = nativestr(response) - response = 'type:' + response - response = dict(kv.split(':') for kv in response.split()) - - # parse some expected int values from the string response - # note: this cmd isn't spec'd so these may not appear in all redis versions - int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle') - for field in int_fields: - if field in response: - response[field] = int(response[field]) - - return response - - -def parse_object(response, infotype): - "Parse the results of an OBJECT command" - if infotype in ('idletime', 'refcount'): - return int_or_none(response) - return response - - -def parse_info(response): - "Parse the result of Redis's INFO command into a Python dict" - info = {} - response = nativestr(response) - - def get_value(value): - if ',' not in value or '=' not in value: - try: - if '.' 
in value: - return float(value) - else: - return int(value) - except ValueError: - return value - else: - sub_dict = {} - for item in value.split(','): - k, v = item.rsplit('=', 1) - sub_dict[k] = get_value(v) - return sub_dict - - for line in response.splitlines(): - if line and not line.startswith('#'): - if line.find(':') != -1: - # Split, the info fields keys and values. - # Note that the value may contain ':'. but the 'host:' - # pseudo-command is the only case where the key contains ':' - key, value = line.split(':', 1) - if key == 'cmdstat_host': - key, value = line.rsplit(':', 1) - info[key] = get_value(value) - else: - # if the line isn't splittable, append it to the "__raw__" key - info.setdefault('__raw__', []).append(line) - - return info - - -SENTINEL_STATE_TYPES = { - 'can-failover-its-master': int, - 'config-epoch': int, - 'down-after-milliseconds': int, - 'failover-timeout': int, - 'info-refresh': int, - 'last-hello-message': int, - 'last-ok-ping-reply': int, - 'last-ping-reply': int, - 'last-ping-sent': int, - 'master-link-down-time': int, - 'master-port': int, - 'num-other-sentinels': int, - 'num-slaves': int, - 'o-down-time': int, - 'pending-commands': int, - 'parallel-syncs': int, - 'port': int, - 'quorum': int, - 'role-reported-time': int, - 's-down-time': int, - 'slave-priority': int, - 'slave-repl-offset': int, - 'voted-leader-epoch': int -} - - -def parse_sentinel_state(item): - result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES) - flags = set(result['flags'].split(',')) - for name, flag in (('is_master', 'master'), ('is_slave', 'slave'), - ('is_sdown', 's_down'), ('is_odown', 'o_down'), - ('is_sentinel', 'sentinel'), - ('is_disconnected', 'disconnected'), - ('is_master_down', 'master_down')): - result[name] = flag in flags - return result - - -def parse_sentinel_master(response): - return parse_sentinel_state(imap(nativestr, response)) - - -def parse_sentinel_masters(response): - result = {} - for item in response: - state = 
parse_sentinel_state(imap(nativestr, item)) - result[state['name']] = state - return result - - -def parse_sentinel_slaves_and_sentinels(response): - return [parse_sentinel_state(imap(nativestr, item)) for item in response] - - -def parse_sentinel_get_master(response): - return response and (response[0], int(response[1])) or None - - -def pairs_to_dict(response, decode_keys=False): - "Create a dict given a list of key/value pairs" - if response is None: - return {} - if decode_keys: - # the iter form is faster, but I don't know how to make that work - # with a nativestr() map - return dict(izip(imap(nativestr, response[::2]), response[1::2])) - else: - it = iter(response) - return dict(izip(it, it)) - - -def pairs_to_dict_typed(response, type_info): - it = iter(response) - result = {} - for key, value in izip(it, it): - if key in type_info: - try: - value = type_info[key](value) - except Exception: - # if for some reason the value can't be coerced, just use - # the string value - pass - result[key] = value - return result - - -def zset_score_pairs(response, **options): - """ - If ``withscores`` is specified in the options, return the response as - a list of (value, score) pairs - """ - if not response or not options.get('withscores'): - return response - score_cast_func = options.get('score_cast_func', float) - it = iter(response) - return list(izip(it, imap(score_cast_func, it))) - - -def sort_return_tuples(response, **options): - """ - If ``groups`` is specified, return the response as a list of - n-element tuples with n being the value found in options['groups'] - """ - if not response or not options.get('groups'): - return response - n = options['groups'] - return list(izip(*[response[i::n] for i in range(n)])) - - -def int_or_none(response): - if response is None: - return None - return int(response) - - -def nativestr_or_none(response): - if response is None: - return None - return nativestr(response) - - -def parse_stream_list(response): - if response is 
None: - return None - data = [] - for r in response: - if r is not None: - data.append((r[0], pairs_to_dict(r[1]))) - else: - data.append((None, None)) - return data - - -def pairs_to_dict_with_nativestr_keys(response): - return pairs_to_dict(response, decode_keys=True) - - -def parse_list_of_dicts(response): - return list(imap(pairs_to_dict_with_nativestr_keys, response)) - - -def parse_xclaim(response, **options): - if options.get('parse_justid', False): - return response - return parse_stream_list(response) - - -def parse_xinfo_stream(response): - data = pairs_to_dict(response, decode_keys=True) - first = data['first-entry'] - if first is not None: - data['first-entry'] = (first[0], pairs_to_dict(first[1])) - last = data['last-entry'] - if last is not None: - data['last-entry'] = (last[0], pairs_to_dict(last[1])) - return data - - -def parse_xread(response): - if response is None: - return [] - return [[r[0], parse_stream_list(r[1])] for r in response] - - -def parse_xpending(response, **options): - if options.get('parse_detail', False): - return parse_xpending_range(response) - consumers = [{'name': n, 'pending': long(p)} for n, p in response[3] or []] - return { - 'pending': response[0], - 'min': response[1], - 'max': response[2], - 'consumers': consumers - } - - -def parse_xpending_range(response): - k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered') - return [dict(izip(k, r)) for r in response] - - -def float_or_none(response): - if response is None: - return None - return float(response) - - -def bool_ok(response): - return nativestr(response) == 'OK' - - -def parse_zadd(response, **options): - if response is None: - return None - if options.get('as_score'): - return float(response) - return int(response) - - -def parse_client_list(response, **options): - clients = [] - for c in nativestr(response).splitlines(): - # Values might contain '=' - clients.append(dict(pair.split('=', 1) for pair in c.split(' '))) - return clients - - -def 
parse_config_get(response, **options): - response = [nativestr(i) if i is not None else None for i in response] - return response and pairs_to_dict(response) or {} - - -def parse_scan(response, **options): - cursor, r = response - return long(cursor), r - - -def parse_hscan(response, **options): - cursor, r = response - return long(cursor), r and pairs_to_dict(r) or {} - - -def parse_zscan(response, **options): - score_cast_func = options.get('score_cast_func', float) - cursor, r = response - it = iter(r) - return long(cursor), list(izip(it, imap(score_cast_func, it))) - - -def parse_slowlog_get(response, **options): - return [{ - 'id': item[0], - 'start_time': int(item[1]), - 'duration': int(item[2]), - 'command': b' '.join(item[3]) - } for item in response] - - -def parse_cluster_info(response, **options): - response = nativestr(response) - return dict(line.split(':') for line in response.splitlines() if line) - - -def _parse_node_line(line): - line_items = line.split(' ') - node_id, addr, flags, master_id, ping, pong, epoch, \ - connected = line.split(' ')[:8] - slots = [sl.split('-') for sl in line_items[8:]] - node_dict = { - 'node_id': node_id, - 'flags': flags, - 'master_id': master_id, - 'last_ping_sent': ping, - 'last_pong_rcvd': pong, - 'epoch': epoch, - 'slots': slots, - 'connected': True if connected == 'connected' else False - } - return addr, node_dict - - -def parse_cluster_nodes(response, **options): - response = nativestr(response) - raw_lines = response - if isinstance(response, basestring): - raw_lines = response.splitlines() - return dict(_parse_node_line(line) for line in raw_lines) - - -def parse_georadius_generic(response, **options): - if options['store'] or options['store_dist']: - # `store` and `store_diff` cant be combined - # with other command arguments. 
- return response - - if type(response) != list: - response_list = [response] - else: - response_list = response - - if not options['withdist'] and not options['withcoord']\ - and not options['withhash']: - # just a bunch of places - return response_list - - cast = { - 'withdist': float, - 'withcoord': lambda ll: (float(ll[0]), float(ll[1])), - 'withhash': int - } - - # zip all output results with each casting functino to get - # the properly native Python value. - f = [lambda x: x] - f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]] - return [ - list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list - ] - - -def parse_pubsub_numsub(response, **options): - return list(zip(response[0::2], response[1::2])) - - -def parse_client_kill(response, **options): - if isinstance(response, (long, int)): - return int(response) - return nativestr(response) == 'OK' - - -class Redis(object): - """ - Implementation of the Redis protocol. - - This abstract class provides a Python interface to all Redis commands - and an implementation of the Redis protocol. 
- - Connection and Pipeline derive from this, implementing how - the commands are sent and received to the Redis server - """ - RESPONSE_CALLBACKS = dict_merge( - string_keys_to_dict( - 'AUTH EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST ' - 'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX', - bool - ), - string_keys_to_dict( - 'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN ' - 'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD ' - 'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN ' - 'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM ' - 'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE', - int - ), - string_keys_to_dict( - 'INCRBYFLOAT HINCRBYFLOAT', - float - ), - string_keys_to_dict( - # these return OK, or int if redis-server is >=1.3.4 - 'LPUSH RPUSH', - lambda r: isinstance(r, (long, int)) and r or nativestr(r) == 'OK' - ), - string_keys_to_dict('SORT', sort_return_tuples), - string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none), - string_keys_to_dict( - 'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE ' - 'RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ', - bool_ok - ), - string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None), - string_keys_to_dict( - 'SDIFF SINTER SMEMBERS SUNION', - lambda r: r and set(r) or set() - ), - string_keys_to_dict( - 'ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE', - zset_score_pairs - ), - string_keys_to_dict('BZPOPMIN BZPOPMAX', \ - lambda r: r and (r[0], r[1], float(r[2])) or None), - string_keys_to_dict('ZRANK ZREVRANK', int_or_none), - string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list), - string_keys_to_dict('XREAD XREADGROUP', parse_xread), - string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True), - { - 'CLIENT GETNAME': lambda r: r and nativestr(r), - 'CLIENT ID': int, - 'CLIENT KILL': parse_client_kill, - 'CLIENT LIST': parse_client_list, - 'CLIENT SETNAME': bool_ok, - 'CLIENT UNBLOCK': lambda r: r and int(r) 
== 1 or False, - 'CLIENT PAUSE': bool_ok, - 'CLUSTER ADDSLOTS': bool_ok, - 'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x), - 'CLUSTER COUNTKEYSINSLOT': lambda x: int(x), - 'CLUSTER DELSLOTS': bool_ok, - 'CLUSTER FAILOVER': bool_ok, - 'CLUSTER FORGET': bool_ok, - 'CLUSTER INFO': parse_cluster_info, - 'CLUSTER KEYSLOT': lambda x: int(x), - 'CLUSTER MEET': bool_ok, - 'CLUSTER NODES': parse_cluster_nodes, - 'CLUSTER REPLICATE': bool_ok, - 'CLUSTER RESET': bool_ok, - 'CLUSTER SAVECONFIG': bool_ok, - 'CLUSTER SET-CONFIG-EPOCH': bool_ok, - 'CLUSTER SETSLOT': bool_ok, - 'CLUSTER SLAVES': parse_cluster_nodes, - 'CONFIG GET': parse_config_get, - 'CONFIG RESETSTAT': bool_ok, - 'CONFIG SET': bool_ok, - 'DEBUG OBJECT': parse_debug_object, - 'GEOHASH': lambda r: list(map(nativestr_or_none, r)), - 'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]), - float(ll[1])) - if ll is not None else None, r)), - 'GEORADIUS': parse_georadius_generic, - 'GEORADIUSBYMEMBER': parse_georadius_generic, - 'HGETALL': lambda r: r and pairs_to_dict(r) or {}, - 'HSCAN': parse_hscan, - 'INFO': parse_info, - 'LASTSAVE': timestamp_to_datetime, - 'MEMORY PURGE': bool_ok, - 'MEMORY USAGE': int_or_none, - 'OBJECT': parse_object, - 'PING': lambda r: nativestr(r) == 'PONG', - 'PUBSUB NUMSUB': parse_pubsub_numsub, - 'RANDOMKEY': lambda r: r and r or None, - 'SCAN': parse_scan, - 'SCRIPT EXISTS': lambda r: list(imap(bool, r)), - 'SCRIPT FLUSH': bool_ok, - 'SCRIPT KILL': bool_ok, - 'SCRIPT LOAD': nativestr, - 'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master, - 'SENTINEL MASTER': parse_sentinel_master, - 'SENTINEL MASTERS': parse_sentinel_masters, - 'SENTINEL MONITOR': bool_ok, - 'SENTINEL REMOVE': bool_ok, - 'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels, - 'SENTINEL SET': bool_ok, - 'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels, - 'SET': lambda r: r and nativestr(r) == 'OK', - 'SLOWLOG GET': parse_slowlog_get, - 'SLOWLOG LEN': int, - 'SLOWLOG RESET': bool_ok, - 'SSCAN': 
parse_scan, - 'TIME': lambda x: (int(x[0]), int(x[1])), - 'XCLAIM': parse_xclaim, - 'XGROUP CREATE': bool_ok, - 'XGROUP DELCONSUMER': int, - 'XGROUP DESTROY': bool, - 'XGROUP SETID': bool_ok, - 'XINFO CONSUMERS': parse_list_of_dicts, - 'XINFO GROUPS': parse_list_of_dicts, - 'XINFO STREAM': parse_xinfo_stream, - 'XPENDING': parse_xpending, - 'ZADD': parse_zadd, - 'ZSCAN': parse_zscan, - } - ) - - @classmethod - def from_url(cls, url, db=None, **kwargs): - """ - Return a Redis client object configured from the given URL - - For example:: - - redis://[:password]@localhost:6379/0 - rediss://[:password]@localhost:6379/0 - unix://[:password]@/path/to/socket.sock?db=0 - - Three URL schemes are supported: - - - ```redis://`` - `_ creates a - normal TCP socket connection - - ```rediss://`` - `_ creates a - SSL wrapped TCP socket connection - - ``unix://`` creates a Unix Domain Socket connection - - There are several ways to specify a database number. The parse function - will return the first specified option: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 - 2. If using the redis:// scheme, the path argument of the url, e.g. - redis://localhost/0 - 3. The ``db`` argument to this function. - - If none of these options are specified, db=0 is used. - - Any additional querystring arguments and keyword arguments will be - passed along to the ConnectionPool class's initializer. In the case - of conflicting arguments, querystring arguments always win. 
- """ - connection_pool = ConnectionPool.from_url(url, db=db, **kwargs) - return cls(connection_pool=connection_pool) - - def __init__(self, host='localhost', port=6379, - db=0, password=None, socket_timeout=None, - socket_connect_timeout=None, - socket_keepalive=None, socket_keepalive_options=None, - connection_pool=None, unix_socket_path=None, - encoding='utf-8', encoding_errors='strict', - charset=None, errors=None, - decode_responses=False, retry_on_timeout=False, - ssl=False, ssl_keyfile=None, ssl_certfile=None, - ssl_cert_reqs='required', ssl_ca_certs=None, - max_connections=None, single_connection_client=False, - health_check_interval=0): - if not connection_pool: - if charset is not None: - warnings.warn(DeprecationWarning( - '"charset" is deprecated. Use "encoding" instead')) - encoding = charset - if errors is not None: - warnings.warn(DeprecationWarning( - '"errors" is deprecated. Use "encoding_errors" instead')) - encoding_errors = errors - - kwargs = { - 'db': db, - 'password': password, - 'socket_timeout': socket_timeout, - 'encoding': encoding, - 'encoding_errors': encoding_errors, - 'decode_responses': decode_responses, - 'retry_on_timeout': retry_on_timeout, - 'max_connections': max_connections, - 'health_check_interval': health_check_interval, - } - # based on input, setup appropriate connection args - if unix_socket_path is not None: - kwargs.update({ - 'path': unix_socket_path, - 'connection_class': UnixDomainSocketConnection - }) - else: - # TCP specific options - kwargs.update({ - 'host': host, - 'port': port, - 'socket_connect_timeout': socket_connect_timeout, - 'socket_keepalive': socket_keepalive, - 'socket_keepalive_options': socket_keepalive_options, - }) - - if ssl: - kwargs.update({ - 'connection_class': SSLConnection, - 'ssl_keyfile': ssl_keyfile, - 'ssl_certfile': ssl_certfile, - 'ssl_cert_reqs': ssl_cert_reqs, - 'ssl_ca_certs': ssl_ca_certs, - }) - connection_pool = ConnectionPool(**kwargs) - self.connection_pool = connection_pool - 
self.connection = None - if single_connection_client: - self.connection = self.connection_pool.get_connection('_') - - self.response_callbacks = CaseInsensitiveDict( - self.__class__.RESPONSE_CALLBACKS) - - def __repr__(self): - return "%s<%s>" % (type(self).__name__, repr(self.connection_pool)) - - def set_response_callback(self, command, callback): - "Set a custom Response Callback" - self.response_callbacks[command] = callback - - def pipeline(self, transaction=True, shard_hint=None): - """ - Return a new pipeline object that can queue multiple commands for - later execution. ``transaction`` indicates whether all commands - should be executed atomically. Apart from making a group of operations - atomic, pipelines are useful for reducing the back-and-forth overhead - between the client and server. - """ - return Pipeline( - self.connection_pool, - self.response_callbacks, - transaction, - shard_hint) - - def transaction(self, func, *watches, **kwargs): - """ - Convenience method for executing the callable `func` as a transaction - while watching all keys specified in `watches`. The 'func' callable - should expect a single argument which is a Pipeline object. - """ - shard_hint = kwargs.pop('shard_hint', None) - value_from_callable = kwargs.pop('value_from_callable', False) - watch_delay = kwargs.pop('watch_delay', None) - with self.pipeline(True, shard_hint) as pipe: - while True: - try: - if watches: - pipe.watch(*watches) - func_value = func(pipe) - exec_value = pipe.execute() - return func_value if value_from_callable else exec_value - except WatchError: - if watch_delay is not None and watch_delay > 0: - time.sleep(watch_delay) - continue - - def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, - lock_class=None, thread_local=True): - """ - Return a new Lock object using key ``name`` that mimics - the behavior of threading.Lock. - - If specified, ``timeout`` indicates a maximum life for the lock. 
- By default, it will remain locked until release() is called. - - ``sleep`` indicates the amount of time to sleep per loop iteration - when the lock is in blocking mode and another client is currently - holding the lock. - - ``blocking_timeout`` indicates the maximum amount of time in seconds to - spend trying to acquire the lock. A value of ``None`` indicates - continue trying forever. ``blocking_timeout`` can be specified as a - float or integer, both representing the number of seconds to wait. - - ``lock_class`` forces the specified lock implementation. - - ``thread_local`` indicates whether the lock token is placed in - thread-local storage. By default, the token is placed in thread local - storage so that a thread only sees its token, not a token set by - another thread. Consider the following timeline: - - time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. - thread-1 sets the token to "abc" - time: 1, thread-2 blocks trying to acquire `my-lock` using the - Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock - key. - time: 5, thread-2 acquired `my-lock` now that it's available. - thread-2 sets the token to "xyz" - time: 6, thread-1 finishes its work and calls release(). if the - token is *not* stored in thread local storage, then - thread-1 would see the token value as "xyz" and would be - able to successfully release the thread-2's lock. - - In some use cases it's necessary to disable thread local storage. For - example, if you have code where one thread acquires a lock and passes - that lock instance to a worker thread to release later. If thread - local storage isn't disabled in this case, the worker thread won't see - the token set by the thread that acquired the lock. Our assumption - is that these cases aren't common and as such default to using - thread local storage. 
""" - if lock_class is None: - lock_class = Lock - return lock_class(self, name, timeout=timeout, sleep=sleep, - blocking_timeout=blocking_timeout, - thread_local=thread_local) - - def pubsub(self, **kwargs): - """ - Return a Publish/Subscribe object. With this object, you can - subscribe to channels and listen for messages that get published to - them. - """ - return PubSub(self.connection_pool, **kwargs) - - def monitor(self): - return Monitor(self.connection_pool) - - def client(self): - return self.__class__(connection_pool=self.connection_pool, - single_connection_client=True) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def __del__(self): - self.close() - - def close(self): - conn = self.connection - if conn: - self.connection = None - self.connection_pool.release(conn) - - # COMMAND EXECUTION AND PROTOCOL PARSING - def execute_command(self, *args, **options): - "Execute a command and return a parsed response" - pool = self.connection_pool - command_name = args[0] - conn = self.connection or pool.get_connection(command_name, **options) - try: - conn.send_command(*args) - return self.parse_response(conn, command_name, **options) - except (ConnectionError, TimeoutError) as e: - conn.disconnect() - if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): - raise - conn.send_command(*args) - return self.parse_response(conn, command_name, **options) - finally: - if not self.connection: - pool.release(conn) - - def parse_response(self, connection, command_name, **options): - "Parses a response from the Redis server" - try: - response = connection.read_response() - except ResponseError: - if EMPTY_RESPONSE in options: - return options[EMPTY_RESPONSE] - raise - if command_name in self.response_callbacks: - return self.response_callbacks[command_name](response, **options) - return response - - # SERVER INFORMATION - def bgrewriteaof(self): - "Tell the Redis server to rewrite the AOF file from 
data in memory." - return self.execute_command('BGREWRITEAOF') - - def bgsave(self): - """ - Tell the Redis server to save its data to disk. Unlike save(), - this method is asynchronous and returns immediately. - """ - return self.execute_command('BGSAVE') - - def client_kill(self, address): - "Disconnects the client at ``address`` (ip:port)" - return self.execute_command('CLIENT KILL', address) - - def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None): - """ - Disconnects client(s) using a variety of filter options - :param id: Kills a client by its unique ID field - :param type: Kills a client by type where type is one of 'normal', - 'master', 'slave' or 'pubsub' - :param addr: Kills a client by its 'address:port' - :param skipme: If True, then the client calling the command - will not get killed even if it is identified by one of the filter - options. If skipme is not provided, the server defaults to skipme=True - """ - args = [] - if _type is not None: - client_types = ('normal', 'master', 'slave', 'pubsub') - if str(_type).lower() not in client_types: - raise DataError("CLIENT KILL type must be one of %r" % ( - client_types,)) - args.extend((b'TYPE', _type)) - if skipme is not None: - if not isinstance(skipme, bool): - raise DataError("CLIENT KILL skipme must be a bool") - if skipme: - args.extend((b'SKIPME', b'YES')) - else: - args.extend((b'SKIPME', b'NO')) - if _id is not None: - args.extend((b'ID', _id)) - if addr is not None: - args.extend((b'ADDR', addr)) - if not args: - raise DataError("CLIENT KILL ... ... " - " must specify at least one filter") - return self.execute_command('CLIENT KILL', *args) - - def client_list(self, _type=None): - """ - Returns a list of currently connected clients. - If type of client specified, only that type will be returned. - :param _type: optional. 
one of the client types (normal, master, - replica, pubsub) - """ - "Returns a list of currently connected clients" - if _type is not None: - client_types = ('normal', 'master', 'replica', 'pubsub') - if str(_type).lower() not in client_types: - raise DataError("CLIENT LIST _type must be one of %r" % ( - client_types,)) - return self.execute_command('CLIENT LIST', b'TYPE', _type) - return self.execute_command('CLIENT LIST') - - def client_getname(self): - "Returns the current connection name" - return self.execute_command('CLIENT GETNAME') - - def client_id(self): - "Returns the current connection id" - return self.execute_command('CLIENT ID') - - def client_setname(self, name): - "Sets the current connection name" - return self.execute_command('CLIENT SETNAME', name) - - def client_unblock(self, client_id, error=False): - """ - Unblocks a connection by its client id. - If ``error`` is True, unblocks the client with a special error message. - If ``error`` is False (default), the client is unblocked using the - regular timeout mechanism. 
- """ - args = ['CLIENT UNBLOCK', int(client_id)] - if error: - args.append(b'ERROR') - return self.execute_command(*args) - - def client_pause(self, timeout): - """ - Suspend all the Redis clients for the specified amount of time - :param timeout: milliseconds to pause clients - """ - if not isinstance(timeout, (int, long)): - raise DataError("CLIENT PAUSE timeout must be an integer") - return self.execute_command('CLIENT PAUSE', str(timeout)) - - def readwrite(self): - "Disables read queries for a connection to a Redis Cluster slave node" - return self.execute_command('READWRITE') - - def readonly(self): - "Enables read queries for a connection to a Redis Cluster replica node" - return self.execute_command('READONLY') - - def config_get(self, pattern="*"): - "Return a dictionary of configuration based on the ``pattern``" - return self.execute_command('CONFIG GET', pattern) - - def config_set(self, name, value): - "Set config item ``name`` with ``value``" - return self.execute_command('CONFIG SET', name, value) - - def config_resetstat(self): - "Reset runtime statistics" - return self.execute_command('CONFIG RESETSTAT') - - def config_rewrite(self): - "Rewrite config file with the minimal change to reflect running config" - return self.execute_command('CONFIG REWRITE') - - def dbsize(self): - "Returns the number of keys in the current database" - return self.execute_command('DBSIZE') - - def debug_object(self, key): - "Returns version specific meta information about a given key" - return self.execute_command('DEBUG OBJECT', key) - - def echo(self, value): - "Echo the string back from the server" - return self.execute_command('ECHO', value) - - def flushall(self, asynchronous=False): - """ - Delete all keys in all databases on the current host. - - ``asynchronous`` indicates whether the operation is - executed asynchronously by the server. 
- """ - args = [] - if asynchronous: - args.append(b'ASYNC') - return self.execute_command('FLUSHALL', *args) - - def flushdb(self, asynchronous=False): - """ - Delete all keys in the current database. - - ``asynchronous`` indicates whether the operation is - executed asynchronously by the server. - """ - args = [] - if asynchronous: - args.append(b'ASYNC') - return self.execute_command('FLUSHDB', *args) - - def swapdb(self, first, second): - "Swap two databases" - return self.execute_command('SWAPDB', first, second) - - def info(self, section=None): - """ - Returns a dictionary containing information about the Redis server - - The ``section`` option can be used to select a specific section - of information - - The section option is not supported by older versions of Redis Server, - and will generate ResponseError - """ - if section is None: - return self.execute_command('INFO') - else: - return self.execute_command('INFO', section) - - def lastsave(self): - """ - Return a Python datetime object representing the last time the - Redis database was saved to disk - """ - return self.execute_command('LASTSAVE') - - def migrate(self, host, port, keys, destination_db, timeout, - copy=False, replace=False, auth=None): - """ - Migrate 1 or more keys from the current Redis server to a different - server specified by the ``host``, ``port`` and ``destination_db``. - - The ``timeout``, specified in milliseconds, indicates the maximum - time the connection between the two servers can be idle before the - command is interrupted. - - If ``copy`` is True, the specified ``keys`` are NOT deleted from - the source server. - - If ``replace`` is True, this operation will overwrite the keys - on the destination server if they exist. - - If ``auth`` is specified, authenticate to the destination server with - the password provided. 
- """ - keys = list_or_args(keys, []) - if not keys: - raise DataError('MIGRATE requires at least one key') - pieces = [] - if copy: - pieces.append(b'COPY') - if replace: - pieces.append(b'REPLACE') - if auth: - pieces.append(b'AUTH') - pieces.append(auth) - pieces.append(b'KEYS') - pieces.extend(keys) - return self.execute_command('MIGRATE', host, port, '', destination_db, - timeout, *pieces) - - def object(self, infotype, key): - "Return the encoding, idletime, or refcount about the key" - return self.execute_command('OBJECT', infotype, key, infotype=infotype) - - def memory_usage(self, key, samples=None): - """ - Return the total memory usage for key, its value and associated - administrative overheads. - - For nested data structures, ``samples`` is the number of elements to - sample. If left unspecified, the server's default is 5. Use 0 to sample - all elements. - """ - args = [] - if isinstance(samples, int): - args.extend([b'SAMPLES', samples]) - return self.execute_command('MEMORY USAGE', key, *args) - - def memory_purge(self): - "Attempts to purge dirty pages for reclamation by allocator" - return self.execute_command('MEMORY PURGE') - - def ping(self): - "Ping the Redis server" - return self.execute_command('PING') - - def save(self): - """ - Tell the Redis server to save its data to disk, - blocking until the save is complete - """ - return self.execute_command('SAVE') - - def sentinel(self, *args): - "Redis Sentinel's SENTINEL command." - warnings.warn( - DeprecationWarning('Use the individual sentinel_* methods')) - - def sentinel_get_master_addr_by_name(self, service_name): - "Returns a (host, port) pair for the given ``service_name``" - return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME', - service_name) - - def sentinel_master(self, service_name): - "Returns a dictionary containing the specified masters state." 
- return self.execute_command('SENTINEL MASTER', service_name) - - def sentinel_masters(self): - "Returns a list of dictionaries containing each master's state." - return self.execute_command('SENTINEL MASTERS') - - def sentinel_monitor(self, name, ip, port, quorum): - "Add a new master to Sentinel to be monitored" - return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum) - - def sentinel_remove(self, name): - "Remove a master from Sentinel's monitoring" - return self.execute_command('SENTINEL REMOVE', name) - - def sentinel_sentinels(self, service_name): - "Returns a list of sentinels for ``service_name``" - return self.execute_command('SENTINEL SENTINELS', service_name) - - def sentinel_set(self, name, option, value): - "Set Sentinel monitoring parameters for a given master" - return self.execute_command('SENTINEL SET', name, option, value) - - def sentinel_slaves(self, service_name): - "Returns a list of slaves for ``service_name``" - return self.execute_command('SENTINEL SLAVES', service_name) - - def shutdown(self, save=False, nosave=False): - """Shutdown the Redis server. If Redis has persistence configured, - data will be flushed before shutdown. If the "save" option is set, - a data flush will be attempted even if there is no persistence - configured. If the "nosave" option is set, no data flush will be - attempted. The "save" and "nosave" options cannot both be set. - """ - if save and nosave: - raise DataError('SHUTDOWN save and nosave cannot both be set') - args = ['SHUTDOWN'] - if save: - args.append('SAVE') - if nosave: - args.append('NOSAVE') - try: - self.execute_command(*args) - except ConnectionError: - # a ConnectionError here is expected - return - raise RedisError("SHUTDOWN seems to have failed.") - - def slaveof(self, host=None, port=None): - """ - Set the server to be a replicated slave of the instance identified - by the ``host`` and ``port``. If called without arguments, the - instance is promoted to a master instead. 
- """ - if host is None and port is None: - return self.execute_command('SLAVEOF', b'NO', b'ONE') - return self.execute_command('SLAVEOF', host, port) - - def slowlog_get(self, num=None): - """ - Get the entries from the slowlog. If ``num`` is specified, get the - most recent ``num`` items. - """ - args = ['SLOWLOG GET'] - if num is not None: - args.append(num) - return self.execute_command(*args) - - def slowlog_len(self): - "Get the number of items in the slowlog" - return self.execute_command('SLOWLOG LEN') - - def slowlog_reset(self): - "Remove all items in the slowlog" - return self.execute_command('SLOWLOG RESET') - - def time(self): - """ - Returns the server time as a 2-item tuple of ints: - (seconds since epoch, microseconds into this second). - """ - return self.execute_command('TIME') - - def wait(self, num_replicas, timeout): - """ - Redis synchronous replication - That returns the number of replicas that processed the query when - we finally have at least ``num_replicas``, or when the ``timeout`` was - reached. - """ - return self.execute_command('WAIT', num_replicas, timeout) - - # BASIC KEY COMMANDS - def append(self, key, value): - """ - Appends the string ``value`` to the value at ``key``. If ``key`` - doesn't already exist, create it with a value of ``value``. - Returns the new length of the value at ``key``. - """ - return self.execute_command('APPEND', key, value) - - def bitcount(self, key, start=None, end=None): - """ - Returns the count of set bits in the value of ``key``. 
Optional - ``start`` and ``end`` paramaters indicate which bytes to consider - """ - params = [key] - if start is not None and end is not None: - params.append(start) - params.append(end) - elif (start is not None and end is None) or \ - (end is not None and start is None): - raise DataError("Both start and end must be specified") - return self.execute_command('BITCOUNT', *params) - - def bitfield(self, key, default_overflow=None): - """ - Return a BitFieldOperation instance to conveniently construct one or - more bitfield operations on ``key``. - """ - return BitFieldOperation(self, key, default_overflow=default_overflow) - - def bitop(self, operation, dest, *keys): - """ - Perform a bitwise operation using ``operation`` between ``keys`` and - store the result in ``dest``. - """ - return self.execute_command('BITOP', operation, dest, *keys) - - def bitpos(self, key, bit, start=None, end=None): - """ - Return the position of the first bit set to 1 or 0 in a string. - ``start`` and ``end`` difines search range. The range is interpreted - as a range of bytes and not a range of bits, so start=0 and end=2 - means to look at the first three bytes. - """ - if bit not in (0, 1): - raise DataError('bit must be 0 or 1') - params = [key, bit] - - start is not None and params.append(start) - - if start is not None and end is not None: - params.append(end) - elif start is None and end is not None: - raise DataError("start argument is not set, " - "when end is specified") - return self.execute_command('BITPOS', *params) - - def decr(self, name, amount=1): - """ - Decrements the value of ``key`` by ``amount``. If no key exists, - the value will be initialized as 0 - ``amount`` - """ - # An alias for ``decr()``, because it is already implemented - # as DECRBY redis command. - return self.decrby(name, amount) - - def decrby(self, name, amount=1): - """ - Decrements the value of ``key`` by ``amount``. 
If no key exists, - the value will be initialized as 0 - ``amount`` - """ - return self.execute_command('DECRBY', name, amount) - - def delete(self, *names): - "Delete one or more keys specified by ``names``" - return self.execute_command('DEL', *names) - - def __delitem__(self, name): - self.delete(name) - - def dump(self, name): - """ - 返回存储在指定键上的值的序列化版本。 - 如果键不存在,则返回零批量答复。 - """ - return self.execute_command('DUMP', name) - - def exists(self, *names): - "返回存在的“名称”的数目 Returns the number of ``names`` that exist" - return self.execute_command('EXISTS', *names) - __contains__ = exists - - def expire(self, name, time): - """ - 在“name”键上设置“time”秒的过期标志。``时间`` -可以用整数或python timedelta对象表示。 -Set an expire flag on key ``name`` for ``time`` seconds. ``time`` - can be represented by an integer or a Python timedelta object. - """ - if isinstance(time, datetime.timedelta): - time = int(time.total_seconds()) - return self.execute_command('EXPIRE', name, time) - - def expireat(self, name, when): - """ - Set an expire flag on key ``name``. ``when`` can be represented - as an integer indicating unix time or a Python datetime object. 
- """ - if isinstance(when, datetime.datetime): - when = int(mod_time.mktime(when.timetuple())) - return self.execute_command('EXPIREAT', name, when) - - def get(self, name): - """获取name的值 - - name,键 - 返回键“name”处的值,如果该键不存在,则返回“none” - """ - return self.execute_command('GET', name) - - def __getitem__(self, name): - """ - 返回键“name”处的值,如果键不存在。 - """ - value = self.get(name) - if value is not None: - return value - raise KeyError(name) - - def getbit(self, name, offset): - "Returns a boolean indicating the value of ``offset`` in ``name``" - return self.execute_command('GETBIT', name, offset) - - def getrange(self, key, start, end): - """ - Returns the substring of the string value stored at ``key``, - determined by the offsets ``start`` and ``end`` (both are inclusive) - """ - return self.execute_command('GETRANGE', key, start, end) - - def getset(self, name, value): - """ - Sets the value at key ``name`` to ``value`` - and returns the old value at key ``name`` atomically. - """ - return self.execute_command('GETSET', name, value) - - def incr(self, name, amount=1): - """ - Increments the value of ``key`` by ``amount``. If no key exists, - the value will be initialized as ``amount`` - """ - return self.incrby(name, amount) - - def incrby(self, name, amount=1): - """ - Increments the value of ``key`` by ``amount``. If no key exists, - the value will be initialized as ``amount`` - """ - # An alias for ``incr()``, because it is already implemented - # as INCRBY redis command. - return self.execute_command('INCRBY', name, amount) - - def incrbyfloat(self, name, amount=1.0): - """ - Increments the value at key ``name`` by floating ``amount``. 
- If no key exists, the value will be initialized as ``amount`` - """ - return self.execute_command('INCRBYFLOAT', name, amount) - - def keys(self, pattern='*'): - "Returns a list of keys matching ``pattern``" - return self.execute_command('KEYS', pattern) - - def mget(self, keys, *args): - """ - Returns a list of values ordered identically to ``keys`` - """ - args = list_or_args(keys, args) - options = {} - if not args: - options[EMPTY_RESPONSE] = [] - return self.execute_command('MGET', *args, **options) - - def mset(self, mapping): - """ - Sets key/values based on a mapping. Mapping is a dictionary of - key/value pairs. Both keys and values should be strings or types that - can be cast to a string via str(). - """ - items = [] - for pair in iteritems(mapping): - items.extend(pair) - return self.execute_command('MSET', *items) - - def msetnx(self, mapping): - """ - Sets key/values based on a mapping if none of the keys are already set. - Mapping is a dictionary of key/value pairs. Both keys and values - should be strings or types that can be cast to a string via str(). - Returns a boolean indicating if the operation was successful. - """ - items = [] - for pair in iteritems(mapping): - items.extend(pair) - return self.execute_command('MSETNX', *items) - - def move(self, name, db): - "Moves the key ``name`` to a different Redis database ``db``" - return self.execute_command('MOVE', name, db) - - def persist(self, name): - "Removes an expiration on ``name``" - return self.execute_command('PERSIST', name) - - def pexpire(self, name, time): - """ - Set an expire flag on key ``name`` for ``time`` milliseconds. - ``time`` can be represented by an integer or a Python timedelta - object. - """ - if isinstance(time, datetime.timedelta): - time = int(time.total_seconds() * 1000) - return self.execute_command('PEXPIRE', name, time) - - def pexpireat(self, name, when): - """ - Set an expire flag on key ``name``. 
``when`` can be represented - as an integer representing unix time in milliseconds (unix time * 1000) - or a Python datetime object. - """ - if isinstance(when, datetime.datetime): - ms = int(when.microsecond / 1000) - when = int(mod_time.mktime(when.timetuple())) * 1000 + ms - return self.execute_command('PEXPIREAT', name, when) - - def psetex(self, name, time_ms, value): - """ - Set the value of key ``name`` to ``value`` that expires in ``time_ms`` - milliseconds. ``time_ms`` can be represented by an integer or a Python - timedelta object - """ - if isinstance(time_ms, datetime.timedelta): - time_ms = int(time_ms.total_seconds() * 1000) - return self.execute_command('PSETEX', name, time_ms, value) - - def pttl(self, name): - "Returns the number of milliseconds until the key ``name`` will expire" - return self.execute_command('PTTL', name) - - def randomkey(self): - "Returns the name of a random key" - return self.execute_command('RANDOMKEY') - - def rename(self, src, dst): - """ - Rename key ``src`` to ``dst`` - """ - return self.execute_command('RENAME', src, dst) - - def renamenx(self, src, dst): - "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist" - return self.execute_command('RENAMENX', src, dst) - - def restore(self, name, ttl, value, replace=False): - """ - Create a key using the provided serialized value, previously obtained - using DUMP. - """ - params = [name, ttl, value] - if replace: - params.append('REPLACE') - return self.execute_command('RESTORE', *params) - - def set(self, name, value, ex=None, px=None, nx=False, xx=False): - """ -name,键 - -value,值 - -ex,过期时间(秒) - -px,过期时间(毫秒) - -nx,如果设置为True,则只有key不存在时,当前set操作才执行,同#setnx(key, value) - -xx,如果设置为True,则只有key存在时,当前set操作才执行 - Set the value at key ``name`` to ``value`` - - ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. - - ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. 
- - ``nx`` if set to True, set the value at key ``name`` to ``value`` only - if it does not exist. - - ``xx`` if set to True, set the value at key ``name`` to ``value`` only - if it already exists. - """ - pieces = [name, value] - if ex is not None: - pieces.append('EX') - if isinstance(ex, datetime.timedelta): - ex = int(ex.total_seconds()) - pieces.append(ex) - if px is not None: - pieces.append('PX') - if isinstance(px, datetime.timedelta): - px = int(px.total_seconds() * 1000) - pieces.append(px) - - if nx: - pieces.append('NX') - if xx: - pieces.append('XX') - return self.execute_command('SET', *pieces) - - def __setitem__(self, name, value): - self.set(name, value) - - def setbit(self, name, offset, value): - """ - Flag the ``offset`` in ``name`` as ``value``. Returns a boolean - indicating the previous value of ``offset``. - """ - value = value and 1 or 0 - return self.execute_command('SETBIT', name, offset, value) - - def setex(self, name, time, value): - """ - Set the value of key ``name`` to ``value`` that expires in ``time`` - seconds. ``time`` can be represented by an integer or a Python - timedelta object. - """ - if isinstance(time, datetime.timedelta): - time = int(time.total_seconds()) - return self.execute_command('SETEX', name, time, value) - - def setnx(self, name, value): - "Set the value of key ``name`` to ``value`` if key doesn't exist" - return self.execute_command('SETNX', name, value) - - def setrange(self, name, offset, value): - """ - Overwrite bytes in the value of ``name`` starting at ``offset`` with - ``value``. If ``offset`` plus the length of ``value`` exceeds the - length of the original value, the new value will be larger than before. - If ``offset`` exceeds the length of the original value, null bytes - will be used to pad between the end of the previous value and the start - of what's being injected. - - Returns the length of the new string. 
- """ - return self.execute_command('SETRANGE', name, offset, value) - - def strlen(self, name): - "Return the number of bytes stored in the value of ``name``" - return self.execute_command('STRLEN', name) - - def substr(self, name, start, end=-1): - """ - Return a substring of the string at key ``name``. ``start`` and ``end`` - are 0-based integers specifying the portion of the string to return. - """ - return self.execute_command('SUBSTR', name, start, end) - - def touch(self, *args): - """ - Alters the last access time of a key(s) ``*args``. A key is ignored - if it does not exist. - """ - return self.execute_command('TOUCH', *args) - - def ttl(self, name): - "Returns the number of seconds until the key ``name`` will expire" - return self.execute_command('TTL', name) - - def type(self, name): - "Returns the type of key ``name``" - return self.execute_command('TYPE', name) - - def watch(self, *names): - """ - Watches the values at keys ``names``, or None if the key doesn't exist - """ - warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object')) - - def unwatch(self): - """ - Unwatches the value at key ``name``, or None of the key doesn't exist - """ - warnings.warn( - DeprecationWarning('Call UNWATCH from a Pipeline object')) - - def unlink(self, *names): - "Unlink one or more keys specified by ``names``" - return self.execute_command('UNLINK', *names) - - # LIST COMMANDS - def blpop(self, keys, timeout=0): - """ - LPOP a value off of the first non-empty list - named in the ``keys`` list. - - If none of the lists in ``keys`` has a value to LPOP, then block - for ``timeout`` seconds, or until a value gets pushed on to one - of the lists. - - If timeout is 0, then block indefinitely. - """ - if timeout is None: - timeout = 0 - keys = list_or_args(keys, None) - keys.append(timeout) - return self.execute_command('BLPOP', *keys) - - def brpop(self, keys, timeout=0): - """ - RPOP a value off of the first non-empty list - named in the ``keys`` list. 
- - If none of the lists in ``keys`` has a value to RPOP, then block - for ``timeout`` seconds, or until a value gets pushed on to one - of the lists. - - If timeout is 0, then block indefinitely. - """ - if timeout is None: - timeout = 0 - keys = list_or_args(keys, None) - keys.append(timeout) - return self.execute_command('BRPOP', *keys) - - def brpoplpush(self, src, dst, timeout=0): - """ - Pop a value off the tail of ``src``, push it on the head of ``dst`` - and then return it. - - This command blocks until a value is in ``src`` or until ``timeout`` - seconds elapse, whichever is first. A ``timeout`` value of 0 blocks - forever. - """ - if timeout is None: - timeout = 0 - return self.execute_command('BRPOPLPUSH', src, dst, timeout) - - def lindex(self, name, index): - """ - Return the item from list ``name`` at position ``index`` - - Negative indexes are supported and will return an item at the - end of the list - """ - return self.execute_command('LINDEX', name, index) - - def linsert(self, name, where, refvalue, value): - """ - Insert ``value`` in list ``name`` either immediately before or after - [``where``] ``refvalue`` - - Returns the new length of the list on success or -1 if ``refvalue`` - is not in the list. 
- """ - return self.execute_command('LINSERT', name, where, refvalue, value) - - def llen(self, name): - "Return the length of the list ``name``" - return self.execute_command('LLEN', name) - - def lpop(self, name): - """元素从list的左边移出 - - name,键 - """ - return self.execute_command('LPOP', name) - - def lpush(self, name, *values): - "元素从list的左边添加,可以添加多个" - return self.execute_command('LPUSH', name, *values) - - def lpushx(self, name, value): - "当name存在时,元素才能从list的右边加入" - return self.execute_command('LPUSHX', name, value) - - def lrange(self, name, start, end): - """ - Return a slice of the list ``name`` between - position ``start`` and ``end`` - - ``start`` and ``end`` can be negative numbers just like - Python slicing notation - """ - return self.execute_command('LRANGE', name, start, end) - - def lrem(self, name, count, value): - """ - Remove the first ``count`` occurrences of elements equal to ``value`` - from the list stored at ``name``. - - The count argument influences the operation in the following ways: - count > 0: Remove elements equal to value moving from head to tail. - count < 0: Remove elements equal to value moving from tail to head. - count = 0: Remove all elements equal to value. - """ - return self.execute_command('LREM', name, count, value) - - def lset(self, name, index, value): - "Set ``position`` of list ``name`` to ``value``" - return self.execute_command('LSET', name, index, value) - - def ltrim(self, name, start, end): - """ - Trim the list ``name``, removing all values not within the slice - between ``start`` and ``end`` - - ``start`` and ``end`` can be negative numbers just like - Python slicing notation - """ - return self.execute_command('LTRIM', name, start, end) - - def rpop(self, name): - "元素从list的右边移出" - return self.execute_command('RPOP', name) - - def rpoplpush(self, src, dst): - """ - RPOP a value off of the ``src`` list and atomically LPUSH it - on to the ``dst`` list. Returns the value. 
- """ - return self.execute_command('RPOPLPUSH', src, dst) - - def rpush(self, name, *values): - "元素从list的右边添加" - return self.execute_command('RPUSH', name, *values) - - def rpushx(self, name, value): - "当name存在时,元素才能从list的右边加入" - return self.execute_command('RPUSHX', name, value) - - def sort(self, name, start=None, num=None, by=None, get=None, - desc=False, alpha=False, store=None, groups=False): - """ - Sort and return the list, set or sorted set at ``name``. - - ``start`` and ``num`` allow for paging through the sorted data - - ``by`` allows using an external key to weight and sort the items. - Use an "*" to indicate where in the key the item value is located - - ``get`` allows for returning items from external keys rather than the - sorted data itself. Use an "*" to indicate where int he key - the item value is located - - ``desc`` allows for reversing the sort - - ``alpha`` allows for sorting lexicographically rather than numerically - - ``store`` allows for storing the result of the sort into - the key ``store`` - - ``groups`` if set to True and if ``get`` contains at least two - elements, sort will return a list of tuples, each containing the - values fetched from the arguments to ``get``. - - """ - if (start is not None and num is None) or \ - (num is not None and start is None): - raise DataError("``start`` and ``num`` must both be specified") - - pieces = [name] - if by is not None: - pieces.append(b'BY') - pieces.append(by) - if start is not None and num is not None: - pieces.append(b'LIMIT') - pieces.append(start) - pieces.append(num) - if get is not None: - # If get is a string assume we want to get a single value. - # Otherwise assume it's an interable and we want to get multiple - # values. We can't just iterate blindly because strings are - # iterable. 
- if isinstance(get, (bytes, basestring)): - pieces.append(b'GET') - pieces.append(get) - else: - for g in get: - pieces.append(b'GET') - pieces.append(g) - if desc: - pieces.append(b'DESC') - if alpha: - pieces.append(b'ALPHA') - if store is not None: - pieces.append(b'STORE') - pieces.append(store) - - if groups: - if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: - raise DataError('when using "groups" the "get" argument ' - 'must be specified and contain at least ' - 'two keys') - - options = {'groups': len(get) if groups else None} - return self.execute_command('SORT', *pieces, **options) - - # SCAN COMMANDS - def scan(self, cursor=0, match=None, count=None): - """ - Incrementally return lists of key names. Also return a cursor - indicating the scan position. - - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - """ - pieces = [cursor] - if match is not None: - pieces.extend([b'MATCH', match]) - if count is not None: - pieces.extend([b'COUNT', count]) - return self.execute_command('SCAN', *pieces) - - def scan_iter(self, match=None, count=None): - """ - Make an iterator using the SCAN command so that the client doesn't - need to remember the cursor position. - - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - """ - cursor = '0' - while cursor != 0: - cursor, data = self.scan(cursor=cursor, match=match, count=count) - for item in data: - yield item - - def sscan(self, name, cursor=0, match=None, count=None): - """ - Incrementally return lists of elements in a set. Also return a cursor - indicating the scan position. 
- - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - """ - pieces = [name, cursor] - if match is not None: - pieces.extend([b'MATCH', match]) - if count is not None: - pieces.extend([b'COUNT', count]) - return self.execute_command('SSCAN', *pieces) - - def sscan_iter(self, name, match=None, count=None): - """ - Make an iterator using the SSCAN command so that the client doesn't - need to remember the cursor position. - - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - """ - cursor = '0' - while cursor != 0: - cursor, data = self.sscan(name, cursor=cursor, - match=match, count=count) - for item in data: - yield item - - def hscan(self, name, cursor=0, match=None, count=None): - """ - Incrementally return key/value slices in a hash. Also return a cursor - indicating the scan position. - - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - """ - pieces = [name, cursor] - if match is not None: - pieces.extend([b'MATCH', match]) - if count is not None: - pieces.extend([b'COUNT', count]) - return self.execute_command('HSCAN', *pieces) - - def hscan_iter(self, name, match=None, count=None): - """ - Make an iterator using the HSCAN command so that the client doesn't - need to remember the cursor position. - - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - """ - cursor = '0' - while cursor != 0: - cursor, data = self.hscan(name, cursor=cursor, - match=match, count=count) - for item in data.items(): - yield item - - def zscan(self, name, cursor=0, match=None, count=None, - score_cast_func=float): - """ - Incrementally return lists of elements in a sorted set. Also return a - cursor indicating the scan position. 
- - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - - ``score_cast_func`` a callable used to cast the score return value - """ - pieces = [name, cursor] - if match is not None: - pieces.extend([b'MATCH', match]) - if count is not None: - pieces.extend([b'COUNT', count]) - options = {'score_cast_func': score_cast_func} - return self.execute_command('ZSCAN', *pieces, **options) - - def zscan_iter(self, name, match=None, count=None, - score_cast_func=float): - """ - Make an iterator using the ZSCAN command so that the client doesn't - need to remember the cursor position. - - ``match`` allows for filtering the keys by pattern - - ``count`` allows for hint the minimum number of returns - - ``score_cast_func`` a callable used to cast the score return value - """ - cursor = '0' - while cursor != 0: - cursor, data = self.zscan(name, cursor=cursor, match=match, - count=count, - score_cast_func=score_cast_func) - for item in data: - yield item - - # SET COMMANDS - def sadd(self, name, *values): - "Add ``value(s)`` to set ``name``" - return self.execute_command('SADD', name, *values) - - def scard(self, name): - "Return the number of elements in set ``name``" - return self.execute_command('SCARD', name) - - def sdiff(self, keys, *args): - "Return the difference of sets specified by ``keys``" - args = list_or_args(keys, args) - return self.execute_command('SDIFF', *args) - - def sdiffstore(self, dest, keys, *args): - """ - Store the difference of sets specified by ``keys`` into a new - set named ``dest``. Returns the number of keys in the new set. 
- """ - args = list_or_args(keys, args) - return self.execute_command('SDIFFSTORE', dest, *args) - - def sinter(self, keys, *args): - "Return the intersection of sets specified by ``keys``" - args = list_or_args(keys, args) - return self.execute_command('SINTER', *args) - - def sinterstore(self, dest, keys, *args): - """ - Store the intersection of sets specified by ``keys`` into a new - set named ``dest``. Returns the number of keys in the new set. - """ - args = list_or_args(keys, args) - return self.execute_command('SINTERSTORE', dest, *args) - - def sismember(self, name, value): - "Return a boolean indicating if ``value`` is a member of set ``name``" - return self.execute_command('SISMEMBER', name, value) - - def smembers(self, name): - "Return all members of the set ``name``" - return self.execute_command('SMEMBERS', name) - - def smove(self, src, dst, value): - "Move ``value`` from set ``src`` to set ``dst`` atomically" - return self.execute_command('SMOVE', src, dst, value) - - def spop(self, name, count=None): - "Remove and return a random member of set ``name``" - args = (count is not None) and [count] or [] - return self.execute_command('SPOP', name, *args) - - def srandmember(self, name, number=None): - """ - If ``number`` is None, returns a random member of set ``name``. - - If ``number`` is supplied, returns a list of ``number`` random - memebers of set ``name``. Note this is only available when running - Redis 2.6+. - """ - args = (number is not None) and [number] or [] - return self.execute_command('SRANDMEMBER', name, *args) - - def srem(self, name, *values): - "Remove ``values`` from set ``name``" - return self.execute_command('SREM', name, *values) - - def sunion(self, keys, *args): - "Return the union of sets specified by ``keys``" - args = list_or_args(keys, args) - return self.execute_command('SUNION', *args) - - def sunionstore(self, dest, keys, *args): - """ - Store the union of sets specified by ``keys`` into a new - set named ``dest``. 
Returns the number of keys in the new set. - """ - args = list_or_args(keys, args) - return self.execute_command('SUNIONSTORE', dest, *args) - - # STREAMS COMMANDS - def xack(self, name, groupname, *ids): - """ - Acknowledges the successful processing of one or more messages. - name: name of the stream. - groupname: name of the consumer group. - *ids: message ids to acknowlege. - """ - return self.execute_command('XACK', name, groupname, *ids) - - def xadd(self, name, fields, id='*', maxlen=None, approximate=True): - """ - Add to a stream. - name: name of the stream - fields: dict of field/value pairs to insert into the stream - id: Location to insert this record. By default it is appended. - maxlen: truncate old stream members beyond this size - approximate: actual stream length may be slightly more than maxlen - - """ - pieces = [] - if maxlen is not None: - if not isinstance(maxlen, (int, long)) or maxlen < 1: - raise DataError('XADD maxlen must be a positive integer') - pieces.append(b'MAXLEN') - if approximate: - pieces.append(b'~') - pieces.append(str(maxlen)) - pieces.append(id) - if not isinstance(fields, dict) or len(fields) == 0: - raise DataError('XADD fields must be a non-empty dict') - for pair in iteritems(fields): - pieces.extend(pair) - return self.execute_command('XADD', name, *pieces) - - def xclaim(self, name, groupname, consumername, min_idle_time, message_ids, - idle=None, time=None, retrycount=None, force=False, - justid=False): - """ - Changes the ownership of a pending message. - name: name of the stream. - groupname: name of the consumer group. - consumername: name of a consumer that claims the message. - min_idle_time: filter messages that were idle less than this amount of - milliseconds - message_ids: non-empty list or tuple of message IDs to claim - idle: optional. Set the idle time (last time it was delivered) of the - message in ms - time: optional integer. 
This is the same as idle but instead of a - relative amount of milliseconds, it sets the idle time to a specific - Unix time (in milliseconds). - retrycount: optional integer. set the retry counter to the specified - value. This counter is incremented every time a message is delivered - again. - force: optional boolean, false by default. Creates the pending message - entry in the PEL even if certain specified IDs are not already in the - PEL assigned to a different client. - justid: optional boolean, false by default. Return just an array of IDs - of messages successfully claimed, without returning the actual message - """ - if not isinstance(min_idle_time, (int, long)) or min_idle_time < 0: - raise DataError("XCLAIM min_idle_time must be a non negative " - "integer") - if not isinstance(message_ids, (list, tuple)) or not message_ids: - raise DataError("XCLAIM message_ids must be a non empty list or " - "tuple of message IDs to claim") - - kwargs = {} - pieces = [name, groupname, consumername, str(min_idle_time)] - pieces.extend(list(message_ids)) - - if idle is not None: - if not isinstance(idle, (int, long)): - raise DataError("XCLAIM idle must be an integer") - pieces.extend((b'IDLE', str(idle))) - if time is not None: - if not isinstance(time, (int, long)): - raise DataError("XCLAIM time must be an integer") - pieces.extend((b'TIME', str(time))) - if retrycount is not None: - if not isinstance(retrycount, (int, long)): - raise DataError("XCLAIM retrycount must be an integer") - pieces.extend((b'RETRYCOUNT', str(retrycount))) - - if force: - if not isinstance(force, bool): - raise DataError("XCLAIM force must be a boolean") - pieces.append(b'FORCE') - if justid: - if not isinstance(justid, bool): - raise DataError("XCLAIM justid must be a boolean") - pieces.append(b'JUSTID') - kwargs['parse_justid'] = True - return self.execute_command('XCLAIM', *pieces, **kwargs) - - def xdel(self, name, *ids): - """ - Deletes one or more messages from a stream. 
- name: name of the stream. - *ids: message ids to delete. - """ - return self.execute_command('XDEL', name, *ids) - - def xgroup_create(self, name, groupname, id='$', mkstream=False): - """ - Create a new consumer group associated with a stream. - name: name of the stream. - groupname: name of the consumer group. - id: ID of the last item in the stream to consider already delivered. - """ - pieces = ['XGROUP CREATE', name, groupname, id] - if mkstream: - pieces.append(b'MKSTREAM') - return self.execute_command(*pieces) - - def xgroup_delconsumer(self, name, groupname, consumername): - """ - Remove a specific consumer from a consumer group. - Returns the number of pending messages that the consumer had before it - was deleted. - name: name of the stream. - groupname: name of the consumer group. - consumername: name of consumer to delete - """ - return self.execute_command('XGROUP DELCONSUMER', name, groupname, - consumername) - - def xgroup_destroy(self, name, groupname): - """ - Destroy a consumer group. - name: name of the stream. - groupname: name of the consumer group. - """ - return self.execute_command('XGROUP DESTROY', name, groupname) - - def xgroup_setid(self, name, groupname, id): - """ - Set the consumer group last delivered ID to something else. - name: name of the stream. - groupname: name of the consumer group. - id: ID of the last item in the stream to consider already delivered. - """ - return self.execute_command('XGROUP SETID', name, groupname, id) - - def xinfo_consumers(self, name, groupname): - """ - Returns general information about the consumers in the group. - name: name of the stream. - groupname: name of the consumer group. - """ - return self.execute_command('XINFO CONSUMERS', name, groupname) - - def xinfo_groups(self, name): - """ - Returns general information about the consumer groups of the stream. - name: name of the stream. 
- """ - return self.execute_command('XINFO GROUPS', name) - - def xinfo_stream(self, name): - """ - Returns general information about the stream. - name: name of the stream. - """ - return self.execute_command('XINFO STREAM', name) - - def xlen(self, name): - """ - Returns the number of elements in a given stream. - """ - return self.execute_command('XLEN', name) - - def xpending(self, name, groupname): - """ - Returns information about pending messages of a group. - name: name of the stream. - groupname: name of the consumer group. - """ - return self.execute_command('XPENDING', name, groupname) - - def xpending_range(self, name, groupname, min, max, count, - consumername=None): - """ - Returns information about pending messages, in a range. - name: name of the stream. - groupname: name of the consumer group. - min: minimum stream ID. - max: maximum stream ID. - count: number of messages to return - consumername: name of a consumer to filter by (optional). - """ - pieces = [name, groupname] - if min is not None or max is not None or count is not None: - if min is None or max is None or count is None: - raise DataError("XPENDING must be provided with min, max " - "and count parameters, or none of them. ") - if not isinstance(count, (int, long)) or count < -1: - raise DataError("XPENDING count must be a integer >= -1") - pieces.extend((min, max, str(count))) - if consumername is not None: - if min is None or max is None or count is None: - raise DataError("if XPENDING is provided with consumername," - " it must be provided with min, max and" - " count parameters") - pieces.append(consumername) - return self.execute_command('XPENDING', *pieces, parse_detail=True) - - def xrange(self, name, min='-', max='+', count=None): - """ - Read stream values within an interval. - name: name of the stream. - start: first stream ID. defaults to '-', - meaning the earliest available. - finish: last stream ID. defaults to '+', - meaning the latest available. 
- count: if set, only return this many items, beginning with the - earliest available. - """ - pieces = [min, max] - if count is not None: - if not isinstance(count, (int, long)) or count < 1: - raise DataError('XRANGE count must be a positive integer') - pieces.append(b'COUNT') - pieces.append(str(count)) - - return self.execute_command('XRANGE', name, *pieces) - - def xread(self, streams, count=None, block=None): - """ - Block and monitor multiple streams for new data. - streams: a dict of stream names to stream IDs, where - IDs indicate the last ID already seen. - count: if set, only return this many items, beginning with the - earliest available. - block: number of milliseconds to wait, if nothing already present. - """ - pieces = [] - if block is not None: - if not isinstance(block, (int, long)) or block < 0: - raise DataError('XREAD block must be a non-negative integer') - pieces.append(b'BLOCK') - pieces.append(str(block)) - if count is not None: - if not isinstance(count, (int, long)) or count < 1: - raise DataError('XREAD count must be a positive integer') - pieces.append(b'COUNT') - pieces.append(str(count)) - if not isinstance(streams, dict) or len(streams) == 0: - raise DataError('XREAD streams must be a non empty dict') - pieces.append(b'STREAMS') - keys, values = izip(*iteritems(streams)) - pieces.extend(keys) - pieces.extend(values) - return self.execute_command('XREAD', *pieces) - - def xreadgroup(self, groupname, consumername, streams, count=None, - block=None, noack=False): - """ - Read from a stream via a consumer group. - groupname: name of the consumer group. - consumername: name of the requesting consumer. - streams: a dict of stream names to stream IDs, where - IDs indicate the last ID already seen. - count: if set, only return this many items, beginning with the - earliest available. - block: number of milliseconds to wait, if nothing already present. 
- noack: do not add messages to the PEL - """ - pieces = [b'GROUP', groupname, consumername] - if count is not None: - if not isinstance(count, (int, long)) or count < 1: - raise DataError("XREADGROUP count must be a positive integer") - pieces.append(b'COUNT') - pieces.append(str(count)) - if block is not None: - if not isinstance(block, (int, long)) or block < 0: - raise DataError("XREADGROUP block must be a non-negative " - "integer") - pieces.append(b'BLOCK') - pieces.append(str(block)) - if noack: - pieces.append(b'NOACK') - if not isinstance(streams, dict) or len(streams) == 0: - raise DataError('XREADGROUP streams must be a non empty dict') - pieces.append(b'STREAMS') - pieces.extend(streams.keys()) - pieces.extend(streams.values()) - return self.execute_command('XREADGROUP', *pieces) - - def xrevrange(self, name, max='+', min='-', count=None): - """ - Read stream values within an interval, in reverse order. - name: name of the stream - start: first stream ID. defaults to '+', - meaning the latest available. - finish: last stream ID. defaults to '-', - meaning the earliest available. - count: if set, only return this many items, beginning with the - latest available. - """ - pieces = [max, min] - if count is not None: - if not isinstance(count, (int, long)) or count < 1: - raise DataError('XREVRANGE count must be a positive integer') - pieces.append(b'COUNT') - pieces.append(str(count)) - - return self.execute_command('XREVRANGE', name, *pieces) - - def xtrim(self, name, maxlen, approximate=True): - """ - Trims old messages from a stream. - name: name of the stream. 
- maxlen: truncate old stream messages beyond this size - approximate: actual stream length may be slightly more than maxlen - """ - pieces = [b'MAXLEN'] - if approximate: - pieces.append(b'~') - pieces.append(maxlen) - return self.execute_command('XTRIM', name, *pieces) - - # SORTED SET COMMANDS - def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): - """ - Set any number of element-name, score pairs to the key ``name``. Pairs - are specified as a dict of element-names keys to score values. - - ``nx`` forces ZADD to only create new elements and not to update - scores for elements that already exist. - - ``xx`` forces ZADD to only update scores of elements that already - exist. New elements will not be added. - - ``ch`` modifies the return value to be the numbers of elements changed. - Changed elements include new elements that were added and elements - whose scores changed. - - ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a - single element/score pair can be specified and the score is the amount - the existing score will be incremented by. When using this mode the - return value of ZADD will be the new score of the element. - - The return value of ZADD varies based on the mode specified. With no - options, ZADD returns the number of new elements added to the sorted - set. 
- """ - if not mapping: - raise DataError("ZADD requires at least one element/score pair") - if nx and xx: - raise DataError("ZADD allows either 'nx' or 'xx', not both") - if incr and len(mapping) != 1: - raise DataError("ZADD option 'incr' only works when passing a " - "single element/score pair") - pieces = [] - options = {} - if nx: - pieces.append(b'NX') - if xx: - pieces.append(b'XX') - if ch: - pieces.append(b'CH') - if incr: - pieces.append(b'INCR') - options['as_score'] = True - for pair in iteritems(mapping): - pieces.append(pair[1]) - pieces.append(pair[0]) - return self.execute_command('ZADD', name, *pieces, **options) - - def zcard(self, name): - "Return the number of elements in the sorted set ``name``" - return self.execute_command('ZCARD', name) - - def zcount(self, name, min, max): - """ - Returns the number of elements in the sorted set at key ``name`` with - a score between ``min`` and ``max``. - """ - return self.execute_command('ZCOUNT', name, min, max) - - def zincrby(self, name, amount, value): - "Increment the score of ``value`` in sorted set ``name`` by ``amount``" - return self.execute_command('ZINCRBY', name, amount, value) - - def zinterstore(self, dest, keys, aggregate=None): - """ - Intersect multiple sorted sets specified by ``keys`` into - a new sorted set, ``dest``. Scores in the destination will be - aggregated based on the ``aggregate``, or SUM if none is provided. - """ - return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) - - def zlexcount(self, name, min, max): - """ - Return the number of items in the sorted set ``name`` between the - lexicographical range ``min`` and ``max``. - """ - return self.execute_command('ZLEXCOUNT', name, min, max) - - def zpopmax(self, name, count=None): - """ - Remove and return up to ``count`` members with the highest scores - from the sorted set ``name``. 
- """ - args = (count is not None) and [count] or [] - options = { - 'withscores': True - } - return self.execute_command('ZPOPMAX', name, *args, **options) - - def zpopmin(self, name, count=None): - """ - Remove and return up to ``count`` members with the lowest scores - from the sorted set ``name``. - """ - args = (count is not None) and [count] or [] - options = { - 'withscores': True - } - return self.execute_command('ZPOPMIN', name, *args, **options) - - def bzpopmax(self, keys, timeout=0): - """ - ZPOPMAX a value off of the first non-empty sorted set - named in the ``keys`` list. - - If none of the sorted sets in ``keys`` has a value to ZPOPMAX, - then block for ``timeout`` seconds, or until a member gets added - to one of the sorted sets. - - If timeout is 0, then block indefinitely. - """ - if timeout is None: - timeout = 0 - keys = list_or_args(keys, None) - keys.append(timeout) - return self.execute_command('BZPOPMAX', *keys) - - def bzpopmin(self, keys, timeout=0): - """ - ZPOPMIN a value off of the first non-empty sorted set - named in the ``keys`` list. - - If none of the sorted sets in ``keys`` has a value to ZPOPMIN, - then block for ``timeout`` seconds, or until a member gets added - to one of the sorted sets. - - If timeout is 0, then block indefinitely. - """ - if timeout is None: - timeout = 0 - keys = list_or_args(keys, None) - keys.append(timeout) - return self.execute_command('BZPOPMIN', *keys) - - def zrange(self, name, start, end, desc=False, withscores=False, - score_cast_func=float): - """ - Return a range of values from sorted set ``name`` between - ``start`` and ``end`` sorted in ascending order. - - ``start`` and ``end`` can be negative, indicating the end of the range. - - ``desc`` a boolean indicating whether to sort the results descendingly - - ``withscores`` indicates to return the scores along with the values. 
- The return type is a list of (value, score) pairs - - ``score_cast_func`` a callable used to cast the score return value - """ - if desc: - return self.zrevrange(name, start, end, withscores, - score_cast_func) - pieces = ['ZRANGE', name, start, end] - if withscores: - pieces.append(b'WITHSCORES') - options = { - 'withscores': withscores, - 'score_cast_func': score_cast_func - } - return self.execute_command(*pieces, **options) - - def zrangebylex(self, name, min, max, start=None, num=None): - """ - Return the lexicographical range of values from sorted set ``name`` - between ``min`` and ``max``. - - If ``start`` and ``num`` are specified, then return a slice of the - range. - """ - if (start is not None and num is None) or \ - (num is not None and start is None): - raise DataError("``start`` and ``num`` must both be specified") - pieces = ['ZRANGEBYLEX', name, min, max] - if start is not None and num is not None: - pieces.extend([b'LIMIT', start, num]) - return self.execute_command(*pieces) - - def zrevrangebylex(self, name, max, min, start=None, num=None): - """ - Return the reversed lexicographical range of values from sorted set - ``name`` between ``max`` and ``min``. - - If ``start`` and ``num`` are specified, then return a slice of the - range. - """ - if (start is not None and num is None) or \ - (num is not None and start is None): - raise DataError("``start`` and ``num`` must both be specified") - pieces = ['ZREVRANGEBYLEX', name, max, min] - if start is not None and num is not None: - pieces.extend([b'LIMIT', start, num]) - return self.execute_command(*pieces) - - def zrangebyscore(self, name, min, max, start=None, num=None, - withscores=False, score_cast_func=float): - """ - Return a range of values from the sorted set ``name`` with scores - between ``min`` and ``max``. - - If ``start`` and ``num`` are specified, then return a slice - of the range. - - ``withscores`` indicates to return the scores along with the values. 
- The return type is a list of (value, score) pairs - - `score_cast_func`` a callable used to cast the score return value - """ - if (start is not None and num is None) or \ - (num is not None and start is None): - raise DataError("``start`` and ``num`` must both be specified") - pieces = ['ZRANGEBYSCORE', name, min, max] - if start is not None and num is not None: - pieces.extend([b'LIMIT', start, num]) - if withscores: - pieces.append(b'WITHSCORES') - options = { - 'withscores': withscores, - 'score_cast_func': score_cast_func - } - return self.execute_command(*pieces, **options) - - def zrank(self, name, value): - """ - Returns a 0-based value indicating the rank of ``value`` in sorted set - ``name`` - """ - return self.execute_command('ZRANK', name, value) - - def zrem(self, name, *values): - "Remove member ``values`` from sorted set ``name``" - return self.execute_command('ZREM', name, *values) - - def zremrangebylex(self, name, min, max): - """ - Remove all elements in the sorted set ``name`` between the - lexicographical range specified by ``min`` and ``max``. - - Returns the number of elements removed. - """ - return self.execute_command('ZREMRANGEBYLEX', name, min, max) - - def zremrangebyrank(self, name, min, max): - """ - Remove all elements in the sorted set ``name`` with ranks between - ``min`` and ``max``. Values are 0-based, ordered from smallest score - to largest. Values can be negative indicating the highest scores. - Returns the number of elements removed - """ - return self.execute_command('ZREMRANGEBYRANK', name, min, max) - - def zremrangebyscore(self, name, min, max): - """ - Remove all elements in the sorted set ``name`` with scores - between ``min`` and ``max``. Returns the number of elements removed. 
- """ - return self.execute_command('ZREMRANGEBYSCORE', name, min, max) - - def zrevrange(self, name, start, end, withscores=False, - score_cast_func=float): - """ - Return a range of values from sorted set ``name`` between - ``start`` and ``end`` sorted in descending order. - - ``start`` and ``end`` can be negative, indicating the end of the range. - - ``withscores`` indicates to return the scores along with the values - The return type is a list of (value, score) pairs - - ``score_cast_func`` a callable used to cast the score return value - """ - pieces = ['ZREVRANGE', name, start, end] - if withscores: - pieces.append(b'WITHSCORES') - options = { - 'withscores': withscores, - 'score_cast_func': score_cast_func - } - return self.execute_command(*pieces, **options) - - def zrevrangebyscore(self, name, max, min, start=None, num=None, - withscores=False, score_cast_func=float): - """ - Return a range of values from the sorted set ``name`` with scores - between ``min`` and ``max`` in descending order. - - If ``start`` and ``num`` are specified, then return a slice - of the range. - - ``withscores`` indicates to return the scores along with the values. 
- The return type is a list of (value, score) pairs - - ``score_cast_func`` a callable used to cast the score return value - """ - if (start is not None and num is None) or \ - (num is not None and start is None): - raise DataError("``start`` and ``num`` must both be specified") - pieces = ['ZREVRANGEBYSCORE', name, max, min] - if start is not None and num is not None: - pieces.extend([b'LIMIT', start, num]) - if withscores: - pieces.append(b'WITHSCORES') - options = { - 'withscores': withscores, - 'score_cast_func': score_cast_func - } - return self.execute_command(*pieces, **options) - - def zrevrank(self, name, value): - """ - Returns a 0-based value indicating the descending rank of - ``value`` in sorted set ``name`` - """ - return self.execute_command('ZREVRANK', name, value) - - def zscore(self, name, value): - "Return the score of element ``value`` in sorted set ``name``" - return self.execute_command('ZSCORE', name, value) - - def zunionstore(self, dest, keys, aggregate=None): - """ - Union multiple sorted sets specified by ``keys`` into - a new sorted set, ``dest``. Scores in the destination will be - aggregated based on the ``aggregate``, or SUM if none is provided. - """ - return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) - - def _zaggregate(self, command, dest, keys, aggregate=None): - pieces = [command, dest, len(keys)] - if isinstance(keys, dict): - keys, weights = iterkeys(keys), itervalues(keys) - else: - weights = None - pieces.extend(keys) - if weights: - pieces.append(b'WEIGHTS') - pieces.extend(weights) - if aggregate: - pieces.append(b'AGGREGATE') - pieces.append(aggregate) - return self.execute_command(*pieces) - - # HYPERLOGLOG COMMANDS - def pfadd(self, name, *values): - "Adds the specified elements to the specified HyperLogLog." - return self.execute_command('PFADD', name, *values) - - def pfcount(self, *sources): - """ - Return the approximated cardinality of - the set observed by the HyperLogLog at key(s). 
- """ - return self.execute_command('PFCOUNT', *sources) - - def pfmerge(self, dest, *sources): - "Merge N different HyperLogLogs into a single one." - return self.execute_command('PFMERGE', dest, *sources) - - # HASH COMMANDS - def hdel(self, name, *keys): - "Delete ``keys`` from hash ``name``" - return self.execute_command('HDEL', name, *keys) - - def hexists(self, name, key): - "Returns a boolean indicating if ``key`` exists within hash ``name``" - return self.execute_command('HEXISTS', name, key) - - def hget(self, name, key): - "Return the value of ``key`` within the hash ``name``" - return self.execute_command('HGET', name, key) - - def hgetall(self, name): - "Return a Python dict of the hash's name/value pairs" - return self.execute_command('HGETALL', name) - - def hincrby(self, name, key, amount=1): - "Increment the value of ``key`` in hash ``name`` by ``amount``" - return self.execute_command('HINCRBY', name, key, amount) - - def hincrbyfloat(self, name, key, amount=1.0): - """ - Increment the value of ``key`` in hash ``name`` by floating ``amount`` - """ - return self.execute_command('HINCRBYFLOAT', name, key, amount) - - def hkeys(self, name): - "Return the list of keys within hash ``name``" - return self.execute_command('HKEYS', name) - - def hlen(self, name): - "Return the number of elements in hash ``name``" - return self.execute_command('HLEN', name) - - def hset(self, name, key, value): - """ - Set ``key`` to ``value`` within hash ``name`` - Returns 1 if HSET created a new field, otherwise 0 - """ - return self.execute_command('HSET', name, key, value) - - def hsetnx(self, name, key, value): - """ - Set ``key`` to ``value`` within hash ``name`` if ``key`` does not - exist. Returns 1 if HSETNX created a field, otherwise 0. - """ - return self.execute_command('HSETNX', name, key, value) - - def hmset(self, name, mapping): - """ - Set key to value within hash ``name`` for each corresponding - key and value from the ``mapping`` dict. 
- """ - if not mapping: - raise DataError("'hmset' with 'mapping' of length 0") - items = [] - for pair in iteritems(mapping): - items.extend(pair) - return self.execute_command('HMSET', name, *items) - - def hmget(self, name, keys, *args): - "Returns a list of values ordered identically to ``keys``" - args = list_or_args(keys, args) - return self.execute_command('HMGET', name, *args) - - def hvals(self, name): - "Return the list of values within hash ``name``" - return self.execute_command('HVALS', name) - - def hstrlen(self, name, key): - """ - Return the number of bytes stored in the value of ``key`` - within hash ``name`` - """ - return self.execute_command('HSTRLEN', name, key) - - def publish(self, channel, message): - """ - Publish ``message`` on ``channel``. - Returns the number of subscribers the message was delivered to. - """ - return self.execute_command('PUBLISH', channel, message) - - def pubsub_channels(self, pattern='*'): - """ - Return a list of channels that have at least one subscriber - """ - return self.execute_command('PUBSUB CHANNELS', pattern) - - def pubsub_numpat(self): - """ - Returns the number of subscriptions to patterns - """ - return self.execute_command('PUBSUB NUMPAT') - - def pubsub_numsub(self, *args): - """ - Return a list of (channel, number of subscribers) tuples - for each channel given in ``*args`` - """ - return self.execute_command('PUBSUB NUMSUB', *args) - - def cluster(self, cluster_arg, *args): - return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args) - - def eval(self, script, numkeys, *keys_and_args): - """ - Execute the Lua ``script``, specifying the ``numkeys`` the script - will touch and the key names and argument values in ``keys_and_args``. - Returns the result of the script. - - In practice, use the object returned by ``register_script``. This - function exists purely for Redis API completion. 
- """ - return self.execute_command('EVAL', script, numkeys, *keys_and_args) - - def evalsha(self, sha, numkeys, *keys_and_args): - """ - Use the ``sha`` to execute a Lua script already registered via EVAL - or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the - key names and argument values in ``keys_and_args``. Returns the result - of the script. - - In practice, use the object returned by ``register_script``. This - function exists purely for Redis API completion. - """ - return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args) - - def script_exists(self, *args): - """ - Check if a script exists in the script cache by specifying the SHAs of - each script as ``args``. Returns a list of boolean values indicating if - if each already script exists in the cache. - """ - return self.execute_command('SCRIPT EXISTS', *args) - - def script_flush(self): - "Flush all scripts from the script cache" - return self.execute_command('SCRIPT FLUSH') - - def script_kill(self): - "Kill the currently executing Lua script" - return self.execute_command('SCRIPT KILL') - - def script_load(self, script): - "Load a Lua ``script`` into the script cache. Returns the SHA." - return self.execute_command('SCRIPT LOAD', script) - - def register_script(self, script): - """ - Register a Lua ``script`` specifying the ``keys`` it will touch. - Returns a Script object that is callable and hides the complexity of - deal with scripts, keys, and shas. This is the preferred way to work - with Lua scripts. - """ - return Script(self, script) - - # GEO COMMANDS - def geoadd(self, name, *values): - """ - Add the specified geospatial items to the specified key identified - by the ``name`` argument. The Geospatial items are given as ordered - members of the ``values`` argument, each item or place is formed by - the triad longitude, latitude and name. 
- """ - if len(values) % 3 != 0: - raise DataError("GEOADD requires places with lon, lat and name" - " values") - return self.execute_command('GEOADD', name, *values) - - def geodist(self, name, place1, place2, unit=None): - """ - Return the distance between ``place1`` and ``place2`` members of the - ``name`` key. - The units must be one of the following : m, km mi, ft. By default - meters are used. - """ - pieces = [name, place1, place2] - if unit and unit not in ('m', 'km', 'mi', 'ft'): - raise DataError("GEODIST invalid unit") - elif unit: - pieces.append(unit) - return self.execute_command('GEODIST', *pieces) - - def geohash(self, name, *values): - """ - Return the geo hash string for each item of ``values`` members of - the specified key identified by the ``name`` argument. - """ - return self.execute_command('GEOHASH', name, *values) - - def geopos(self, name, *values): - """ - Return the positions of each item of ``values`` as members of - the specified key identified by the ``name`` argument. Each position - is represented by the pairs lon and lat. - """ - return self.execute_command('GEOPOS', name, *values) - - def georadius(self, name, longitude, latitude, radius, unit=None, - withdist=False, withcoord=False, withhash=False, count=None, - sort=None, store=None, store_dist=None): - """ - Return the members of the specified key identified by the - ``name`` argument which are within the borders of the area specified - with the ``latitude`` and ``longitude`` location and the maximum - distance from the center specified by the ``radius`` value. - - The units must be one of the following : m, km mi, ft. By default - - ``withdist`` indicates to return the distances of each place. - - ``withcoord`` indicates to return the latitude and longitude of - each place. - - ``withhash`` indicates to return the geohash string of each place. - - ``count`` indicates to return the number of elements up to N. 
- - ``sort`` indicates to return the places in a sorted way, ASC for - nearest to fairest and DESC for fairest to nearest. - - ``store`` indicates to save the places names in a sorted set named - with a specific key, each element of the destination sorted set is - populated with the score got from the original geo sorted set. - - ``store_dist`` indicates to save the places names in a sorted set - named with a specific key, instead of ``store`` the sorted set - destination score is set with the distance. - """ - return self._georadiusgeneric('GEORADIUS', - name, longitude, latitude, radius, - unit=unit, withdist=withdist, - withcoord=withcoord, withhash=withhash, - count=count, sort=sort, store=store, - store_dist=store_dist) - - def georadiusbymember(self, name, member, radius, unit=None, - withdist=False, withcoord=False, withhash=False, - count=None, sort=None, store=None, store_dist=None): - """ - This command is exactly like ``georadius`` with the sole difference - that instead of taking, as the center of the area to query, a longitude - and latitude value, it takes the name of a member already existing - inside the geospatial index represented by the sorted set. 
- """ - return self._georadiusgeneric('GEORADIUSBYMEMBER', - name, member, radius, unit=unit, - withdist=withdist, withcoord=withcoord, - withhash=withhash, count=count, - sort=sort, store=store, - store_dist=store_dist) - - def _georadiusgeneric(self, command, *args, **kwargs): - pieces = list(args) - if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'): - raise DataError("GEORADIUS invalid unit") - elif kwargs['unit']: - pieces.append(kwargs['unit']) - else: - pieces.append('m',) - - for arg_name, byte_repr in ( - ('withdist', b'WITHDIST'), - ('withcoord', b'WITHCOORD'), - ('withhash', b'WITHHASH')): - if kwargs[arg_name]: - pieces.append(byte_repr) - - if kwargs['count']: - pieces.extend([b'COUNT', kwargs['count']]) - - if kwargs['sort']: - if kwargs['sort'] == 'ASC': - pieces.append(b'ASC') - elif kwargs['sort'] == 'DESC': - pieces.append(b'DESC') - else: - raise DataError("GEORADIUS invalid sort") - - if kwargs['store'] and kwargs['store_dist']: - raise DataError("GEORADIUS store and store_dist cant be set" - " together") - - if kwargs['store']: - pieces.extend([b'STORE', kwargs['store']]) - - if kwargs['store_dist']: - pieces.extend([b'STOREDIST', kwargs['store_dist']]) - - return self.execute_command(command, *pieces, **kwargs) - - -StrictRedis = Redis - - -class Monitor(object): - """ - Monitor is useful for handling the MONITOR command to the redis server. - next_command() method returns one command from monitor - listen() method yields commands from monitor. - """ - monitor_re = re.compile(r'\[(\d+) (.*)\] (.*)') - command_re = re.compile(r'"(.*?)(? conn.next_health_check: - conn.send_command('PING', self.HEALTH_CHECK_MESSAGE, - check_health=False) - - def _normalize_keys(self, data): - """ - normalize channel/pattern names to be either bytes or strings - based on whether responses are automatically decoded. this saves us - from coercing the value for each message coming in. 
- """ - encode = self.encoder.encode - decode = self.encoder.decode - return {decode(encode(k)): v for k, v in iteritems(data)} - - def psubscribe(self, *args, **kwargs): - """ - Subscribe to channel patterns. Patterns supplied as keyword arguments - expect a pattern name as the key and a callable as the value. A - pattern's callable will be invoked automatically when a message is - received on that pattern rather than producing a message via - ``listen()``. - """ - if args: - args = list_or_args(args[0], args[1:]) - new_patterns = dict.fromkeys(args) - new_patterns.update(kwargs) - ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns)) - # update the patterns dict AFTER we send the command. we don't want to - # subscribe twice to these patterns, once for the command and again - # for the reconnection. - new_patterns = self._normalize_keys(new_patterns) - self.patterns.update(new_patterns) - self.pending_unsubscribe_patterns.difference_update(new_patterns) - return ret_val - - def punsubscribe(self, *args): - """ - Unsubscribe from the supplied patterns. If empty, unsubscribe from - all patterns. - """ - if args: - args = list_or_args(args[0], args[1:]) - patterns = self._normalize_keys(dict.fromkeys(args)) - else: - patterns = self.patterns - self.pending_unsubscribe_patterns.update(patterns) - return self.execute_command('PUNSUBSCRIBE', *args) - - def subscribe(self, *args, **kwargs): - """ - Subscribe to channels. Channels supplied as keyword arguments expect - a channel name as the key and a callable as the value. A channel's - callable will be invoked automatically when a message is received on - that channel rather than producing a message via ``listen()`` or - ``get_message()``. - """ - if args: - args = list_or_args(args[0], args[1:]) - new_channels = dict.fromkeys(args) - new_channels.update(kwargs) - ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels)) - # update the channels dict AFTER we send the command. 
we don't want to - # subscribe twice to these channels, once for the command and again - # for the reconnection. - new_channels = self._normalize_keys(new_channels) - self.channels.update(new_channels) - self.pending_unsubscribe_channels.difference_update(new_channels) - return ret_val - - def unsubscribe(self, *args): - """ - Unsubscribe from the supplied channels. If empty, unsubscribe from - all channels - """ - if args: - args = list_or_args(args[0], args[1:]) - channels = self._normalize_keys(dict.fromkeys(args)) - else: - channels = self.channels - self.pending_unsubscribe_channels.update(channels) - return self.execute_command('UNSUBSCRIBE', *args) - - def listen(self): - "Listen for messages on channels this client has been subscribed to" - while self.subscribed: - response = self.handle_message(self.parse_response(block=True)) - if response is not None: - yield response - - def get_message(self, ignore_subscribe_messages=False, timeout=0): - """ - Get the next message if one is available, otherwise None. - - If timeout is specified, the system will wait for `timeout` seconds - before returning. Timeout should be specified as a floating point - number. - """ - response = self.parse_response(block=False, timeout=timeout) - if response: - return self.handle_message(response, ignore_subscribe_messages) - return None - - def ping(self, message=None): - """ - Ping the Redis server - """ - message = '' if message is None else message - return self.execute_command('PING', message) - - def handle_message(self, response, ignore_subscribe_messages=False): - """ - Parses a pub/sub message. If the channel or pattern was subscribed to - with a message handler, the handler is invoked instead of a parsed - message being returned. 
- """ - message_type = nativestr(response[0]) - if message_type == 'pmessage': - message = { - 'type': message_type, - 'pattern': response[1], - 'channel': response[2], - 'data': response[3] - } - elif message_type == 'pong': - message = { - 'type': message_type, - 'pattern': None, - 'channel': None, - 'data': response[1] - } - else: - message = { - 'type': message_type, - 'pattern': None, - 'channel': response[1], - 'data': response[2] - } - - # if this is an unsubscribe message, remove it from memory - if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: - if message_type == 'punsubscribe': - pattern = response[1] - if pattern in self.pending_unsubscribe_patterns: - self.pending_unsubscribe_patterns.remove(pattern) - self.patterns.pop(pattern, None) - else: - channel = response[1] - if channel in self.pending_unsubscribe_channels: - self.pending_unsubscribe_channels.remove(channel) - self.channels.pop(channel, None) - - if message_type in self.PUBLISH_MESSAGE_TYPES: - # if there's a message handler, invoke it - if message_type == 'pmessage': - handler = self.patterns.get(message['pattern'], None) - else: - handler = self.channels.get(message['channel'], None) - if handler: - handler(message) - return None - elif message_type != 'pong': - # this is a subscribe/unsubscribe message. 
ignore if we don't - # want them - if ignore_subscribe_messages or self.ignore_subscribe_messages: - return None - - return message - - def run_in_thread(self, sleep_time=0, daemon=False): - for channel, handler in iteritems(self.channels): - if handler is None: - raise PubSubError("Channel: '%s' has no handler registered" % - channel) - for pattern, handler in iteritems(self.patterns): - if handler is None: - raise PubSubError("Pattern: '%s' has no handler registered" % - pattern) - - thread = PubSubWorkerThread(self, sleep_time, daemon=daemon) - thread.start() - return thread - - -class PubSubWorkerThread(threading.Thread): - def __init__(self, pubsub, sleep_time, daemon=False): - super(PubSubWorkerThread, self).__init__() - self.daemon = daemon - self.pubsub = pubsub - self.sleep_time = sleep_time - self._running = threading.Event() - - def run(self): - if self._running.is_set(): - return - self._running.set() - pubsub = self.pubsub - sleep_time = self.sleep_time - while self._running.is_set(): - pubsub.get_message(ignore_subscribe_messages=True, - timeout=sleep_time) - pubsub.close() - - def stop(self): - # trip the flag so the run loop exits. the run loop will - # close the pubsub connection, which disconnects the socket - # and returns the connection to the pool. - self._running.clear() - - -class Pipeline(Redis): - """ - Pipelines provide a way to transmit multiple commands to the Redis server - in one transmission. This is convenient for batch processing, such as - saving all the values in a list to Redis. - - All commands executed within a pipeline are wrapped with MULTI and EXEC - calls. This guarantees all commands executed in the pipeline will be - executed atomically. - - Any command raising an exception does *not* halt the execution of - subsequent commands in the pipeline. Instead, the exception is caught - and its instance is placed into the response list returned by execute(). 
- Code iterating over the response list should be able to deal with an - instance of an exception as a potential value. In general, these will be - ResponseError exceptions, such as those raised when issuing a command - on a key of a different datatype. - """ - - UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'} - - def __init__(self, connection_pool, response_callbacks, transaction, - shard_hint): - self.connection_pool = connection_pool - self.connection = None - self.response_callbacks = response_callbacks - self.transaction = transaction - self.shard_hint = shard_hint - - self.watching = False - self.reset() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.reset() - - def __del__(self): - try: - self.reset() - except Exception: - pass - - def __len__(self): - return len(self.command_stack) - - def reset(self): - self.command_stack = [] - self.scripts = set() - # make sure to reset the connection state in the event that we were - # watching something - if self.watching and self.connection: - try: - # call this manually since our unwatch or - # immediate_execute_command methods can call reset() - self.connection.send_command('UNWATCH') - self.connection.read_response() - except ConnectionError: - # disconnect will also remove any previous WATCHes - self.connection.disconnect() - # clean up the other instance attributes - self.watching = False - self.explicit_transaction = False - # we can safely return the connection to the pool here since we're - # sure we're no longer WATCHing anything - if self.connection: - self.connection_pool.release(self.connection) - self.connection = None - - def multi(self): - """ - Start a transactional block of the pipeline after WATCH commands - are issued. End the transactional block with `execute`. 
- """ - if self.explicit_transaction: - raise RedisError('Cannot issue nested calls to MULTI') - if self.command_stack: - raise RedisError('Commands without an initial WATCH have already ' - 'been issued') - self.explicit_transaction = True - - def execute_command(self, *args, **kwargs): - if (self.watching or args[0] == 'WATCH') and \ - not self.explicit_transaction: - return self.immediate_execute_command(*args, **kwargs) - return self.pipeline_execute_command(*args, **kwargs) - - def immediate_execute_command(self, *args, **options): - """ - Execute a command immediately, but don't auto-retry on a - ConnectionError if we're already WATCHing a variable. Used when - issuing WATCH or subsequent commands retrieving their values but before - MULTI is called. - """ - command_name = args[0] - conn = self.connection - # if this is the first call, we need a connection - if not conn: - conn = self.connection_pool.get_connection(command_name, - self.shard_hint) - self.connection = conn - try: - conn.send_command(*args) - return self.parse_response(conn, command_name, **options) - except (ConnectionError, TimeoutError) as e: - conn.disconnect() - # if we were already watching a variable, the watch is no longer - # valid since this connection has died. raise a WatchError, which - # indicates the user should retry this transaction. - if self.watching: - self.reset() - raise WatchError("A ConnectionError occured on while watching " - "one or more keys") - # if retry_on_timeout is not set, or the error is not - # a TimeoutError, raise it - if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): - self.reset() - raise - - # retry_on_timeout is set, this is a TimeoutError and we are not - # already WATCHing any variables. retry the command. 
- try: - conn.send_command(*args) - return self.parse_response(conn, command_name, **options) - except (ConnectionError, TimeoutError): - # a subsequent failure should simply be raised - self.reset() - raise - - def pipeline_execute_command(self, *args, **options): - """ - Stage a command to be executed when execute() is next called - - Returns the current Pipeline object back so commands can be - chained together, such as: - - pipe = pipe.set('foo', 'bar').incr('baz').decr('bang') - - At some other point, you can then run: pipe.execute(), - which will execute all commands queued in the pipe. - """ - self.command_stack.append((args, options)) - return self - - def _execute_transaction(self, connection, commands, raise_on_error): - cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})]) - all_cmds = connection.pack_commands([args for args, options in cmds - if EMPTY_RESPONSE not in options]) - connection.send_packed_command(all_cmds) - errors = [] - - # parse off the response for MULTI - # NOTE: we need to handle ResponseErrors here and continue - # so that we read all the additional command messages from - # the socket - try: - self.parse_response(connection, '_') - except ResponseError: - errors.append((0, sys.exc_info()[1])) - - # and all the other commands - for i, command in enumerate(commands): - if EMPTY_RESPONSE in command[1]: - errors.append((i, command[1][EMPTY_RESPONSE])) - else: - try: - self.parse_response(connection, '_') - except ResponseError: - ex = sys.exc_info()[1] - self.annotate_exception(ex, i + 1, command[0]) - errors.append((i, ex)) - - # parse the EXEC. 
- try: - response = self.parse_response(connection, '_') - except ExecAbortError: - if self.explicit_transaction: - self.immediate_execute_command('DISCARD') - if errors: - raise errors[0][1] - raise sys.exc_info()[1] - - if response is None: - raise WatchError("Watched variable changed.") - - # put any parse errors into the response - for i, e in errors: - response.insert(i, e) - - if len(response) != len(commands): - self.connection.disconnect() - raise ResponseError("Wrong number of response items from " - "pipeline execution") - - # find any errors in the response and raise if necessary - if raise_on_error: - self.raise_first_error(commands, response) - - # We have to run response callbacks manually - data = [] - for r, cmd in izip(response, commands): - if not isinstance(r, Exception): - args, options = cmd - command_name = args[0] - if command_name in self.response_callbacks: - r = self.response_callbacks[command_name](r, **options) - data.append(r) - return data - - def _execute_pipeline(self, connection, commands, raise_on_error): - # build up all commands into a single request to increase network perf - all_cmds = connection.pack_commands([args for args, _ in commands]) - connection.send_packed_command(all_cmds) - - response = [] - for args, options in commands: - try: - response.append( - self.parse_response(connection, args[0], **options)) - except ResponseError: - response.append(sys.exc_info()[1]) - - if raise_on_error: - self.raise_first_error(commands, response) - return response - - def raise_first_error(self, commands, response): - for i, r in enumerate(response): - if isinstance(r, ResponseError): - self.annotate_exception(r, i + 1, commands[i][0]) - raise r - - def annotate_exception(self, exception, number, command): - cmd = ' '.join(imap(safe_unicode, command)) - msg = 'Command # %d (%s) of pipeline caused error: %s' % ( - number, cmd, safe_unicode(exception.args[0])) - exception.args = (msg,) + exception.args[1:] - - def parse_response(self, 
connection, command_name, **options): - result = Redis.parse_response( - self, connection, command_name, **options) - if command_name in self.UNWATCH_COMMANDS: - self.watching = False - elif command_name == 'WATCH': - self.watching = True - return result - - def load_scripts(self): - # make sure all scripts that are about to be run on this pipeline exist - scripts = list(self.scripts) - immediate = self.immediate_execute_command - shas = [s.sha for s in scripts] - # we can't use the normal script_* methods because they would just - # get buffered in the pipeline. - exists = immediate('SCRIPT EXISTS', *shas) - if not all(exists): - for s, exist in izip(scripts, exists): - if not exist: - s.sha = immediate('SCRIPT LOAD', s.script) - - def execute(self, raise_on_error=True): - "Execute all the commands in the current pipeline" - stack = self.command_stack - if not stack: - return [] - if self.scripts: - self.load_scripts() - if self.transaction or self.explicit_transaction: - execute = self._execute_transaction - else: - execute = self._execute_pipeline - - conn = self.connection - if not conn: - conn = self.connection_pool.get_connection('MULTI', - self.shard_hint) - # assign to self.connection so reset() releases the connection - # back to the pool after we're done - self.connection = conn - - try: - return execute(conn, stack, raise_on_error) - except (ConnectionError, TimeoutError) as e: - conn.disconnect() - # if we were watching a variable, the watch is no longer valid - # since this connection has died. raise a WatchError, which - # indicates the user should retry this transaction. 
- if self.watching: - raise WatchError("A ConnectionError occured on while watching " - "one or more keys") - # if retry_on_timeout is not set, or the error is not - # a TimeoutError, raise it - if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): - raise - # retry a TimeoutError when retry_on_timeout is set - return execute(conn, stack, raise_on_error) - finally: - self.reset() - - def watch(self, *names): - "Watches the values at keys ``names``" - if self.explicit_transaction: - raise RedisError('Cannot issue a WATCH after a MULTI') - return self.execute_command('WATCH', *names) - - def unwatch(self): - "Unwatches all previously specified keys" - return self.watching and self.execute_command('UNWATCH') or True - - -class Script(object): - "An executable Lua script object returned by ``register_script``" - - def __init__(self, registered_client, script): - self.registered_client = registered_client - self.script = script - # Precalculate and store the SHA1 hex digest of the script. - - if isinstance(script, basestring): - # We need the encoding from the client in order to generate an - # accurate byte representation of the script - encoder = registered_client.connection_pool.get_encoder() - script = encoder.encode(script) - self.sha = hashlib.sha1(script).hexdigest() - - def __call__(self, keys=[], args=[], client=None): - "Execute the script, passing any required ``args``" - if client is None: - client = self.registered_client - args = tuple(keys) + tuple(args) - # make sure the Redis server knows about the script - if isinstance(client, Pipeline): - # Make sure the pipeline can register the script before executing. - client.scripts.add(self) - try: - return client.evalsha(self.sha, len(keys), *args) - except NoScriptError: - # Maybe the client is pointed to a differnet server than the client - # that created this instance? - # Overwrite the sha just in case there was a discrepancy. 
- self.sha = client.script_load(self.script) - return client.evalsha(self.sha, len(keys), *args) - - -class BitFieldOperation(object): - """ - Command builder for BITFIELD commands. - """ - def __init__(self, client, key, default_overflow=None): - self.client = client - self.key = key - self._default_overflow = default_overflow - self.reset() - - def reset(self): - """ - Reset the state of the instance to when it was constructed - """ - self.operations = [] - self._last_overflow = 'WRAP' - self.overflow(self._default_overflow or self._last_overflow) - - def overflow(self, overflow): - """ - Update the overflow algorithm of successive INCRBY operations - :param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the - Redis docs for descriptions of these algorithmsself. - :returns: a :py:class:`BitFieldOperation` instance. - """ - overflow = overflow.upper() - if overflow != self._last_overflow: - self._last_overflow = overflow - self.operations.append(('OVERFLOW', overflow)) - return self - - def incrby(self, fmt, offset, increment, overflow=None): - """ - Increment a bitfield by a given amount. - :param fmt: format-string for the bitfield being updated, e.g. 'u8' - for an unsigned 8-bit integer. - :param offset: offset (in number of bits). If prefixed with a - '#', this is an offset multiplier, e.g. given the arguments - fmt='u8', offset='#2', the offset will be 16. - :param int increment: value to increment the bitfield by. - :param str overflow: overflow algorithm. Defaults to WRAP, but other - acceptable values are SAT and FAIL. See the Redis docs for - descriptions of these algorithms. - :returns: a :py:class:`BitFieldOperation` instance. - """ - if overflow is not None: - self.overflow(overflow) - - self.operations.append(('INCRBY', fmt, offset, increment)) - return self - - def get(self, fmt, offset): - """ - Get the value of a given bitfield. - :param fmt: format-string for the bitfield being read, e.g. 'u8' for - an unsigned 8-bit integer. 
- :param offset: offset (in number of bits). If prefixed with a - '#', this is an offset multiplier, e.g. given the arguments - fmt='u8', offset='#2', the offset will be 16. - :returns: a :py:class:`BitFieldOperation` instance. - """ - self.operations.append(('GET', fmt, offset)) - return self - - def set(self, fmt, offset, value): - """ - Set the value of a given bitfield. - :param fmt: format-string for the bitfield being read, e.g. 'u8' for - an unsigned 8-bit integer. - :param offset: offset (in number of bits). If prefixed with a - '#', this is an offset multiplier, e.g. given the arguments - fmt='u8', offset='#2', the offset will be 16. - :param int value: value to set at the given position. - :returns: a :py:class:`BitFieldOperation` instance. - """ - self.operations.append(('SET', fmt, offset, value)) - return self - - @property - def command(self): - cmd = ['BITFIELD', self.key] - for ops in self.operations: - cmd.extend(ops) - return cmd - - def execute(self): - """ - Execute the operation(s) in a single BITFIELD command. The return value - is a list of values corresponding to each operation. If the client - used to create this instance was a pipeline, the list of values - will be present within the pipeline's execute. 
- """ - command = self.command - self.reset() - return self.client.execute_command(*command) diff --git a/utill/rediss/connection.py b/utill/rediss/connection.py deleted file mode 100644 index 2968e74..0000000 --- a/utill/rediss/connection.py +++ /dev/null @@ -1,1261 +0,0 @@ -from __future__ import unicode_literals -from distutils.version import StrictVersion -from itertools import chain -from time import time -import errno -import io -import os -import socket -import sys -import threading -import warnings - -from ._compat import (xrange, imap, byte_to_chr, unicode, long, - nativestr, basestring, iteritems, - LifoQueue, Empty, Full, urlparse, parse_qs, - recv, recv_into, unquote, BlockingIOError) -from .exceptions import ( - AuthenticationError, - BusyLoadingError, - ConnectionError, - DataError, - ExecAbortError, - InvalidResponse, - NoScriptError, - ReadOnlyError, - RedisError, - ResponseError, - TimeoutError, -) -from .utils import HIREDIS_AVAILABLE - -try: - import ssl - ssl_available = True -except ImportError: - ssl_available = False - -NONBLOCKING_EXCEPTION_ERROR_NUMBERS = { - BlockingIOError: errno.EWOULDBLOCK, -} - -if ssl_available: - if hasattr(ssl, 'SSLWantReadError'): - NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2 - NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2 - else: - NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2 - -# In Python 2.7 a socket.error is raised for a nonblocking read. -# The _compat module aliases BlockingIOError to socket.error to be -# Python 2/3 compatible. -# However this means that all socket.error exceptions need to be handled -# properly within these exception handlers. -# We need to make sure socket.error is included in these handlers and -# provide a dummy error number that will never match a real exception. 
-if socket.error not in NONBLOCKING_EXCEPTION_ERROR_NUMBERS: - NONBLOCKING_EXCEPTION_ERROR_NUMBERS[socket.error] = -999999 - -NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys()) - -if HIREDIS_AVAILABLE: - import hiredis - - hiredis_version = StrictVersion(hiredis.__version__) - HIREDIS_SUPPORTS_CALLABLE_ERRORS = \ - hiredis_version >= StrictVersion('0.1.3') - HIREDIS_SUPPORTS_BYTE_BUFFER = \ - hiredis_version >= StrictVersion('0.1.4') - HIREDIS_SUPPORTS_ENCODING_ERRORS = \ - hiredis_version >= StrictVersion('1.0.0') - - if not HIREDIS_SUPPORTS_BYTE_BUFFER: - msg = ("redis-py works best with hiredis >= 0.1.4. You're running " - "hiredis %s. Please consider upgrading." % hiredis.__version__) - warnings.warn(msg) - - HIREDIS_USE_BYTE_BUFFER = True - # only use byte buffer if hiredis supports it - if not HIREDIS_SUPPORTS_BYTE_BUFFER: - HIREDIS_USE_BYTE_BUFFER = False - -SYM_STAR = b'*' -SYM_DOLLAR = b'$' -SYM_CRLF = b'\r\n' -SYM_EMPTY = b'' - -SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server." - -SENTINEL = object() - - -class Encoder(object): - "Encode strings to bytes and decode bytes to strings" - - def __init__(self, encoding, encoding_errors, decode_responses): - self.encoding = encoding - self.encoding_errors = encoding_errors - self.decode_responses = decode_responses - - def encode(self, value): - "Return a bytestring representation of the value" - if isinstance(value, bytes): - return value - elif isinstance(value, bool): - # special case bool since it is a subclass of int - raise DataError("Invalid input of type: 'bool'. Convert to a " - "byte, string or number first.") - elif isinstance(value, float): - value = repr(value).encode() - elif isinstance(value, (int, long)): - # python 2 repr() on longs is '123L', so use str() instead - value = str(value).encode() - elif not isinstance(value, basestring): - # a value we don't know how to deal with. 
throw an error - typename = type(value).__name__ - raise DataError("Invalid input of type: '%s'. Convert to a " - "byte, string or number first." % typename) - if isinstance(value, unicode): - value = value.encode(self.encoding, self.encoding_errors) - return value - - def decode(self, value, force=False): - "Return a unicode string from the byte representation" - if (self.decode_responses or force) and isinstance(value, bytes): - value = value.decode(self.encoding, self.encoding_errors) - return value - - -class BaseParser(object): - EXCEPTION_CLASSES = { - 'ERR': { - 'max number of clients reached': ConnectionError, - 'Client sent AUTH, but no password is set': AuthenticationError, - 'invalid password': AuthenticationError, - }, - 'EXECABORT': ExecAbortError, - 'LOADING': BusyLoadingError, - 'NOSCRIPT': NoScriptError, - 'READONLY': ReadOnlyError, - 'NOAUTH': AuthenticationError, - } - - def parse_error(self, response): - "Parse an error response" - error_code = response.split(' ')[0] - if error_code in self.EXCEPTION_CLASSES: - response = response[len(error_code) + 1:] - exception_class = self.EXCEPTION_CLASSES[error_code] - if isinstance(exception_class, dict): - exception_class = exception_class.get(response, ResponseError) - return exception_class(response) - return ResponseError(response) - - -class SocketBuffer(object): - def __init__(self, socket, socket_read_size, socket_timeout): - self._sock = socket - self.socket_read_size = socket_read_size - self.socket_timeout = socket_timeout - self._buffer = io.BytesIO() - # number of bytes written to the buffer from the socket - self.bytes_written = 0 - # number of bytes read from the buffer - self.bytes_read = 0 - - @property - def length(self): - return self.bytes_written - self.bytes_read - - def _read_from_socket(self, length=None, timeout=SENTINEL, - raise_on_timeout=True): - sock = self._sock - socket_read_size = self.socket_read_size - buf = self._buffer - buf.seek(self.bytes_written) - marker = 0 - 
custom_timeout = timeout is not SENTINEL - - try: - if custom_timeout: - sock.settimeout(timeout) - while True: - data = recv(self._sock, socket_read_size) - # an empty string indicates the server shutdown the socket - if isinstance(data, bytes) and len(data) == 0: - raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) - buf.write(data) - data_length = len(data) - self.bytes_written += data_length - marker += data_length - - if length is not None and length > marker: - continue - return True - except socket.timeout: - if raise_on_timeout: - raise TimeoutError("Timeout reading from socket") - return False - except NONBLOCKING_EXCEPTIONS as ex: - # if we're in nonblocking mode and the recv raises a - # blocking error, simply return False indicating that - # there's no data to be read. otherwise raise the - # original exception. - allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) - if not raise_on_timeout and ex.errno == allowed: - return False - raise ConnectionError("Error while reading from socket: %s" % - (ex.args,)) - finally: - if custom_timeout: - sock.settimeout(self.socket_timeout) - - def can_read(self, timeout): - return bool(self.length) or \ - self._read_from_socket(timeout=timeout, - raise_on_timeout=False) - - def read(self, length): - length = length + 2 # make sure to read the \r\n terminator - # make sure we've read enough data from the socket - if length > self.length: - self._read_from_socket(length - self.length) - - self._buffer.seek(self.bytes_read) - data = self._buffer.read(length) - self.bytes_read += len(data) - - # purge the buffer when we've consumed it all so it doesn't - # grow forever - if self.bytes_read == self.bytes_written: - self.purge() - - return data[:-2] - - def readline(self): - buf = self._buffer - buf.seek(self.bytes_read) - data = buf.readline() - while not data.endswith(SYM_CRLF): - # there's more data in the socket that we need - self._read_from_socket() - buf.seek(self.bytes_read) - data = 
buf.readline() - - self.bytes_read += len(data) - - # purge the buffer when we've consumed it all so it doesn't - # grow forever - if self.bytes_read == self.bytes_written: - self.purge() - - return data[:-2] - - def purge(self): - self._buffer.seek(0) - self._buffer.truncate() - self.bytes_written = 0 - self.bytes_read = 0 - - def close(self): - try: - self.purge() - self._buffer.close() - except Exception: - # issue #633 suggests the purge/close somehow raised a - # BadFileDescriptor error. Perhaps the client ran out of - # memory or something else? It's probably OK to ignore - # any error being raised from purge/close since we're - # removing the reference to the instance below. - pass - self._buffer = None - self._sock = None - - -class PythonParser(BaseParser): - "Plain Python parsing class" - def __init__(self, socket_read_size): - self.socket_read_size = socket_read_size - self.encoder = None - self._sock = None - self._buffer = None - - def __del__(self): - try: - self.on_disconnect() - except Exception: - pass - - def on_connect(self, connection): - "Called when the socket connects" - self._sock = connection._sock - self._buffer = SocketBuffer(self._sock, - self.socket_read_size, - connection.socket_timeout) - self.encoder = connection.encoder - - def on_disconnect(self): - "Called when the socket disconnects" - self._sock = None - if self._buffer is not None: - self._buffer.close() - self._buffer = None - self.encoder = None - - def can_read(self, timeout): - return self._buffer and self._buffer.can_read(timeout) - - def read_response(self): - response = self._buffer.readline() - if not response: - raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) - - byte, response = byte_to_chr(response[0]), response[1:] - - if byte not in ('-', '+', ':', '$', '*'): - raise InvalidResponse("Protocol Error: %s, %s" % - (str(byte), str(response))) - - # server returned an error - if byte == '-': - response = nativestr(response) - error = self.parse_error(response) - # 
if the error is a ConnectionError, raise immediately so the user - # is notified - if isinstance(error, ConnectionError): - raise error - # otherwise, we're dealing with a ResponseError that might belong - # inside a pipeline response. the connection's read_response() - # and/or the pipeline's execute() will raise this error if - # necessary, so just return the exception instance here. - return error - # single value - elif byte == '+': - pass - # int value - elif byte == ':': - response = long(response) - # bulk response - elif byte == '$': - length = int(response) - if length == -1: - return None - response = self._buffer.read(length) - # multi-bulk response - elif byte == '*': - length = int(response) - if length == -1: - return None - response = [self.read_response() for i in xrange(length)] - if isinstance(response, bytes): - response = self.encoder.decode(response) - return response - - -class HiredisParser(BaseParser): - "Parser class for connections using Hiredis" - def __init__(self, socket_read_size): - if not HIREDIS_AVAILABLE: - raise RedisError("Hiredis is not installed") - self.socket_read_size = socket_read_size - - if HIREDIS_USE_BYTE_BUFFER: - self._buffer = bytearray(socket_read_size) - - def __del__(self): - try: - self.on_disconnect() - except Exception: - pass - - def on_connect(self, connection): - self._sock = connection._sock - self._socket_timeout = connection.socket_timeout - kwargs = { - 'protocolError': InvalidResponse, - 'replyError': self.parse_error, - } - - # hiredis < 0.1.3 doesn't support functions that create exceptions - if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: - kwargs['replyError'] = ResponseError - - if connection.encoder.decode_responses: - kwargs['encoding'] = connection.encoder.encoding - if HIREDIS_SUPPORTS_ENCODING_ERRORS: - kwargs['errors'] = connection.encoder.encoding_errors - self._reader = hiredis.Reader(**kwargs) - self._next_response = False - - def on_disconnect(self): - self._sock = None - self._reader = None - 
self._next_response = False - - def can_read(self, timeout): - if not self._reader: - raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) - - if self._next_response is False: - self._next_response = self._reader.gets() - if self._next_response is False: - return self.read_from_socket(timeout=timeout, - raise_on_timeout=False) - return True - - def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True): - sock = self._sock - custom_timeout = timeout is not SENTINEL - try: - if custom_timeout: - sock.settimeout(timeout) - if HIREDIS_USE_BYTE_BUFFER: - bufflen = recv_into(self._sock, self._buffer) - if bufflen == 0: - raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) - self._reader.feed(self._buffer, 0, bufflen) - else: - buffer = recv(self._sock, self.socket_read_size) - # an empty string indicates the server shutdown the socket - if not isinstance(buffer, bytes) or len(buffer) == 0: - raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) - self._reader.feed(buffer) - # data was read from the socket and added to the buffer. - # return True to indicate that data was read. - return True - except socket.timeout: - if raise_on_timeout: - raise TimeoutError("Timeout reading from socket") - return False - except NONBLOCKING_EXCEPTIONS as ex: - # if we're in nonblocking mode and the recv raises a - # blocking error, simply return False indicating that - # there's no data to be read. otherwise raise the - # original exception. 
- allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) - if not raise_on_timeout and ex.errno == allowed: - return False - raise ConnectionError("Error while reading from socket: %s" % - (ex.args,)) - finally: - if custom_timeout: - sock.settimeout(self._socket_timeout) - - def read_response(self): - if not self._reader: - raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) - - # _next_response might be cached from a can_read() call - if self._next_response is not False: - response = self._next_response - self._next_response = False - return response - - response = self._reader.gets() - while response is False: - self.read_from_socket() - response = self._reader.gets() - # if an older version of hiredis is installed, we need to attempt - # to convert ResponseErrors to their appropriate types. - if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: - if isinstance(response, ResponseError): - response = self.parse_error(response.args[0]) - elif isinstance(response, list) and response and \ - isinstance(response[0], ResponseError): - response[0] = self.parse_error(response[0].args[0]) - # if the response is a ConnectionError or the response is a list and - # the first item is a ConnectionError, raise it as something bad - # happened - if isinstance(response, ConnectionError): - raise response - elif isinstance(response, list) and response and \ - isinstance(response[0], ConnectionError): - raise response[0] - return response - - -if HIREDIS_AVAILABLE: - DefaultParser = HiredisParser -else: - DefaultParser = PythonParser - - -class Connection(object): - "Manages TCP communication to and from a Redis server" - description_format = "Connection" - - def __init__(self, host='localhost', port=6379, db=0, password=None, - socket_timeout=None, socket_connect_timeout=None, - socket_keepalive=False, socket_keepalive_options=None, - socket_type=0, retry_on_timeout=False, encoding='utf-8', - encoding_errors='strict', decode_responses=False, - parser_class=DefaultParser, 
socket_read_size=65536, - health_check_interval=0): - self.pid = os.getpid() - self.host = host - self.port = int(port) - self.db = db - self.password = password - self.socket_timeout = socket_timeout - self.socket_connect_timeout = socket_connect_timeout or socket_timeout - self.socket_keepalive = socket_keepalive - self.socket_keepalive_options = socket_keepalive_options or {} - self.socket_type = socket_type - self.retry_on_timeout = retry_on_timeout - self.health_check_interval = health_check_interval - self.next_health_check = 0 - self.encoder = Encoder(encoding, encoding_errors, decode_responses) - self._sock = None - self._parser = parser_class(socket_read_size=socket_read_size) - self._description_args = { - 'host': self.host, - 'port': self.port, - 'db': self.db, - } - self._connect_callbacks = [] - self._buffer_cutoff = 6000 - - def __repr__(self): - return self.description_format % self._description_args - - def __del__(self): - try: - self.disconnect() - except Exception: - pass - - def register_connect_callback(self, callback): - self._connect_callbacks.append(callback) - - def clear_connect_callbacks(self): - self._connect_callbacks = [] - - def connect(self): - "Connects to the Redis server if not already connected" - if self._sock: - return - try: - sock = self._connect() - except socket.timeout: - raise TimeoutError("Timeout connecting to server") - except socket.error: - e = sys.exc_info()[1] - raise ConnectionError(self._error_message(e)) - - self._sock = sock - try: - self.on_connect() - except RedisError: - # clean up after any error in on_connect - self.disconnect() - raise - - # run any user callbacks. 
right now the only internal callback - # is for pubsub channel/pattern resubscription - for callback in self._connect_callbacks: - callback(self) - - def _connect(self): - "Create a TCP socket connection" - # we want to mimic what socket.create_connection does to support - # ipv4/ipv6, but we want to set options prior to calling - # socket.connect() - err = None - for res in socket.getaddrinfo(self.host, self.port, self.socket_type, - socket.SOCK_STREAM): - family, socktype, proto, canonname, socket_address = res - sock = None - try: - sock = socket.socket(family, socktype, proto) - # TCP_NODELAY - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - - # TCP_KEEPALIVE - if self.socket_keepalive: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - for k, v in iteritems(self.socket_keepalive_options): - sock.setsockopt(socket.IPPROTO_TCP, k, v) - - # set the socket_connect_timeout before we connect - sock.settimeout(self.socket_connect_timeout) - - # connect - sock.connect(socket_address) - - # set the socket_timeout now that we're connected - sock.settimeout(self.socket_timeout) - return sock - - except socket.error as _: - err = _ - if sock is not None: - sock.close() - - if err is not None: - raise err - raise socket.error("socket.getaddrinfo returned an empty list") - - def _error_message(self, exception): - # args for socket.error can either be (errno, "message") - # or just "message" - if len(exception.args) == 1: - return "Error connecting to %s:%s. %s." % \ - (self.host, self.port, exception.args[0]) - else: - return "Error %s connecting to %s:%s. %s." 
% \ - (exception.args[0], self.host, self.port, exception.args[1]) - - def on_connect(self): - "Initialize the connection, authenticate and select a database" - self._parser.on_connect(self) - - # if a password is specified, authenticate - if self.password: - # avoid checking health here -- PING will fail if we try - # to check the health prior to the AUTH - self.send_command('AUTH', self.password, check_health=False) - if nativestr(self.read_response()) != 'OK': - raise AuthenticationError('Invalid Password') - - # if a database is specified, switch to it - if self.db: - self.send_command('SELECT', self.db) - if nativestr(self.read_response()) != 'OK': - raise ConnectionError('Invalid Database') - - def disconnect(self): - "Disconnects from the Redis server" - self._parser.on_disconnect() - if self._sock is None: - return - try: - if os.getpid() == self.pid: - self._sock.shutdown(socket.SHUT_RDWR) - self._sock.close() - except socket.error: - pass - self._sock = None - - def check_health(self): - "Check the health of the connection with a PING/PONG" - if self.health_check_interval and time() > self.next_health_check: - try: - self.send_command('PING', check_health=False) - if nativestr(self.read_response()) != 'PONG': - raise ConnectionError( - 'Bad response from PING health check') - except (ConnectionError, TimeoutError) as ex: - self.disconnect() - self.send_command('PING', check_health=False) - if nativestr(self.read_response()) != 'PONG': - raise ConnectionError( - 'Bad response from PING health check') - - def send_packed_command(self, command, check_health=True): - "Send an already packed command to the Redis server" - if not self._sock: - self.connect() - # guard against health check recurrsion - if check_health: - self.check_health() - try: - if isinstance(command, str): - command = [command] - for item in command: - self._sock.sendall(item) - except socket.timeout: - self.disconnect() - raise TimeoutError("Timeout writing to socket") - except 
socket.error: - e = sys.exc_info()[1] - self.disconnect() - if len(e.args) == 1: - errno, errmsg = 'UNKNOWN', e.args[0] - else: - errno = e.args[0] - errmsg = e.args[1] - raise ConnectionError("Error %s while writing to socket. %s." % - (errno, errmsg)) - except: # noqa: E722 - self.disconnect() - raise - - def send_command(self, *args, **kwargs): - "Pack and send a command to the Redis server" - self.send_packed_command(self.pack_command(*args), - check_health=kwargs.get('check_health', True)) - - def can_read(self, timeout=0): - "Poll the socket to see if there's data that can be read." - sock = self._sock - if not sock: - self.connect() - sock = self._sock - return self._parser.can_read(timeout) - - def read_response(self): - "Read the response from a previously sent command" - try: - response = self._parser.read_response() - except socket.timeout: - self.disconnect() - raise TimeoutError("Timeout reading from %s:%s" % - (self.host, self.port)) - except socket.error: - self.disconnect() - e = sys.exc_info()[1] - raise ConnectionError("Error while reading from %s:%s : %s" % - (self.host, self.port, e.args)) - except: # noqa: E722 - self.disconnect() - raise - - if self.health_check_interval: - self.next_health_check = time() + self.health_check_interval - - if isinstance(response, ResponseError): - raise response - return response - - def pack_command(self, *args): - "Pack a series of arguments into the Redis protocol" - output = [] - # the client might have included 1 or more literal arguments in - # the command name, e.g., 'CONFIG GET'. The Redis server expects these - # arguments to be sent separately, so split the first argument - # manually. These arguments should be bytestrings so that they are - # not encoded. 
- if isinstance(args[0], unicode): - args = tuple(args[0].encode().split()) + args[1:] - elif b' ' in args[0]: - args = tuple(args[0].split()) + args[1:] - - buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF)) - - buffer_cutoff = self._buffer_cutoff - for arg in imap(self.encoder.encode, args): - # to avoid large string mallocs, chunk the command into the - # output list if we're sending large values - if len(buff) > buffer_cutoff or len(arg) > buffer_cutoff: - buff = SYM_EMPTY.join( - (buff, SYM_DOLLAR, str(len(arg)).encode(), SYM_CRLF)) - output.append(buff) - output.append(arg) - buff = SYM_CRLF - else: - buff = SYM_EMPTY.join( - (buff, SYM_DOLLAR, str(len(arg)).encode(), - SYM_CRLF, arg, SYM_CRLF)) - output.append(buff) - return output - - def pack_commands(self, commands): - "Pack multiple commands into the Redis protocol" - output = [] - pieces = [] - buffer_length = 0 - buffer_cutoff = self._buffer_cutoff - - for cmd in commands: - for chunk in self.pack_command(*cmd): - chunklen = len(chunk) - if buffer_length > buffer_cutoff or chunklen > buffer_cutoff: - output.append(SYM_EMPTY.join(pieces)) - buffer_length = 0 - pieces = [] - - if chunklen > self._buffer_cutoff: - output.append(chunk) - else: - pieces.append(chunk) - buffer_length += chunklen - - if pieces: - output.append(SYM_EMPTY.join(pieces)) - return output - - -class SSLConnection(Connection): - description_format = "SSLConnection" - - def __init__(self, ssl_keyfile=None, ssl_certfile=None, - ssl_cert_reqs='required', ssl_ca_certs=None, **kwargs): - if not ssl_available: - raise RedisError("Python wasn't built with SSL support") - - super(SSLConnection, self).__init__(**kwargs) - - self.keyfile = ssl_keyfile - self.certfile = ssl_certfile - if ssl_cert_reqs is None: - ssl_cert_reqs = ssl.CERT_NONE - elif isinstance(ssl_cert_reqs, basestring): - CERT_REQS = { - 'none': ssl.CERT_NONE, - 'optional': ssl.CERT_OPTIONAL, - 'required': ssl.CERT_REQUIRED - } - if ssl_cert_reqs not in 
CERT_REQS: - raise RedisError( - "Invalid SSL Certificate Requirements Flag: %s" % - ssl_cert_reqs) - ssl_cert_reqs = CERT_REQS[ssl_cert_reqs] - self.cert_reqs = ssl_cert_reqs - self.ca_certs = ssl_ca_certs - - def _connect(self): - "Wrap the socket with SSL support" - sock = super(SSLConnection, self)._connect() - if hasattr(ssl, "create_default_context"): - context = ssl.create_default_context() - context.check_hostname = False - context.verify_mode = self.cert_reqs - if self.certfile and self.keyfile: - context.load_cert_chain(certfile=self.certfile, - keyfile=self.keyfile) - if self.ca_certs: - context.load_verify_locations(self.ca_certs) - sock = context.wrap_socket(sock, server_hostname=self.host) - else: - # In case this code runs in a version which is older than 2.7.9, - # we want to fall back to old code - sock = ssl.wrap_socket(sock, - cert_reqs=self.cert_reqs, - keyfile=self.keyfile, - certfile=self.certfile, - ca_certs=self.ca_certs) - return sock - - -class UnixDomainSocketConnection(Connection): - description_format = "UnixDomainSocketConnection" - - def __init__(self, path='', db=0, password=None, - socket_timeout=None, encoding='utf-8', - encoding_errors='strict', decode_responses=False, - retry_on_timeout=False, - parser_class=DefaultParser, socket_read_size=65536, - health_check_interval=0): - self.pid = os.getpid() - self.path = path - self.db = db - self.password = password - self.socket_timeout = socket_timeout - self.retry_on_timeout = retry_on_timeout - self.health_check_interval = health_check_interval - self.next_health_check = 0 - self.encoder = Encoder(encoding, encoding_errors, decode_responses) - self._sock = None - self._parser = parser_class(socket_read_size=socket_read_size) - self._description_args = { - 'path': self.path, - 'db': self.db, - } - self._connect_callbacks = [] - self._buffer_cutoff = 6000 - - def _connect(self): - "Create a Unix domain socket connection" - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - 
sock.settimeout(self.socket_timeout) - sock.connect(self.path) - return sock - - def _error_message(self, exception): - # args for socket.error can either be (errno, "message") - # or just "message" - if len(exception.args) == 1: - return "Error connecting to unix socket: %s. %s." % \ - (self.path, exception.args[0]) - else: - return "Error %s connecting to unix socket: %s. %s." % \ - (exception.args[0], self.path, exception.args[1]) - - -FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO') - - -def to_bool(value): - if value is None or value == '': - return None - if isinstance(value, basestring) and value.upper() in FALSE_STRINGS: - return False - return bool(value) - - -URL_QUERY_ARGUMENT_PARSERS = { - 'socket_timeout': float, - 'socket_connect_timeout': float, - 'socket_keepalive': to_bool, - 'retry_on_timeout': to_bool, - 'max_connections': int, - 'health_check_interval': int, -} - - -class ConnectionPool(object): - "Generic connection pool" - @classmethod - def from_url(cls, url, db=None, decode_components=False, **kwargs): - """ - Return a connection pool configured from the given URL. - - For example:: - - redis://[:password]@localhost:6379/0 - rediss://[:password]@localhost:6379/0 - unix://[:password]@/path/to/socket.sock?db=0 - - Three URL schemes are supported: - - - ```redis://`` - `_ creates a - normal TCP socket connection - - ```rediss://`` - `_ creates - a SSL wrapped TCP socket connection - - ``unix://`` creates a Unix Domain Socket connection - - There are several ways to specify a database number. The parse function - will return the first specified option: - 1. A ``db`` querystring option, e.g. redis://localhost?db=0 - 2. If using the redis:// scheme, the path argument of the url, e.g. - redis://localhost/0 - 3. The ``db`` argument to this function. - - If none of these options are specified, db=0 is used. - - The ``decode_components`` argument allows this function to work with - percent-encoded URLs. 
If this argument is set to ``True`` all ``%xx`` - escapes will be replaced by their single-character equivalents after - the URL has been parsed. This only applies to the ``hostname``, - ``path``, and ``password`` components. - - Any additional querystring arguments and keyword arguments will be - passed along to the ConnectionPool class's initializer. The querystring - arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied - are parsed as float values. The arguments ``socket_keepalive`` and - ``retry_on_timeout`` are parsed to boolean values that accept - True/False, Yes/No values to indicate state. Invalid types cause a - ``UserWarning`` to be raised. In the case of conflicting arguments, - querystring arguments always win. - - """ - url = urlparse(url) - url_options = {} - - for name, value in iteritems(parse_qs(url.query)): - if value and len(value) > 0: - parser = URL_QUERY_ARGUMENT_PARSERS.get(name) - if parser: - try: - url_options[name] = parser(value[0]) - except (TypeError, ValueError): - warnings.warn(UserWarning( - "Invalid value for `%s` in connection URL." % name - )) - else: - url_options[name] = value[0] - - if decode_components: - password = unquote(url.password) if url.password else None - path = unquote(url.path) if url.path else None - hostname = unquote(url.hostname) if url.hostname else None - else: - password = url.password - path = url.path - hostname = url.hostname - - # We only support redis://, rediss:// and unix:// schemes. 
- if url.scheme == 'unix': - url_options.update({ - 'password': password, - 'path': path, - 'connection_class': UnixDomainSocketConnection, - }) - - elif url.scheme in ('redis', 'rediss'): - url_options.update({ - 'host': hostname, - 'port': int(url.port or 6379), - 'password': password, - }) - - # If there's a path argument, use it as the db argument if a - # querystring value wasn't specified - if 'db' not in url_options and path: - try: - url_options['db'] = int(path.replace('/', '')) - except (AttributeError, ValueError): - pass - - if url.scheme == 'rediss': - url_options['connection_class'] = SSLConnection - else: - valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://')) - raise ValueError('Redis URL must specify one of the following' - 'schemes (%s)' % valid_schemes) - - # last shot at the db value - url_options['db'] = int(url_options.get('db', db or 0)) - - # update the arguments from the URL values - kwargs.update(url_options) - - # backwards compatability - if 'charset' in kwargs: - warnings.warn(DeprecationWarning( - '"charset" is deprecated. Use "encoding" instead')) - kwargs['encoding'] = kwargs.pop('charset') - if 'errors' in kwargs: - warnings.warn(DeprecationWarning( - '"errors" is deprecated. Use "encoding_errors" instead')) - kwargs['encoding_errors'] = kwargs.pop('errors') - - return cls(**kwargs) - - def __init__(self, connection_class=Connection, max_connections=None, - **connection_kwargs): - """ - Create a connection pool. If max_connections is set, then this - object raises redis.ConnectionError when the pool's limit is reached. - - By default, TCP connections are created unless connection_class is - specified. Use redis.UnixDomainSocketConnection for unix sockets. - - Any additional keyword arguments are passed to the constructor of - connection_class. 
- """ - max_connections = max_connections or 2 ** 31 - if not isinstance(max_connections, (int, long)) or max_connections < 0: - raise ValueError('"max_connections" must be a positive integer') - - self.connection_class = connection_class - self.connection_kwargs = connection_kwargs - self.max_connections = max_connections - - self.reset() - - def __repr__(self): - return "%s<%s>" % ( - type(self).__name__, - repr(self.connection_class(**self.connection_kwargs)), - ) - - def reset(self): - self.pid = os.getpid() - self._created_connections = 0 - self._available_connections = [] - self._in_use_connections = set() - self._check_lock = threading.Lock() - - def _checkpid(self): - if self.pid != os.getpid(): - with self._check_lock: - if self.pid == os.getpid(): - # another thread already did the work while we waited - # on the lock. - return - self.reset() - - def get_connection(self, command_name, *keys, **options): - "Get a connection from the pool" - self._checkpid() - try: - connection = self._available_connections.pop() - except IndexError: - connection = self.make_connection() - self._in_use_connections.add(connection) - try: - # ensure this connection is connected to Redis - connection.connect() - # connections that the pool provides should be ready to send - # a command. if not, the connection was either returned to the - # pool before all data has been read or the socket has been - # closed. either way, reconnect and verify everything is good. 
- try: - if connection.can_read(): - raise ConnectionError('Connection has data') - except ConnectionError: - connection.disconnect() - connection.connect() - if connection.can_read(): - raise ConnectionError('Connection not ready') - except: # noqa: E722 - # release the connection back to the pool so that we don't leak it - self.release(connection) - raise - - return connection - - def get_encoder(self): - "Return an encoder based on encoding settings" - kwargs = self.connection_kwargs - return Encoder( - encoding=kwargs.get('encoding', 'utf-8'), - encoding_errors=kwargs.get('encoding_errors', 'strict'), - decode_responses=kwargs.get('decode_responses', False) - ) - - def make_connection(self): - "Create a new connection" - if self._created_connections >= self.max_connections: - raise ConnectionError("Too many connections") - self._created_connections += 1 - return self.connection_class(**self.connection_kwargs) - - def release(self, connection): - "Releases the connection back to the pool" - self._checkpid() - if connection.pid != self.pid: - return - self._in_use_connections.remove(connection) - self._available_connections.append(connection) - - def disconnect(self): - "Disconnects all connections in the pool" - self._checkpid() - all_conns = chain(self._available_connections, - self._in_use_connections) - for connection in all_conns: - connection.disconnect() - - -class BlockingConnectionPool(ConnectionPool): - """ - Thread-safe blocking connection pool:: - - >>> from redis.client import Redis - >>> client = Redis(connection_pool=BlockingConnectionPool()) - - It performs the same function as the default - ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that, - it maintains a pool of reusable connections that can be shared by - multiple redis clients (safely across threads if required). 
- - The difference is that, in the event that a client tries to get a - connection from the pool when all of connections are in use, rather than - raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default - ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it - makes the client wait ("blocks") for a specified number of seconds until - a connection becomes available. - - Use ``max_connections`` to increase / decrease the pool size:: - - >>> pool = BlockingConnectionPool(max_connections=10) - - Use ``timeout`` to tell it either how many seconds to wait for a connection - to become available, or to block forever: - - # Block forever. - >>> pool = BlockingConnectionPool(timeout=None) - - # Raise a ``ConnectionError`` after five seconds if a connection is - # not available. - >>> pool = BlockingConnectionPool(timeout=5) - """ - def __init__(self, max_connections=50, timeout=20, - connection_class=Connection, queue_class=LifoQueue, - **connection_kwargs): - - self.queue_class = queue_class - self.timeout = timeout - super(BlockingConnectionPool, self).__init__( - connection_class=connection_class, - max_connections=max_connections, - **connection_kwargs) - - def reset(self): - self.pid = os.getpid() - self._check_lock = threading.Lock() - - # Create and fill up a thread safe queue with ``None`` values. - self.pool = self.queue_class(self.max_connections) - while True: - try: - self.pool.put_nowait(None) - except Full: - break - - # Keep a list of actual connection instances so that we can - # disconnect them later. - self._connections = [] - - def make_connection(self): - "Make a fresh connection." - connection = self.connection_class(**self.connection_kwargs) - self._connections.append(connection) - return connection - - def get_connection(self, command_name, *keys, **options): - """ - Get a connection, blocking for ``self.timeout`` until a connection - is available from the pool. 
- - If the connection returned is ``None`` then creates a new connection. - Because we use a last-in first-out queue, the existing connections - (having been returned to the pool after the initial ``None`` values - were added) will be returned before ``None`` values. This means we only - create new connections when we need to, i.e.: the actual number of - connections will only increase in response to demand. - """ - # Make sure we haven't changed process. - self._checkpid() - - # Try and get a connection from the pool. If one isn't available within - # self.timeout then raise a ``ConnectionError``. - connection = None - try: - connection = self.pool.get(block=True, timeout=self.timeout) - except Empty: - # Note that this is not caught by the redis client and will be - # raised unless handled by application code. If you want never to - raise ConnectionError("No connection available.") - - # If the ``connection`` is actually ``None`` then that's a cue to make - # a new connection to add to the pool. - if connection is None: - connection = self.make_connection() - - try: - # ensure this connection is connected to Redis - connection.connect() - # connections that the pool provides should be ready to send - # a command. if not, the connection was either returned to the - # pool before all data has been read or the socket has been - # closed. either way, reconnect and verify everything is good. - try: - if connection.can_read(): - raise ConnectionError('Connection has data') - except ConnectionError: - connection.disconnect() - connection.connect() - if connection.can_read(): - raise ConnectionError('Connection not ready') - except: # noqa: E722 - # release the connection back to the pool so that we don't leak it - self.release(connection) - raise - - return connection - - def release(self, connection): - "Releases the connection back to the pool." - # Make sure we haven't changed process. 
- self._checkpid() - if connection.pid != self.pid: - return - - # Put the connection back into the pool. - try: - self.pool.put_nowait(connection) - except Full: - # perhaps the pool has been reset() after a fork? regardless, - # we don't want this connection - pass - - def disconnect(self): - "Disconnects all connections in the pool." - self._checkpid() - for connection in self._connections: - connection.disconnect() diff --git a/utill/rediss/exceptions.py b/utill/rediss/exceptions.py deleted file mode 100644 index e7f2cbb..0000000 --- a/utill/rediss/exceptions.py +++ /dev/null @@ -1,65 +0,0 @@ -"Core exceptions raised by the Redis client" - - -class RedisError(Exception): - pass - - -class ConnectionError(RedisError): - pass - - -class TimeoutError(RedisError): - pass - - -class AuthenticationError(ConnectionError): - pass - - -class BusyLoadingError(ConnectionError): - pass - - -class InvalidResponse(RedisError): - pass - - -class ResponseError(RedisError): - pass - - -class DataError(RedisError): - pass - - -class PubSubError(RedisError): - pass - - -class WatchError(RedisError): - pass - - -class NoScriptError(ResponseError): - pass - - -class ExecAbortError(ResponseError): - pass - - -class ReadOnlyError(ResponseError): - pass - - -class LockError(RedisError, ValueError): - "Errors acquiring or releasing a lock" - # NOTE: For backwards compatability, this class derives from ValueError. - # This was originally chosen to behave like threading.Lock. - pass - - -class LockNotOwnedError(LockError): - "Error trying to extend or release a lock that is (no longer) owned" - pass diff --git a/utill/rediss/lock.py b/utill/rediss/lock.py deleted file mode 100644 index d4b33c8..0000000 --- a/utill/rediss/lock.py +++ /dev/null @@ -1,274 +0,0 @@ -import threading -import time as mod_time -import uuid -from .exceptions import LockError, LockNotOwnedError -from .utils import dummy - - -class Lock(object): - """ - A shared, distributed Lock. 
Using Redis for locking allows the Lock - to be shared across processes and/or machines. - - It's left to the user to resolve deadlock issues and make sure - multiple clients play nicely together. - """ - - lua_release = None - lua_extend = None - lua_reacquire = None - - # KEYS[1] - lock name - # ARGS[1] - token - # return 1 if the lock was released, otherwise 0 - LUA_RELEASE_SCRIPT = """ - local token = redis.call('get', KEYS[1]) - if not token or token ~= ARGV[1] then - return 0 - end - redis.call('del', KEYS[1]) - return 1 - """ - - # KEYS[1] - lock name - # ARGS[1] - token - # ARGS[2] - additional milliseconds - # return 1 if the locks time was extended, otherwise 0 - LUA_EXTEND_SCRIPT = """ - local token = redis.call('get', KEYS[1]) - if not token or token ~= ARGV[1] then - return 0 - end - local expiration = redis.call('pttl', KEYS[1]) - if not expiration then - expiration = 0 - end - if expiration < 0 then - return 0 - end - redis.call('pexpire', KEYS[1], expiration + ARGV[2]) - return 1 - """ - - # KEYS[1] - lock name - # ARGS[1] - token - # ARGS[2] - milliseconds - # return 1 if the locks time was reacquired, otherwise 0 - LUA_REACQUIRE_SCRIPT = """ - local token = redis.call('get', KEYS[1]) - if not token or token ~= ARGV[1] then - return 0 - end - redis.call('pexpire', KEYS[1], ARGV[2]) - return 1 - """ - - def __init__(self, redis, name, timeout=None, sleep=0.1, - blocking=True, blocking_timeout=None, thread_local=True): - """ - Create a new Lock instance named ``name`` using the Redis client - supplied by ``redis``. - - ``timeout`` indicates a maximum life for the lock. - By default, it will remain locked until release() is called. - ``timeout`` can be specified as a float or integer, both representing - the number of seconds to wait. - - ``sleep`` indicates the amount of time to sleep per loop iteration - when the lock is in blocking mode and another client is currently - holding the lock. 
- - ``blocking`` indicates whether calling ``acquire`` should block until - the lock has been acquired or to fail immediately, causing ``acquire`` - to return False and the lock not being acquired. Defaults to True. - Note this value can be overridden by passing a ``blocking`` - argument to ``acquire``. - - ``blocking_timeout`` indicates the maximum amount of time in seconds to - spend trying to acquire the lock. A value of ``None`` indicates - continue trying forever. ``blocking_timeout`` can be specified as a - float or integer, both representing the number of seconds to wait. - - ``thread_local`` indicates whether the lock token is placed in - thread-local storage. By default, the token is placed in thread local - storage so that a thread only sees its token, not a token set by - another thread. Consider the following timeline: - - time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. - thread-1 sets the token to "abc" - time: 1, thread-2 blocks trying to acquire `my-lock` using the - Lock instance. - time: 5, thread-1 has not yet completed. redis expires the lock - key. - time: 5, thread-2 acquired `my-lock` now that it's available. - thread-2 sets the token to "xyz" - time: 6, thread-1 finishes its work and calls release(). if the - token is *not* stored in thread local storage, then - thread-1 would see the token value as "xyz" and would be - able to successfully release the thread-2's lock. - - In some use cases it's necessary to disable thread local storage. For - example, if you have code where one thread acquires a lock and passes - that lock instance to a worker thread to release later. If thread - local storage isn't disabled in this case, the worker thread won't see - the token set by the thread that acquired the lock. Our assumption - is that these cases aren't common and as such default to using - thread local storage. 
- """ - self.redis = redis - self.name = name - self.timeout = timeout - self.sleep = sleep - self.blocking = blocking - self.blocking_timeout = blocking_timeout - self.thread_local = bool(thread_local) - self.local = threading.local() if self.thread_local else dummy() - self.local.token = None - if self.timeout and self.sleep > self.timeout: - raise LockError("'sleep' must be less than 'timeout'") - self.register_scripts() - - def register_scripts(self): - cls = self.__class__ - client = self.redis - if cls.lua_release is None: - cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT) - if cls.lua_extend is None: - cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT) - if cls.lua_reacquire is None: - cls.lua_reacquire = \ - client.register_script(cls.LUA_REACQUIRE_SCRIPT) - - def __enter__(self): - # force blocking, as otherwise the user would have to check whether - # the lock was actually acquired or not. - if self.acquire(blocking=True): - return self - raise LockError("Unable to acquire lock within the time specified") - - def __exit__(self, exc_type, exc_value, traceback): - self.release() - - def acquire(self, blocking=None, blocking_timeout=None, token=None): - """ - Use Redis to hold a shared, distributed lock named ``name``. - Returns True once the lock is acquired. - - If ``blocking`` is False, always return immediately. If the lock - was acquired, return True, otherwise return False. - - ``blocking_timeout`` specifies the maximum number of seconds to - wait trying to acquire the lock. - - ``token`` specifies the token value to be used. If provided, token - must be a bytes object or a string that can be encoded to a bytes - object with the default encoding. If a token isn't specified, a UUID - will be generated. 
- """ - sleep = self.sleep - if token is None: - token = uuid.uuid1().hex.encode() - else: - encoder = self.redis.connection_pool.get_encoder() - token = encoder.encode(token) - if blocking is None: - blocking = self.blocking - if blocking_timeout is None: - blocking_timeout = self.blocking_timeout - stop_trying_at = None - if blocking_timeout is not None: - stop_trying_at = mod_time.time() + blocking_timeout - while True: - if self.do_acquire(token): - self.local.token = token - return True - if not blocking: - return False - if stop_trying_at is not None and mod_time.time() > stop_trying_at: - return False - mod_time.sleep(sleep) - - def do_acquire(self, token): - if self.timeout: - # convert to milliseconds - timeout = int(self.timeout * 1000) - else: - timeout = None - if self.redis.set(self.name, token, nx=True, px=timeout): - return True - return False - - def locked(self): - """ - Returns True if this key is locked by any process, otherwise False. - """ - return self.redis.get(self.name) is not None - - def owned(self): - """ - Returns True if this key is locked by this lock, otherwise False. 
- """ - stored_token = self.redis.get(self.name) - # need to always compare bytes to bytes - # TODO: this can be simplified when the context manager is finished - if stored_token and not isinstance(stored_token, bytes): - encoder = self.redis.connection_pool.get_encoder() - stored_token = encoder.encode(stored_token) - return self.local.token is not None and \ - stored_token == self.local.token - - def release(self): - "Releases the already acquired lock" - expected_token = self.local.token - if expected_token is None: - raise LockError("Cannot release an unlocked lock") - self.local.token = None - self.do_release(expected_token) - - def do_release(self, expected_token): - if not bool(self.lua_release(keys=[self.name], - args=[expected_token], - client=self.redis)): - raise LockNotOwnedError("Cannot release a lock" - " that's no longer owned") - - def extend(self, additional_time): - """ - Adds more time to an already acquired lock. - - ``additional_time`` can be specified as an integer or a float, both - representing the number of seconds to add. - """ - if self.local.token is None: - raise LockError("Cannot extend an unlocked lock") - if self.timeout is None: - raise LockError("Cannot extend a lock with no timeout") - return self.do_extend(additional_time) - - def do_extend(self, additional_time): - additional_time = int(additional_time * 1000) - if not bool(self.lua_extend(keys=[self.name], - args=[self.local.token, additional_time], - client=self.redis)): - raise LockNotOwnedError("Cannot extend a lock that's" - " no longer owned") - return True - - def reacquire(self): - """ - Resets a TTL of an already acquired lock back to a timeout value. 
- """ - if self.local.token is None: - raise LockError("Cannot reacquire an unlocked lock") - if self.timeout is None: - raise LockError("Cannot reacquire a lock with no timeout") - return self.do_reacquire() - - def do_reacquire(self): - timeout = int(self.timeout * 1000) - if not bool(self.lua_reacquire(keys=[self.name], - args=[self.local.token, timeout], - client=self.redis)): - raise LockNotOwnedError("Cannot reacquire a lock that's" - " no longer owned") - return True diff --git a/utill/rediss/sentinel.py b/utill/rediss/sentinel.py deleted file mode 100644 index 11263d2..0000000 --- a/utill/rediss/sentinel.py +++ /dev/null @@ -1,286 +0,0 @@ -import random -import weakref - -from redis.client import Redis -from redis.connection import ConnectionPool, Connection -from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError, - TimeoutError) -from redis._compat import iteritems, nativestr, xrange - - -class MasterNotFoundError(ConnectionError): - pass - - -class SlaveNotFoundError(ConnectionError): - pass - - -class SentinelManagedConnection(Connection): - def __init__(self, **kwargs): - self.connection_pool = kwargs.pop('connection_pool') - super(SentinelManagedConnection, self).__init__(**kwargs) - - def __repr__(self): - pool = self.connection_pool - s = '%s' % (type(self).__name__, pool.service_name) - if self.host: - host_info = ',host=%s,port=%s' % (self.host, self.port) - s = s % host_info - return s - - def connect_to(self, address): - self.host, self.port = address - super(SentinelManagedConnection, self).connect() - if self.connection_pool.check_connection: - self.send_command('PING') - if nativestr(self.read_response()) != 'PONG': - raise ConnectionError('PING failed') - - def connect(self): - if self._sock: - return # already connected - if self.connection_pool.is_master: - self.connect_to(self.connection_pool.get_master_address()) - else: - for slave in self.connection_pool.rotate_slaves(): - try: - return self.connect_to(slave) - 
except ConnectionError: - continue - raise SlaveNotFoundError # Never be here - - def read_response(self): - try: - return super(SentinelManagedConnection, self).read_response() - except ReadOnlyError: - if self.connection_pool.is_master: - # When talking to a master, a ReadOnlyError when likely - # indicates that the previous master that we're still connected - # to has been demoted to a slave and there's a new master. - # calling disconnect will force the connection to re-query - # sentinel during the next connect() attempt. - self.disconnect() - raise ConnectionError('The previous master is now a slave') - raise - - -class SentinelConnectionPool(ConnectionPool): - """ - Sentinel backed connection pool. - - If ``check_connection`` flag is set to True, SentinelManagedConnection - sends a PING command right after establishing the connection. - """ - - def __init__(self, service_name, sentinel_manager, **kwargs): - kwargs['connection_class'] = kwargs.get( - 'connection_class', SentinelManagedConnection) - self.is_master = kwargs.pop('is_master', True) - self.check_connection = kwargs.pop('check_connection', False) - super(SentinelConnectionPool, self).__init__(**kwargs) - self.connection_kwargs['connection_pool'] = weakref.proxy(self) - self.service_name = service_name - self.sentinel_manager = sentinel_manager - - def __repr__(self): - return "%s>> from redis.sentinel import Sentinel - >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) - >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) - >>> master.set('foo', 'bar') - >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) - >>> slave.get('foo') - 'bar' - - ``sentinels`` is a list of sentinel nodes. Each node is represented by - a pair (hostname, port). - - ``min_other_sentinels`` defined a minimum number of peers for a sentinel. - When querying a sentinel, if it doesn't meet this threshold, responses - from that sentinel won't be considered valid. 
- - ``sentinel_kwargs`` is a dictionary of connection arguments used when - connecting to sentinel instances. Any argument that can be passed to - a normal Redis connection can be specified here. If ``sentinel_kwargs`` is - not specified, any socket_timeout and socket_keepalive options specified - in ``connection_kwargs`` will be used. - - ``connection_kwargs`` are keyword arguments that will be used when - establishing a connection to a Redis server. - """ - - def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None, - **connection_kwargs): - # if sentinel_kwargs isn't defined, use the socket_* options from - # connection_kwargs - if sentinel_kwargs is None: - sentinel_kwargs = { - k: v - for k, v in iteritems(connection_kwargs) - if k.startswith('socket_') - } - self.sentinel_kwargs = sentinel_kwargs - - self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs) - for hostname, port in sentinels] - self.min_other_sentinels = min_other_sentinels - self.connection_kwargs = connection_kwargs - - def __repr__(self): - sentinel_addresses = [] - for sentinel in self.sentinels: - sentinel_addresses.append('%s:%s' % ( - sentinel.connection_pool.connection_kwargs['host'], - sentinel.connection_pool.connection_kwargs['port'], - )) - return '%s' % ( - type(self).__name__, - ','.join(sentinel_addresses)) - - def check_master_state(self, state, service_name): - if not state['is_master'] or state['is_sdown'] or state['is_odown']: - return False - # Check if our sentinel doesn't see other nodes - if state['num-other-sentinels'] < self.min_other_sentinels: - return False - return True - - def discover_master(self, service_name): - """ - Asks sentinel servers for the Redis master's address corresponding - to the service labeled ``service_name``. - - Returns a pair (address, port) or raises MasterNotFoundError if no - master is found. 
- """ - for sentinel_no, sentinel in enumerate(self.sentinels): - try: - masters = sentinel.sentinel_masters() - except (ConnectionError, TimeoutError): - continue - state = masters.get(service_name) - if state and self.check_master_state(state, service_name): - # Put this sentinel at the top of the list - self.sentinels[0], self.sentinels[sentinel_no] = ( - sentinel, self.sentinels[0]) - return state['ip'], state['port'] - raise MasterNotFoundError("No master found for %r" % (service_name,)) - - def filter_slaves(self, slaves): - "Remove slaves that are in an ODOWN or SDOWN state" - slaves_alive = [] - for slave in slaves: - if slave['is_odown'] or slave['is_sdown']: - continue - slaves_alive.append((slave['ip'], slave['port'])) - return slaves_alive - - def discover_slaves(self, service_name): - "Returns a list of alive slaves for service ``service_name``" - for sentinel in self.sentinels: - try: - slaves = sentinel.sentinel_slaves(service_name) - except (ConnectionError, ResponseError, TimeoutError): - continue - slaves = self.filter_slaves(slaves) - if slaves: - return slaves - return [] - - def master_for(self, service_name, redis_class=Redis, - connection_pool_class=SentinelConnectionPool, **kwargs): - """ - Returns a redis client instance for the ``service_name`` master. - - A SentinelConnectionPool class is used to retrive the master's - address before establishing a new connection. - - NOTE: If the master's address has changed, any cached connections to - the old master are closed. - - By default clients will be a redis.Redis instance. Specify a - different class to the ``redis_class`` argument if you desire - something different. - - The ``connection_pool_class`` specifies the connection pool to use. - The SentinelConnectionPool will be used by default. - - All other keyword arguments are merged with any connection_kwargs - passed to this class and passed to the connection pool as keyword - arguments to be used to initialize Redis connections. 
- """ - kwargs['is_master'] = True - connection_kwargs = dict(self.connection_kwargs) - connection_kwargs.update(kwargs) - return redis_class(connection_pool=connection_pool_class( - service_name, self, **connection_kwargs)) - - def slave_for(self, service_name, redis_class=Redis, - connection_pool_class=SentinelConnectionPool, **kwargs): - """ - Returns redis client instance for the ``service_name`` slave(s). - - A SentinelConnectionPool class is used to retrive the slave's - address before establishing a new connection. - - By default clients will be a redis.Redis instance. Specify a - different class to the ``redis_class`` argument if you desire - something different. - - The ``connection_pool_class`` specifies the connection pool to use. - The SentinelConnectionPool will be used by default. - - All other keyword arguments are merged with any connection_kwargs - passed to this class and passed to the connection pool as keyword - arguments to be used to initialize Redis connections. - """ - kwargs['is_master'] = False - connection_kwargs = dict(self.connection_kwargs) - connection_kwargs.update(kwargs) - return redis_class(connection_pool=connection_pool_class( - service_name, self, **connection_kwargs)) diff --git a/utill/rediss/utils.py b/utill/rediss/utils.py deleted file mode 100644 index 0b0067e..0000000 --- a/utill/rediss/utils.py +++ /dev/null @@ -1,33 +0,0 @@ -from contextlib import contextmanager - - -try: - import hiredis - HIREDIS_AVAILABLE = True -except ImportError: - HIREDIS_AVAILABLE = False - - -def from_url(url, db=None, **kwargs): - """ - Returns an active Redis client generated from the given database URL. - - Will attempt to extract the database id from the path url fragment, if - none is provided. 
- """ - from redis.client import Redis - return Redis.from_url(url, db, **kwargs) - - -@contextmanager -def pipeline(redis_obj): - p = redis_obj.pipeline() - yield p - p.execute() - - -class dummy(object): - """ - Instances of this class can be used as an attribute container. - """ - pass -- Gitee From 910a17673a429250076d6e6d3c6101ac14bf8e16 Mon Sep 17 00:00:00 2001 From: kunkun Date: Tue, 12 May 2020 01:34:38 +0800 Subject: [PATCH 12/13] kun --- .gitignore | 15 + Events.py | 57 + LICENSE | 21 + README.md | 66 + __init__.py | 2 + app.py | 824 ++++ application/__init__.py | 5 + application/api/__init__.py | 2 + application/api/common/__init__.py | 19 + application/api/common/autoload.py | 2 + application/api/controller/__init__.py | 5 + application/api/controller/v1/__init__.py | 4 + application/api/controller/v1/index.py | 16 + application/api/controller/v2/__init__.py | 4 + application/api/controller/v2/index.py | 10 + application/api/tpl/v1/index/index.html | 29 + application/api/tpl/v2/index/index.html | 29 + application/common/__init__.py | 75 + application/common/autoload.py | 5 + application/config/__init__.py | 40 + application/config/other.py | 15 + common/__init__.py | 5 + common/autoload.py | 323 ++ common/globals.py | 9 + common/request.py | 44 + common/session.py | 62 + config/__init__.py | 103 + create.py | 95 + tpl/error.html | 27 + utill/Queues | Bin 0 -> 12288 bytes utill/app.py | 35 + utill/cache/cache.py | 261 ++ utill/dateutil/__init__.py | 8 + utill/dateutil/_common.py | 43 + utill/dateutil/_version.py | 4 + utill/dateutil/easter.py | 89 + utill/dateutil/parser/__init__.py | 60 + utill/dateutil/parser/_parser.py | 1580 ++++++++ utill/dateutil/parser/isoparser.py | 411 ++ utill/dateutil/relativedelta.py | 599 +++ utill/dateutil/rrule.py | 1736 +++++++++ utill/dateutil/tz/__init__.py | 17 + utill/dateutil/tz/_common.py | 419 ++ utill/dateutil/tz/_factories.py | 73 + utill/dateutil/tz/tz.py | 1836 +++++++++ utill/dateutil/tz/win.py | 370 ++ 
utill/dateutil/tzwin.py | 2 + utill/dateutil/utils.py | 71 + utill/dateutil/zoneinfo/__init__.py | 167 + utill/dateutil/zoneinfo/rebuild.py | 53 + utill/db/model.py | 220 ++ utill/db/mongodb.py | 332 ++ utill/db/mysql.py | 1020 +++++ utill/db/pymysql/__init__.py | 141 + utill/db/pymysql/_auth.py | 265 ++ utill/db/pymysql/_compat.py | 21 + utill/db/pymysql/_socketio.py | 134 + utill/db/pymysql/charset.py | 212 + utill/db/pymysql/connections.py | 1279 ++++++ utill/db/pymysql/constants/CLIENT.py | 31 + utill/db/pymysql/constants/COMMAND.py | 33 + utill/db/pymysql/constants/CR.py | 68 + utill/db/pymysql/constants/ER.py | 475 +++ utill/db/pymysql/constants/FIELD_TYPE.py | 33 + utill/db/pymysql/constants/FLAG.py | 15 + utill/db/pymysql/constants/SERVER_STATUS.py | 11 + utill/db/pymysql/constants/__init__.py | 0 utill/db/pymysql/converters.py | 411 ++ utill/db/pymysql/cursors.py | 536 +++ utill/db/pymysql/err.py | 109 + utill/db/pymysql/optionfile.py | 23 + utill/db/pymysql/protocol.py | 341 ++ utill/db/pymysql/times.py | 20 + utill/db/pymysql/util.py | 13 + utill/db/sqlite.py | 666 ++++ utill/db/sqlitedata/kcwdb | 0 utill/filetype/__init__.py | 10 + utill/filetype/filetype.py | 98 + utill/filetype/helpers.py | 122 + utill/filetype/match.py | 119 + utill/filetype/types/__init__.py | 83 + utill/filetype/types/archive.py | 515 +++ utill/filetype/types/audio.py | 166 + utill/filetype/types/base.py | 31 + utill/filetype/types/font.py | 99 + utill/filetype/types/image.py | 279 ++ utill/filetype/types/isobmff.py | 33 + utill/filetype/types/video.py | 216 ++ utill/filetype/utils.py | 72 + utill/http.py | 83 + utill/queues.py | 101 + utill/redis.py | 210 + utill/rediss/__init__.py | 41 + utill/rediss/_compat.py | 138 + utill/rediss/client.py | 3865 +++++++++++++++++++ utill/rediss/connection.py | 1261 ++++++ utill/rediss/exceptions.py | 65 + utill/rediss/lock.py | 274 ++ utill/rediss/sentinel.py | 286 ++ utill/rediss/utils.py | 33 + 100 files changed, 24356 insertions(+) create 
mode 100644 .gitignore create mode 100644 Events.py create mode 100644 LICENSE create mode 100644 README.md create mode 100644 __init__.py create mode 100644 app.py create mode 100644 application/__init__.py create mode 100644 application/api/__init__.py create mode 100644 application/api/common/__init__.py create mode 100644 application/api/common/autoload.py create mode 100644 application/api/controller/__init__.py create mode 100644 application/api/controller/v1/__init__.py create mode 100644 application/api/controller/v1/index.py create mode 100644 application/api/controller/v2/__init__.py create mode 100644 application/api/controller/v2/index.py create mode 100644 application/api/tpl/v1/index/index.html create mode 100644 application/api/tpl/v2/index/index.html create mode 100644 application/common/__init__.py create mode 100644 application/common/autoload.py create mode 100644 application/config/__init__.py create mode 100644 application/config/other.py create mode 100644 common/__init__.py create mode 100644 common/autoload.py create mode 100644 common/globals.py create mode 100644 common/request.py create mode 100644 common/session.py create mode 100644 config/__init__.py create mode 100644 create.py create mode 100644 tpl/error.html create mode 100644 utill/Queues create mode 100644 utill/app.py create mode 100644 utill/cache/cache.py create mode 100644 utill/dateutil/__init__.py create mode 100644 utill/dateutil/_common.py create mode 100644 utill/dateutil/_version.py create mode 100644 utill/dateutil/easter.py create mode 100644 utill/dateutil/parser/__init__.py create mode 100644 utill/dateutil/parser/_parser.py create mode 100644 utill/dateutil/parser/isoparser.py create mode 100644 utill/dateutil/relativedelta.py create mode 100644 utill/dateutil/rrule.py create mode 100644 utill/dateutil/tz/__init__.py create mode 100644 utill/dateutil/tz/_common.py create mode 100644 utill/dateutil/tz/_factories.py create mode 100644 utill/dateutil/tz/tz.py create 
mode 100644 utill/dateutil/tz/win.py create mode 100644 utill/dateutil/tzwin.py create mode 100644 utill/dateutil/utils.py create mode 100644 utill/dateutil/zoneinfo/__init__.py create mode 100644 utill/dateutil/zoneinfo/rebuild.py create mode 100644 utill/db/model.py create mode 100644 utill/db/mongodb.py create mode 100644 utill/db/mysql.py create mode 100644 utill/db/pymysql/__init__.py create mode 100644 utill/db/pymysql/_auth.py create mode 100644 utill/db/pymysql/_compat.py create mode 100644 utill/db/pymysql/_socketio.py create mode 100644 utill/db/pymysql/charset.py create mode 100644 utill/db/pymysql/connections.py create mode 100644 utill/db/pymysql/constants/CLIENT.py create mode 100644 utill/db/pymysql/constants/COMMAND.py create mode 100644 utill/db/pymysql/constants/CR.py create mode 100644 utill/db/pymysql/constants/ER.py create mode 100644 utill/db/pymysql/constants/FIELD_TYPE.py create mode 100644 utill/db/pymysql/constants/FLAG.py create mode 100644 utill/db/pymysql/constants/SERVER_STATUS.py create mode 100644 utill/db/pymysql/constants/__init__.py create mode 100644 utill/db/pymysql/converters.py create mode 100644 utill/db/pymysql/cursors.py create mode 100644 utill/db/pymysql/err.py create mode 100644 utill/db/pymysql/optionfile.py create mode 100644 utill/db/pymysql/protocol.py create mode 100644 utill/db/pymysql/times.py create mode 100644 utill/db/pymysql/util.py create mode 100644 utill/db/sqlite.py create mode 100644 utill/db/sqlitedata/kcwdb create mode 100644 utill/filetype/__init__.py create mode 100644 utill/filetype/filetype.py create mode 100644 utill/filetype/helpers.py create mode 100644 utill/filetype/match.py create mode 100644 utill/filetype/types/__init__.py create mode 100644 utill/filetype/types/archive.py create mode 100644 utill/filetype/types/audio.py create mode 100644 utill/filetype/types/base.py create mode 100644 utill/filetype/types/font.py create mode 100644 utill/filetype/types/image.py create mode 100644 
utill/filetype/types/isobmff.py create mode 100644 utill/filetype/types/video.py create mode 100644 utill/filetype/utils.py create mode 100644 utill/http.py create mode 100644 utill/queues.py create mode 100644 utill/redis.py create mode 100644 utill/rediss/__init__.py create mode 100644 utill/rediss/_compat.py create mode 100644 utill/rediss/client.py create mode 100644 utill/rediss/connection.py create mode 100644 utill/rediss/exceptions.py create mode 100644 utill/rediss/lock.py create mode 100644 utill/rediss/sentinel.py create mode 100644 utill/rediss/utils.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b055fa8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,15 @@ +#以下文件不允许提交到git +__pycache__ +.settings +.buildpath +.project +*.log +*.pyc +*log/ +/.idea +/.vscode +/dist +/kcweb.egg-info +/file +utill/db/Queues +utill/db/sqlitedata/kcwlicuxweb \ No newline at end of file diff --git a/Events.py b/Events.py new file mode 100644 index 0000000..574d83f --- /dev/null +++ b/Events.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +import os, sys, time, subprocess +from watchdog.observers import Observer +from watchdog.events import FileSystemEventHandler +class MyFileSystemEventHander(FileSystemEventHandler): + def __init__(self, fn): + super(MyFileSystemEventHander, self).__init__() + self.restart = fn + + def on_any_event(self, event): + if event.src_path.endswith('.py'): + print('* 更新文件:%s' % event.src_path) + self.restart() +class Events: + command = ['echo', 'ok'] + process = None + def __init__(self,argv): + argv=argv + # print(argv) + if 'python' not in argv[0]: + argv.insert(0, 'python3') + self.command = argv + # print(self.command) + paths = os.path.abspath('.') + self.start_watch(paths, None) + + def kill_process(self): + "关闭" + if self.process: + self.process.kill() + self.process.wait() + self.process = None + + def start_process(self): + "启动" + self.process = subprocess.Popen(self.command, stdin=sys.stdin, stdout=sys.stdout, 
stderr=sys.stderr) + + def restart_process(self): + "重启" + self.kill_process() + self.start_process() + + def start_watch(self,path, callback): + "执行" + observer = Observer() + observer.schedule(MyFileSystemEventHander(self.restart_process), path, recursive=True) + observer.start() + self.start_process() + try: + while True: + time.sleep(0.5) + except KeyboardInterrupt: + self.kill_process() + # observer.stop() + # observer.join() + +# Events(['server.py']) #执行server.py文件 \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..6cebfc2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 坤坤 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..7dee6dd --- /dev/null +++ b/README.md @@ -0,0 +1,66 @@ +完整文档请参考:http://intapp.kwebapp.cn/index/index/doc/docde/1 +#### 创建应用 +- 新建一个server.py文件,内容如下,执行python3 server.py创建应用 +- 如下面的代码创建了一个app应用,同时在app应用下创建了一个api模块 +```server.py +from kcweb.create import create +create("app","api") # 创建项目 +``` + +- 您的目录结构应该是这样,如下: +``` +├─./ 框架目录 +├─app 公共方法目录 +│ ├─common 公共函数目录 +│ │ ├─__init__.py 函数文件 +│ ├─config 配置目录 +│ │ ├─__init__.py 配置文件 +│ ├─api 模块目录 +│ │ ├─common 该模块的公共函数目录 +│ │ │ ├─__init__.py 函数文件 +│ │ ├─controller 控制器目录 +│ │ │ ├─__init__.py 版本初始化文件 +│ │ │ ├─v1 +│ │ │ │ ├─__init__.py 函数初始化文件 +│ │ │ │ ├─index.py 控制器文件 +│ │ │ ├─v2 +│ │ │ │ ├─__init__.py 函数初始化文件 +│ │ │ │ ├─index.py 控制器文件 +│ │ ├─tpl 模板文件目录 +│ │ │ ├─v1 +│ │ │ │ ├─index +│ │ │ │ │ ├─index.html 模块文件 +│ │ │ ├─v1 +│ │ │ │ ├─index +│ │ │ │ │ ├─index.html 模块文件 +│ │ ├─__init__.py 控制器初始化文件 +│ ├─static 静态资源目录 +│ ├─runtime 缓存目录 +│ ├─__init__.py 自动导入模块文件 +├─server.py 应用创建后生成的运行文件(应用创建时自动创建) +``` +- 其中server.py文件内容将被修改如下 +``` +# #gunicorn -b 0.0.0.0:39001 server:app +from kcweb import web +import app as application +app=web(__name__,application) +if __name__ == "__main__": + #app 是当前文件名 host监听ip port端口 name python解释器名字 (windows一般是python linux一般是python3) + app.run("server",host="0.0.0.0",port="39001",name="python") +``` +- 如果您当前系统的python解释器名字是python3,你应该是在当前目录下执行python3 server.py。 然后访问127.0.0.1:39001 + + +- 如果您当前系统的python解释器名字是python,您应该修改server.py代码如下 +``` +# #gunicorn -b 0.0.0.0:39001 server:app +from kcweb import web +import app as application +app=web(__name__,application) +if __name__ == "__main__": + #app 是当前文件名 host监听ip port端口 + app.run("server",host="0.0.0.0",port="39001",name="python") +``` +然后访问127.0.0.1:39001 + diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..72fc2be --- /dev/null +++ b/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from .app import web \ No newline at end of file diff --git a/app.py b/app.py new 
file mode 100644 index 0000000..53cb075 --- /dev/null +++ b/app.py @@ -0,0 +1,824 @@ +# -*- coding: utf-8 -*- +import socket,time,re,os,sys,traceback,threading,urllib +from . Events import Events +from . common import * +from . import config +from mako.template import Template +from datetime import datetime +from threading import local +from .utill import filetype +from kcweb.utill.cache import cache as kcwcache +class web: + __name=None + __appname=None + __config=config + def __new__(self,name,appname=None): + self.__name=name + self.__appname=appname + if self.__name != '__main__': + def apps(env, start_response): + # REQUEST_METHOD=env['REQUEST_METHOD'] #GET + # QUERY_STRING=env['QUERY_STRING'] #a=1&b=1 + # RAW_URI=env['RAW_URI'] #/aa/bb/cc?a=1&b=1 + # SERVER_PROTOCOL=env['SERVER_PROTOCOL'] #HTTP/1.1 + # HTTP_HOST=env['HTTP_HOST'] #212.129.149.238:39010 + # HTTP_COOKIE=env['HTTP_COOKIE'] #cookie + # REMOTE_ADDR=env['REMOTE_ADDR'] #27.156.27.201 + # PATH_INFO=env['PATH_INFO'] #/aa/bb/cc + # HTTP_USER_AGENT=env['HTTP_USER_AGENT'] #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0 + try: + env['BODY_DATA']=str(env['wsgi.input'].next(), encoding = "utf-8") + except: + env['BODY_DATA']="" + p=(config.app['staticpath']+env['RAW_URI'].replace(' ','')) + status='200 ok' + if os.path.isfile(p): + kind = filetype.guess(p) + if kind is None: + f=open(p,"rb") + body=f.read() + f.close() + resheader=[ + ("Cache-Control","public, max-age=43200"), + ] + else: + f=open(p,"rb") + body=f.read() + f.close() + resheader=[ + ("Content-Type",kind.mime), + ("Cache-Control","public, max-age=43200"), + ("Accept-Ranges","bytes"), + # ("Content-Length",len(body)) + ] + else: + status,resheader,body=self.__routes(self,env) + if type(body) is bytes: + pass + else: + body=bytes(body, encoding='utf-8') + # print(env['bodydata']) + # print("\n\nwsgi.input",env['wsgi.input']) + # 
print("\n\ndir(env['wsgi.input'])",dir(env['wsgi.input'])) + # print("\n\nenv['wsgi.input'].__dict__",env['wsgi.input'].__dict__) + # try: + # print("\n\nwsgi.input.buf()",env['wsgi.input'].buf()) + # except Exception as e: + # print("\n\nwsgi.input.buf() error:",e) + # try: + # print("\n\nwsgi.input.next()",env['wsgi.input'].next()) + # except Exception as e: + # print("\n\nwsgi.input.next() error:",e) + # try: + # print("\n\nwsgi.input.read()",env['wsgi.input'].read()) + # except Exception as e: + # print("\n\nwsgi.input.read() error:",e) + # try: + # print("\n\nwsgi.input.reader()",env['wsgi.input'].reader()) + # except Exception as e: + # print("\n\nwsgi.input.reader() error:",e) + # try: + # print("\n\nwsgi.input.readline()",env['wsgi.input'].readline()) + # except Exception as e: + # print("\n\nwsgi.input.readline() error:",e) + # try: + # print("\n\nwsgi.input.readlines()",env['wsgi.input'].readlines()) + # except Exception as e: + # print("\n\nwsgi.input.readlines() error:",e) + # try: + # print("wsgi.input.aa",env['wsgi.input'].get("SCRIPT_NAME", "")) + # except Exception as e: + # print("wsgi.input.get('aa') error:",e) + # try: + # print("wsgi.input.aa",env['wsgi.input']['aa']) + # except Exception as e: + # print("wsgi.input['aa'] error:",e) + # print(dir(env['wsgi.input']).getsize) + # from io import StringIO + # stdout = StringIO() + # print("Hello world!", file=stdout) + # print(file=stdout) + # h = sorted(env.items()) + # for k,v in h: + # print(k,'=',repr(v), file=stdout) + # print(stdout.getvalue().encode("utf-8")) + start_response(status,resheader) + return [body] + return apps + else: + return super().__new__(self) + def run(self,host="127.0.0.1",port="39001",name='python'): + """运行开发环境 + + host: 监听地址 + + port: 端口 + + name: python命令行解释机名字 默认python + """ + if len(sys.argv)==1 or (len(sys.argv)==2 and sys.argv[1]=='eventlog'): + filename=sys.argv[0][:-3] + if self.__config.app['app_debug']: + arg=sys.argv + if len(arg)==2 and arg[1]=='eventlog': + 
self.__impl(host=host,port=port,filename=filename) + else: + Events([name,str(filename)+'.py','eventlog']) + else: + self.__impl( + host=host, + port=port, + filename=filename + ) + else: + try: + RAW_URI=sys.argv[1] + except:pass + else: + PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc + if PATH_INFO[0]=='/': + PATH_INFO=PATH_INFO[1:] + QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1 + reqheader={ + 'REQUEST_METHOD':'GET', + 'RAW_URI':RAW_URI, + 'PATH_INFO':PATH_INFO, + 'QUERY_STRING':QUERY_STRING, + 'SERVER_PROTOCOL':'', + 'HTTP_HOST':'', + 'HTTP_COOKIE':'', + 'REMOTE_ADDR':'', + 'HTTP_USER_AGENT':'', + 'BODY_DATA':'' + } + status,resheader,body=self.__routes(reqheader) + if 'body' not in body and 'html' not in body and '<' not in body and '>' not in body: + print(body) + exit() + def __impl(self,host,port,filename): + "运行测试服务器" + try: + self.__http_server( + host=host, + port=port, + filename=filename + ) + except KeyboardInterrupt: + pass + def __get_modular(self,header): + "获取模块" + modular='' + route=self.__config.route + if route['modular']: + if isinstance(route['modular'],str): + modular=route['modular'] + else: + HTTP_HOST=header['HTTP_HOST'].split(".")[0] + for mk in route['modular']: + if HTTP_HOST in mk: + modular=mk[HTTP_HOST] + return modular + def __getconfigroute(self,PATH_INFO,header): + "使用配置路由" + route=self.__config.route + routedefault=route['default'] + methods=route['methods'] + paths='' + for path in PATH_INFO: + paths+="/"+path + try: + for item in route['children']: + if ':' in item['path']: + path=item['path'].split(':') + if(len(path)==len(PATH_INFO)): + is_pp=False + try: + item['methods'] + except:pass + else: + methods=item['methods'] + for k in methods: #匹配请求方式 + if header['REQUEST_METHOD'] in k: + is_pp=True + break + if path[0]==paths[:len(path[0])] and is_pp: + del PATH_INFO[0] + cs=PATH_INFO + PATH_INFO=item['component'].split('/') + for v in cs: + PATH_INFO.append(v) + routedefault=True + break + elif 
item['path']==paths or item['path']+'/'==paths: + PATH_INFO=item['component'].split('/') + routedefault=True + break + except:pass + return routedefault,PATH_INFO + def defaultroute(self,header,PATH_INFO): + "路由匹配" + route=self.__config.route + modular=web.__get_modular(self,header) + routedefault=route['default'] + methods=route['methods'] + if routedefault: + edition='index' + files=route['files'] + funct=route['funct'] + else: + edition='' + files='' + funct='' + param=[] + urls='' + i=0 + HTTP_HOST=header['HTTP_HOST'].split(".")[0] + ##默认路由start ################################################################################# + + if modular: + if route['edition']: #匹配模块并且匹配了版本 + edition=route['edition'] + routedefault,PATH_INFO=web.__getconfigroute( + self, + PATH_INFO, + header + ) + if routedefault: #使用路由 + for path in PATH_INFO: + if path: + if i==0: + files=path + urls=urls+"/"+str(path) + elif i==1: + funct=path + urls=urls+"/"+str(path) + else: + param.append(urllib.parse.unquote(path)) + i+=1 + else: #配置模块没有配置版本 + routedefault,PATH_INFO=web.__getconfigroute( + self, + PATH_INFO, + header + ) + if routedefault: #使用默认路由 + for path in PATH_INFO: + if path: + if i==0: + edition=path + elif i==1: + files=path + urls=urls+"/"+str(path) + elif i==2: + funct=path + urls=urls+"/"+str(path) + else: + param.append(urllib.parse.unquote(path)) + i+=1 + elif route['edition']: #配置版本的但没有匹配模块 + edition=route['edition'] + routedefault,PATH_INFO=web.__getconfigroute( + self, + PATH_INFO, + header + ) + if routedefault: #使用默认路由 + for path in PATH_INFO: + if path: + if i==0: + modular=path + elif i==1: + files=path + urls=urls+"/"+str(path) + elif i==2: + funct=path + urls=urls+"/"+str(path) + else: + param.append(urllib.parse.unquote(path)) + i+=1 + else: #完全默认 + routedefault,PATH_INFO=web.__getconfigroute(self,PATH_INFO,header) + if routedefault: #使用默认路由 + for path in PATH_INFO: + if path: + if i==0: + modular=path + elif i==1: + edition=path + elif i==2: + files=path + 
urls=urls+"/"+str(path) + elif i==3: + funct=path + urls=urls+"/"+str(path) + else: + param.append(urllib.parse.unquote(path)) + i+=1 + #默认路由end ############################################################ + return methods,modular,edition,files,funct,tuple(param) + def __tran(self,data,status,resheader): + "转换控制器返回的内容" + if isinstance(data,tuple): + i=0 + for item in data: + if i==0: + body=item + elif i==1: + status=item + elif i==2: + if isinstance(item,dict): + for key in item: + resheader[key]=item[key] + else: + raise Exception('错误!这个不是一个字典') + else: + break + i+=1 + else: + body=data + return body,status,resheader + def __set_globals(self,header): + globals.HEADER.Method=header['REQUEST_METHOD'] + globals.HEADER.URL=header['RAW_URI'] + globals.HEADER.PATH_INFO=header['PATH_INFO'] + globals.HEADER.QUERY_STRING=header['QUERY_STRING'] + globals.HEADER.SERVER_PROTOCOL=header['SERVER_PROTOCOL'] + globals.HEADER.HTTP_HOST=header['HTTP_HOST'] + globals.HEADER.BODY_DATA=header['BODY_DATA'] + try: + globals.HEADER.HTTP_COOKIE=header['HTTP_COOKIE'] + except: + globals.HEADER.HTTP_COOKIE=None + globals.HEADER.HTTP_USER_AGENT=header['HTTP_USER_AGENT'] + def __del_globals(): + globals.VAR = local() + globals.HEADER = local() + globals.G = local() + def __routes(self,header): + body="这是一个http测试服务器" + status="200 ok" + resheader={"Content-Type":"text/html; charset=utf-8"} + web.__set_globals(self,header) + PATH_INFO=header['PATH_INFO'].split('/') + if PATH_INFO[0]==' ' or PATH_INFO[0]=='': + del PATH_INFO[0] + methods,modular,edition,files,funct,param=web.defaultroute(self,header,PATH_INFO) + if header['REQUEST_METHOD'] in methods: + try: + obj=getattr(web.__appname,modular) + except (AttributeError,UnboundLocalError): + status="500 Internal Server Error" + body=web.__tpl( + title = status, + e=status, + data="无法找到目录:"+str(modular)+"/" + ) + else: + try: + obj=getattr(obj,"controller") + except (AttributeError,UnboundLocalError): + status="404 Not Found" + body=web.__tpl( + 
title = status, + e=status, + data="无法找到目录:"+str(modular)+"/controller/" + ) + else: + try: + obj=getattr(obj,edition) + except (AttributeError,UnboundLocalError) as e: + con="无法找到目录:"+str(modular)+"/controller/"+str(edition)+"/" + try: + data=getattr(obj,"error")(e,con) + body,status,resheader=web.__tran( + self, + data, + status, + resheader + ) + except (AttributeError,UnboundLocalError): + status="404 Not Found" + body=web.__tpl( + title = status, + e=status,data=con + ) + except Exception as e: + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms,e=e + ) + else: + try: + obj=getattr(obj,files) + except (AttributeError,UnboundLocalError) as e: + con="无法找到文件:"+str(modular)+"/controller/"+str(edition)+"/"+str(files)+".py" + try: + data=getattr(obj,"error")(e,con) + body,status,resheader=web.__tran( + self + ,data + ,status + ,resheader + ) + except (AttributeError,UnboundLocalError): + status="404 Not Found" + body=web.__tpl( + title = status + ,data=con + ,e=status) + except Exception as e: + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms, + e=e + ) + else: + try: + data=None + if self.__config.app['before_request']: #请求前执行的函数 + try: + data=getattr(obj,self.__config.app['before_request'])() + if data: + body,status,resheader=web.__tran( + self,data, + status, + resheader + ) + except (AttributeError): + print(traceback.format_exc()) + pass + except Exception as e: + try: + data=getattr(obj,"error")(e,traceback.format_exc().split("\n")) + body,status,resheader=web.__tran( + self,data, + status, + resheader + ) + except (AttributeError): + data=True + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + # 
print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms,e=e + ) + except Exception as e: + data=True + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms,e=e + ) + if not data: + data=getattr(obj,funct)(*param) + body,status,resheader=web.__tran( + self,data, + status, + resheader + ) + except Exception as e: + try: + data=getattr(obj,"error")(e,traceback.format_exc().split("\n")) + body,status,resheader=web.__tran( + self,data, + status, + resheader + ) + except (AttributeError): + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms, + e=e + ) + except Exception as e: + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms, + e=e + ) + else: + status="405 Method Not Allowed" + body=web.__tpl( + title = status, + data='405 Method Not Allowed', + e='' + ) + try: + resheader['set-cookie']=globals.set_cookie + del globals.set_cookie + except:pass + + if self.__config.app['after_request']: #请求后执行的函数 + try: + data=getattr(obj,self.__config.app['after_request'])() + if data: + body,status,resheader=web.__tran(self,data,status,resheader) + except (AttributeError,UnboundLocalError):pass + except Exception as e: + try: + data=getattr(obj,"error")(e,traceback.format_exc().split("\n")) + body,status,resheader=web.__tran( + self,data, + status, + resheader + ) + except AttributeError as e: + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + 
errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status + ,data=errms, + e=e + ) + except Exception as e: + status="500 Internal Server Error" + errms=status + if self.__config.app['app_debug']: + print(traceback.format_exc()) + errms=traceback.format_exc().split("\n") + body=web.__tpl( + title = status, + data=errms, + e="" + ) + resheaders=[] + for key in resheader: + resheaders.append((key,resheader[key])) + web.__del_globals() + if isinstance(resheaders,list): + if not body: + body='' + if type(body) is bytes: + pass + else: + body=str(body) + return str(status),resheaders,body + else: + raise Exception() + def __tpl(**context): + path=os.path.split(os.path.realpath(__file__))[0] + body='' + with open(path+'/tpl/error.html', 'r',encoding='utf-8') as f: + content=f.read() + t=Template(content) + body=t.render(**context) + return body + + + def __http_server(self,host,port,filename): + tcp_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM) + try: + tcp_socket.bind((host,int(port))) + except OSError: + print("通常每个套接字地址(协议/网络地址/端口)只允许使用一次(按CTRL+C退出)") + else: + tcp_socket.listen(1024) + print('! 警告:这是开发服务器。不要在生产环境中部署使用它') + print('* 生产环境中建议使用gunicorn,gunicorn运行命令如:gunicorn -b '+host+':'+str(port)+' '+str(filename)+':app') + if self.__config.app['app_debug']: + print('* 调试器:开启') + else: + print('* 调试器:已关闭') + print("* 运行在http://"+host+":"+str(port)+"/ (按CTRL+C退出)") + while True: + new_tcp_socket,client_info=tcp_socket.accept() + t=threading.Thread(target=self.__server_client,args=(new_tcp_socket,)) + t.daemon=True + t.start() + tcp_socket.close() + def __server_client(self,new_socket): + # 处理http的的请求 + data=new_socket.recv(1047576).decode() + if data: + datas=data.split("\r\n") + data1=datas[0] + #reqsest + REQUEST_METHOD=data1.split("/")[0].replace(' ','') ##GET + RAW_URI=re.findall(REQUEST_METHOD+"(.+?) 
HTTP", data1) #/aa/bb/cc?a=1&b=1 + if RAW_URI: + RAW_URI=RAW_URI[0] + else: + RAW_URI='' + PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc + QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1 + SERVER_PROTOCOL=data1.split(" ")[-1] #HTTP/1.1 + HTTP_HOST=re.findall("Host: (.+?)\r\n", data)#212.129.149.238:39010 + if HTTP_HOST: + HTTP_HOST=HTTP_HOST[0] + else: + HTTP_HOST='' + HTTP_COOKIE=re.findall("Cookie: (.+?)\r\n", data)#cookie + if HTTP_COOKIE: + HTTP_COOKIE=HTTP_COOKIE[0] + else: + HTTP_COOKIE='' + REMOTE_ADDR='' + HTTP_USER_AGENT=re.findall("User-Agent: (.+?)\r\n", data) #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0 + if HTTP_USER_AGENT: + HTTP_USER_AGENT=HTTP_USER_AGENT[0] + else: + HTTP_USER_AGENT='' + BODY_DATA=datas[len(datas)-1] + # print(data) + #reqsest + reqheader={ + 'REQUEST_METHOD':REQUEST_METHOD, + 'RAW_URI':RAW_URI, + 'PATH_INFO':PATH_INFO, + 'QUERY_STRING':QUERY_STRING, + 'SERVER_PROTOCOL':SERVER_PROTOCOL, + 'HTTP_HOST':HTTP_HOST, + 'HTTP_COOKIE':HTTP_COOKIE, + 'REMOTE_ADDR':REMOTE_ADDR, + 'HTTP_USER_AGENT':HTTP_USER_AGENT, + 'BODY_DATA':BODY_DATA + } + p=(config.app['staticpath']+RAW_URI.replace(' ','')) + # print("目录",p) + status='200 ok' + if os.path.isfile(p): + # print('静态文件',p) + kind = filetype.guess(p) + if kind is None: + + f=open(p,"rb") + body=f.read() + f.close() + resheader=[("Cache-Control","public, max-age=43200"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] + + header="HTTP/1.1 %s \n" % status + header+="Content-Length:%d\n" % len(body) + else: + f=open(p,"rb") + body=f.read() + f.close() + resheader=[("Content-Type",kind.mime),("Cache-Control","public, max-age=43200"),("Accept-Ranges","bytes"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] + header="HTTP/1.1 %s \n" % status + header+="Content-Length:%d\n" % len(body) + else: + status,resheader,body=self.__routes(reqheader) + if type(body) is bytes: + pass + else: + 
body=body.encode() + header="HTTP/1.1 %s \n" % status + header+="Content-Length:%d\n" % len(body) + + print(HTTP_HOST+' -- ['+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))+'] "'+REQUEST_METHOD+" "+RAW_URI +" "+SERVER_PROTOCOL + '" '+status+"-") + t=time.time() + header+="Server:kcweb\n" + header+="Date:%s\n" % datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + for t in resheader: + header+="%s:%s\n" % (t[0],t[1]) + header+="\n" + try: + new_socket.send(header.encode()) + new_socket.send(body) + except Exception as e: + pass + new_socket.close() + def __http_sever(self,host,port): + #http测试服务器 + if self.__config.app['app_debug']: + print('* 调试器:开启') + else: + print('* 调试器:已关闭') + print("* 运行在http://"+host+":"+str(port)+"/ (按CTRL+C退出)") + tcp_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM) + tcp_socket.bind((host,int(port))) + tcp_socket.listen(1024) + pack_length=1024 + tcp_socket.setblocking(False) + tcp_socket_list=list() + + + while True: + try: + new_tcp_socket,client_info=tcp_socket.accept() + except: + pass + else: + new_tcp_socket.setblocking(False) + tcp_socket_list.append(new_tcp_socket) + for cli_soc in tcp_socket_list: + try: + data=cli_soc.recv(pack_length).decode() + except Exception as e: + pass + else: + if data: + datas=data.split("\r\n") + data1=datas[0] + #reqsest + REQUEST_METHOD=data1.split("/")[0].replace(' ','') ##GET + RAW_URI=re.findall(REQUEST_METHOD+"(.+?) 
HTTP", data1) #/aa/bb/cc?a=1&b=1 + if RAW_URI: + RAW_URI=RAW_URI[0] + else: + RAW_URI='' + PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc + QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1 + SERVER_PROTOCOL=data1.split(" ")[-1] #HTTP/1.1 + HTTP_HOST=re.findall("Host: (.+?)\r\n", data)#212.129.149.238:39010 + if HTTP_HOST: + HTTP_HOST=HTTP_HOST[0] + else: + HTTP_HOST='' + HTTP_COOKIE=re.findall("Cookie: (.+?)\r\n", data)#cookie + if HTTP_COOKIE: + HTTP_COOKIE=HTTP_COOKIE[0] + else: + HTTP_COOKIE='' + REMOTE_ADDR='' + HTTP_USER_AGENT=re.findall("User-Agent: (.+?)\r\n", data) #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0 + if HTTP_USER_AGENT: + HTTP_USER_AGENT=HTTP_USER_AGENT[0] + else: + HTTP_USER_AGENT='' + BODY_DATA=datas[len(datas)-1] + #reqsest + reqheader={ + 'REQUEST_METHOD':REQUEST_METHOD, + 'RAW_URI':RAW_URI, + 'PATH_INFO':PATH_INFO, + 'QUERY_STRING':QUERY_STRING, + 'SERVER_PROTOCOL':SERVER_PROTOCOL, + 'HTTP_HOST':HTTP_HOST, + 'HTTP_COOKIE':HTTP_COOKIE, + 'REMOTE_ADDR':REMOTE_ADDR, + 'HTTP_USER_AGENT':HTTP_USER_AGENT, + 'BODY_DATA':BODY_DATA + } + p=(config.app['staticpath']+RAW_URI.replace(' ','')) + + status='200 ok' + if os.path.isfile(p): + kind = filetype.guess(p) + if kind is None: + f=open(p,"rb") + body=f.read() + f.close() + resheader=[("Cache-Control","public, max-age=43200"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] + + header="HTTP/1.1 %s \n" % status + header+="Content-Length:%d\n" % len(body) + else: + f=open(p,"rb") + body=f.read() + f.close() + resheader=[("Content-Type",kind.mime),("Cache-Control","public, max-age=43200"),("Accept-Ranges","bytes"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")] + header="HTTP/1.1 %s \n" % status + header+="Content-Length:%d\n" % len(body) + else: + status,resheader,body=self.__routes(reqheader) + body=body.encode() + header="HTTP/1.1 %s \n" % status + header+="Content-Length:%d\n" % len(body) + + 
print(HTTP_HOST+' -- ['+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))+'] "'+REQUEST_METHOD+" "+RAW_URI +" "+SERVER_PROTOCOL + '" '+status+"-") + t=time.time() + header+="Server:kcweb\n" + header+="Date:%s\n" % datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + for t in resheader: + header+="%s:%s\n" % (t[0],t[1]) + header+="\n" + try: + cli_soc.send(header.encode()) + cli_soc.send(body) + except Exception as e: + cli_soc.close() + else: + cli_soc.close() + tcp_socket_list.remove(cli_soc) + tcp_socket.close() diff --git a/application/__init__.py b/application/__init__.py new file mode 100644 index 0000000..915fbee --- /dev/null +++ b/application/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +#导入模块 +% for i in tuple_modular: +from . import ${i} +% endfor diff --git a/application/api/__init__.py b/application/api/__init__.py new file mode 100644 index 0000000..0f9c434 --- /dev/null +++ b/application/api/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from . import controller \ No newline at end of file diff --git a/application/api/common/__init__.py b/application/api/common/__init__.py new file mode 100644 index 0000000..0917cdf --- /dev/null +++ b/application/api/common/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from .autoload import * +#下面的方法在当前模块中有效 +def before_request(): + G.userinfo=get_session("userinfo") + print('${modular}模块在请求前执行,我是要在配置文件配置后才能生效哦!',G.userinfo) +def after_request(): + print('${modular}模块在请求后执行,我是要在配置文件配置后才能生效哦!') +def set_session(name,value,expire=None): + "设置session" + return session.set("${appname}${modular}"+str(name),value,expire) +def get_session(name): + "获取session" + return session.get("${appname}${modular}"+str(name)) +def del_session(name): + "删除session" + return session.rm("${appname}${modular}"+str(name)) +def tpl(path,**context): + return Template("/${modular}/tpl/"+str(path),**context) diff --git a/application/api/common/autoload.py b/application/api/common/autoload.py new file mode 
100644 index 0000000..05965c2 --- /dev/null +++ b/application/api/common/autoload.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from ${appname}.common import * \ No newline at end of file diff --git a/application/api/controller/__init__.py b/application/api/controller/__init__.py new file mode 100644 index 0000000..5ab9848 --- /dev/null +++ b/application/api/controller/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from . import v1,v2 +# def error(err,data): +# "该函数在当前目录下无法匹配时被调用" +# return data,"200",{"Content-Type":"text/json; charset=utf-8"} \ No newline at end of file diff --git a/application/api/controller/v1/__init__.py b/application/api/controller/v1/__init__.py new file mode 100644 index 0000000..ab68b08 --- /dev/null +++ b/application/api/controller/v1/__init__.py @@ -0,0 +1,4 @@ +from . import index +# def error(err,data): +# "该函数在找不到模块时执行函数时和框架报错时被调用" +# return str(err) \ No newline at end of file diff --git a/application/api/controller/v1/index.py b/application/api/controller/v1/index.py new file mode 100644 index 0000000..80b47d4 --- /dev/null +++ b/application/api/controller/v1/index.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +from ${appname}.${modular}.common import * +def index(): + return tpl("/v1/index/index.html",title="欢迎使用kcweb框架",data=['这是${appname}应用${modular}模块下v1版本的一个模板渲染测试效果']) +def inter(id='',title=""): + data={ + 'title':title, + 'id':id + } + return successjson(data) +def home(id='',title=""): + data={ + "title":"标题是"+title, + "id":"id是"+id + } + return successjson(data) \ No newline at end of file diff --git a/application/api/controller/v2/__init__.py b/application/api/controller/v2/__init__.py new file mode 100644 index 0000000..ab68b08 --- /dev/null +++ b/application/api/controller/v2/__init__.py @@ -0,0 +1,4 @@ +from . 
import index +# def error(err,data): +# "该函数在找不到模块时执行函数时和框架报错时被调用" +# return str(err) \ No newline at end of file diff --git a/application/api/controller/v2/index.py b/application/api/controller/v2/index.py new file mode 100644 index 0000000..25d4d39 --- /dev/null +++ b/application/api/controller/v2/index.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from ${appname}.${modular}.common import * +def index(): + return tpl("/v2/index/index.html",title="欢迎使用kcweb框架",data=['这是${appname}应用${modular}模块下v2版本的一个模板渲染测试效果']) +def inter(): + data={ + 'title':'欢迎使用kcweb框架', + 'desc':'这是${appname}应用${modular}模块下v2版本的json输出效果' + } + return successjson(data) \ No newline at end of file diff --git a/application/api/tpl/v1/index/index.html b/application/api/tpl/v1/index/index.html new file mode 100644 index 0000000..f4818f5 --- /dev/null +++ b/application/api/tpl/v1/index/index.html @@ -0,0 +1,29 @@ + + + +${title} + + + + + + + +
+

这是v1模板文件

+ % if title: +

${title}

+ % endif + % if isinstance(data,str): + ${data} + % elif isinstance(data,list): +
    + % for i in data: +
  • ${i}
  • + % endfor +
+ % endif +
+ + + \ No newline at end of file diff --git a/application/api/tpl/v2/index/index.html b/application/api/tpl/v2/index/index.html new file mode 100644 index 0000000..f331dab --- /dev/null +++ b/application/api/tpl/v2/index/index.html @@ -0,0 +1,29 @@ + + + +${title} + + + + + + + +
+

这是v2模板文件

+ % if title: +

${title}

+ % endif + % if isinstance(data,str): + ${data} + % elif isinstance(data,list): +
    + % for i in data: +
  • ${i}
  • + % endfor +
+ % endif +
+ + + \ No newline at end of file diff --git a/application/common/__init__.py b/application/common/__init__.py new file mode 100644 index 0000000..05e1c79 --- /dev/null +++ b/application/common/__init__.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +from .autoload import * +def return_list(lists,count,pagenow,pagesize): + """返回分页列表 + + lists 数据库列表数据 + + count 数据库总数量 + + pagenow 页码 + + pagesize 每页数量 + """ + data={ + 'count':count, + 'pagenow':pagenow, + 'pagesize':pagesize, + 'pagecount':math.ceil(int(count)/int(pagesize)), + 'lists':lists + } + return data +def successjson(data=[],code=0,msg="成功",status='200 ok'): + """成功说在浏览器输出包装过的json + + 参数 data 结果 默认[] + + 参数 code body状态码 默认0 + + 参数 msg body状态描述 默认 成功 + + 参数 status http状态码 默认 200 + + 返回 json字符串结果集 + """ + res={ + "code":code, + "msg":msg, + "time":times(), + "data":data + } + return json_encode(res),status,{"Content-Type":"application/json; charset=utf-8","Access-Control-Allow-Origin":"*"} +def errorjson(data=[],code=1,msg="失败",status='500 error'): + """错误时在浏览器输出包装过的json + + 参数 data 结果 默认[] + + 参数 code body状态码 默认0 + + 参数 msg body状态描述 默认 成功 + + 参数 status http状态码 默认 200 + + 返回 json字符串结果集 + """ + return successjson(data=data,code=code,msg=msg,status=status) +def randoms(lens=6,types=1): + """生成随机字符串 + + lens 长度 + + types 1数字 2字母 3字母加数字 + """ + strs="0123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM,!@#$%^&*()_+=-;',./:<>?" 
+ if types==1: + strs="0123456789" + elif types==2: + strs="qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM" + elif types==3: + strs="0123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM" + k='' + i=0 + while i < lens: + k+=random.choice(strs) + i+=1 + return k \ No newline at end of file diff --git a/application/common/autoload.py b/application/common/autoload.py new file mode 100644 index 0000000..99a5719 --- /dev/null +++ b/application/common/autoload.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from kcweb.common import * +from ${appname} import config +import math,random +G=globals.G \ No newline at end of file diff --git a/application/config/__init__.py b/application/config/__init__.py new file mode 100644 index 0000000..3257846 --- /dev/null +++ b/application/config/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +from .other import * +#下面的配置都是全局的 +# 应用配置 +app['app_debug']=True #是否开启调试模式 +app['tpl_folder']='./${appname}' #设置模板文件目录名 注意:所有的配置目录都是以您的运行文件所在目录开始 +app['before_request']='before_request' #设置请求前要执行的函数名 +app['after_request']='after_request' #设置请求后要执行的函数名 +app['staticpath']='${appname}/static' #静态主要目录 +# redis配置 +redis['host']='127.0.0.1' #服务器地址 +redis['port']=6379 #端口 +redis['password']='' #密码 +redis['db']=0 #Redis数据库 注:Redis用0或1或2等表示 +redis['pattern']=True # True连接池链接 False非连接池链接 +redis['ex']=0 #过期时间 (秒) +#缓存配置 +cache['type']='File' #驱动方式 支持 File Redis +cache['path']='./${appname}/runtime/cachepath' #缓存保存目录 +cache['expire']=120 #缓存有效期 0表示永久缓存 +cache['host']=redis['host'] #Redis服务器地址 +cache['port']=redis['port'] #Redis 端口 +cache['password']=redis['password'] #Redis登录密码 +cache['db']=1 #Redis数据库 注:Redis用1或2或3等表示 +# session配置 +session['type']='File' #session 存储类型 支持 file、Redis +session['path']='./${appname}/runtime/session/temp' #session缓存目录 +session['expire']=86400 #session默认有效期 该时间是指session在服务的保留时间,通常情况下浏览器上会保留该值的10倍 +session['prefix']="KCW" # SESSION 前缀 +session['host']=redis['host'] #Redis服务器地址 +session['port']=redis['port'] 
#Redis 端口 +session['password']=redis['password'] #Redis登录密码 +session['db']=2 #Redis数据库 注:Redis用1或2或3等表示 + +#email配置 +email['sender']='' #发件人邮箱账号 +email['pwd']='' #发件人邮箱密码(如申请的smtp给的口令) +email['sendNick']='' #发件人昵称 +email['theme']='' #默认主题 +email['recNick']='' #默认收件人昵称 \ No newline at end of file diff --git a/application/config/other.py b/application/config/other.py new file mode 100644 index 0000000..1e4e433 --- /dev/null +++ b/application/config/other.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +from kcweb.config import * +#路由配置 +route['default']=True #是否开启默认路由 默认路由开启后面不影响以下配置的路由,模块名/版本名/控制器文件名/方法名 作为路由地址 如:http://www.kcw.com/api/v1/index/index/ +route['modular']='${modular}' +route['edition']='v1' +route['files']='index' #默认路由文件(控制器) +route['funct']='index' #默认路由函数 (操作方法) +route['methods']=['POST','GET'] #默认请求方式 +route['children']=[ + {'title':'首页','path':'','component':'index/home','methods':['POST','GET']}, + {'title':'接口','path':'/inter/:id','component':'index/inter','methods':['POST','GET']} +] +#sqlite配置 +sqlite['db']='kcwlicuxweb' #sqlite数据库文件 diff --git a/common/__init__.py b/common/__init__.py new file mode 100644 index 0000000..11ffe10 --- /dev/null +++ b/common/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from . autoload import * +from . import globals +from . import session +from . 
import request diff --git a/common/autoload.py b/common/autoload.py new file mode 100644 index 0000000..4b917d1 --- /dev/null +++ b/common/autoload.py @@ -0,0 +1,323 @@ +# -*- coding: utf-8 -*- +import time,hashlib,json,re,os,platform +import datetime as core_datetime +from kcweb import config +from kcweb.utill.dateutil.relativedelta import relativedelta as core_relativedelta +from kcweb.utill.db import mysql as kcwmysql +from kcweb.utill.db import mongodb as kcwmongodb +from kcweb.utill.db import sqlite as kcwsqlite +from kcweb.utill.cache import cache as kcwcache +from kcweb.utill.redis import redis as kcwredis +from kcweb.utill.http import Http +from kcweb.utill.queues import Queues +from kcweb.utill.db import model +from mako.template import Template as kcwTemplate +import smtplib +from email.mime.text import MIMEText +from email.utils import formataddr +from . import globals + +redis=kcwredis() +def send_mail(user,text="邮件内容",theme="邮件主题",recNick="收件人昵称"): + """发送邮件 + + 参数 user:接收邮件的邮箱地址 + + 参数 text:邮件内容 + + 参数 theme:邮件主题 + + 参数 recNick:收件人昵称 + + return Boolean类型 + """ + ret=True + if not theme: + theme=config.email['theme'] + if not recNick: + recNick=config.email['recNick'] + try: + msg=MIMEText(text,'plain','utf-8') + msg['From']=formataddr([config.email['sendNick'],config.email['sender']]) + msg['To']=formataddr([recNick,user]) + msg['Subject']=theme + + server=smtplib.SMTP_SSL("smtp.qq.com", 465) + server.login(config.email['sender'], config.email['pwd']) + server.sendmail(config.email['sender'],[user,],msg.as_string()) + server.quit() + except Exception: + ret=False + return ret +get_sysinfodesffafew=None +def get_sysinfo(): + """获取系统信息 + + return dict类型 + """ + global get_sysinfodesffafew + if get_sysinfodesffafew: + sysinfo=get_sysinfodesffafew + else: + sysinfo={} + sysinfo['platform']=platform.platform() #获取操作系统名称及版本号,'Linux-3.13.0-46-generic-i686-with-Deepin-2014.2-trusty' + sysinfo['version']=platform.version() #获取操作系统版本号,'#76-Ubuntu SMP Thu Feb 26 
18:52:49 UTC 2015' + sysinfo['architecture']=platform.architecture() #获取操作系统的位数,('32bit', 'ELF') + sysinfo['machine']=platform.machine() #计算机类型,'i686' + sysinfo['node']=platform.node() #计算机的网络名称,'XF654' + sysinfo['processor']=platform.processor() #计算机处理器信息,''i686' + sysinfo['uname']=platform.uname() #包含上面所有的信息汇总,('Linux', 'XF654', '3.13.0-46-generic', '#76-Ubuntu SMP Thu Feb 26 18:52:49 UTC 2015', 'i686', 'i686') + sysinfo['start_time']=times() + get_sysinfodesffafew=sysinfo + # 还可以获得计算机中python的一些信息: + # import platform + # platform.python_build() + # platform.python_compiler() + # platform.python_branch() + # platform.python_implementation() + # platform.python_revision() + # platform.python_version() + # platform.python_version_tuple() + return sysinfo +def Template(path,**context): + "模板渲染引擎函数,使用配置的模板路径" + return Templates(str(config.app['tpl_folder'])+str(path),**context) +def Templates(path,**context): + "模板渲染引擎函数,需要完整的模板目录文件" + body='' + with open(path, 'r',encoding='utf-8') as f: + content=f.read() + t=kcwTemplate(content) + body=t.render(**context) + return body +def mysql(table=None,configss=None): + """mysql数据库操作实例 + + 参数 table:表名 + + 参数 configss 数据库配置 可以传数据库名字符串 + """ + dbs=kcwmysql.mysql() + if table is None: + return dbs + elif configss: + return dbs.connect(configss).table(table) + else: + return dbs.connect(config.database).table(table) +def sqlite(table=None,configss=None): + """sqlite数据库操作实例 + + 参数 table:表名 + + 参数 configss 数据库配置 可以传数据库名字符串 + """ + dbs=kcwsqlite.sqlite() + if table is None: + return dbs + elif configss: + return dbs.connect(configss).table(table) + else: + return dbs.connect(config.sqlite).table(table) +def M(table=None,confi=None): + """数据库操作实例 + + 参数 table:表名 + + 参数 confi 数据库配置 可以传数据库名字符串 + """ + if confi: + if confi['type']=='sqlite': + return sqlite(table,confi) + else: + return mysql(table,confi) + else: + if config.database['type']=='sqlite': + return sqlite(table) + else: + return mysql(table) +def 
mongo(table=None,configss=None): + """mongodb数据库操作实例 + + 参数 table:表名(mongodb数据库集合名) + + 参数 configss mongodb数据库配置 可以传数据库名字符串 + """ + mObj=kcwmongodb.mongo() + if table is None: + return mObj + elif configss: + return mObj.connect(configss).table(table) + else: + return mObj.connect(config.mongo).table(table) +def is_index(params,index): + """判断列表或字典里的索引是否存在 + + params 列表或字典 + + index 索引值 + + return Boolean类型 + """ + try: + params[index] + except KeyError: + return False + except IndexError: + return False + else: + return True +def set_cache(name,values,expire="no"): + """设置缓存 + + 参数 name:缓存名 + + 参数 values:缓存值 + + 参数 expire:缓存有效期 0表示永久 单位 秒 + + return Boolean类型 + """ + return kcwcache.cache().set_cache(name,values,expire) +def get_cache(name): + """获取缓存 + + 参数 name:缓存名 + + return 或者的值 + """ + return kcwcache.cache().get_cache(name) +def del_cache(name): + """删除缓存 + + 参数 name:缓存名 + + return Boolean类型 + """ + return kcwcache.cache().del_cache(name) +def md5(strs): + """md5加密 + + 参数 strs:要加密的字符串 + + return String类型 + """ + m = hashlib.md5() + b = strs.encode(encoding='utf-8') + m.update(b) + return m.hexdigest() +def times(): + """生成时间戳整数 精确到秒(10位数字) + + return int类型 + """ + return int(time.time()) +def json_decode(strs): + """json字符串转python类型""" + try: + return json.loads(strs) + except Exception: + return {} +def json_encode(strs): + """python列表或字典转成字符串""" + try: + return json.dumps(strs,ensure_ascii=False) + except Exception: + return "" +def dateoperator(date,years=0,formats='%Y%m%d%H%M%S',months=0, days=0, hours=0, minutes=0,seconds=0, + leapdays=0, weeks=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + """日期相加减计算 + date 2019-10-10 + formats 设置需要返回的时间格式 默认%Y%m%d%H%M%S + + years 大于0表示加年 反之减年 + months 大于0表示加月 反之减月 + days 大于0表示加日 反之减日 + + return %Y%m%d%H%M%S + """ + formatss='%Y%m%d%H%M%S' + date=re.sub('[-年/月::日 时分秒]','',date) + if len(date) < 8: + return 
None + if len(date) < 14: + s=14-len(date) + i=0 + while i < s: + date=date+"0" + i=i+1 + d = core_datetime.datetime.strptime(date, formatss) + strs=(d + core_relativedelta(years=years,months=months, days=days, hours=hours, minutes=minutes,seconds=seconds, + leapdays=leapdays, weeks=weeks, microseconds=microseconds, + year=year, month=month, day=day, weekday=weekday, + yearday=yearday, nlyearday=nlyearday, + hour=hour, minute=minute, second=second, microsecond=microsecond)) + strs=strs.strftime(formats) + return strs +def get_folder(): + '获取当前框架所在目录' + path=os.path.split(os.path.realpath(__file__))[0] #当前文件目录 + framepath=path.split('\\') ##框架主目录 + s='' + for k in framepath: + s=s+'/'+k + framepath=s[1:] + return re.sub('/kcw/common','',framepath) #包所在目录 +# aa=[] +def get_file(folder='./',is_folder=True,suffix="*",lists=[],append=False): + """获取文件夹下所有文件夹和文件 + + folder 要获取的文件夹路径 + + is_folder 是否返回列表中包含文件夹 + + suffix 获取指定后缀名的文件 默认全部 + """ + if not append: + lists=[] + lis=os.listdir(folder) + for files in lis: + if not os.path.isfile(folder+"/"+files): + if is_folder: + zd={"type":"folder","path":folder+"/"+files,'name':files} + lists.append(zd) + get_file(folder+"/"+files,is_folder,suffix,lists,append=True) + else: + if suffix=='*': + zd={"type":"file","path":folder+"/"+files,'name':files} + lists.append(zd) + else: + if files[-(len(suffix)+1):]=='.'+str(suffix): + zd={"type":"file","path":folder+"/"+files,'name':files} + lists.append(zd) + return lists + +def list_to_tree(data, pk = 'id', pid = 'pid', child = 'lowerlist', root=0,childstatus=True): + """列表转换tree + + data 要转换的列表 + + pk 关联节点字段 + + pid 父节点字段 + + lowerlist 子节点列表 + + root 主节点值 + + childstatus 当子节点列表为空时是否需要显示子节点字段 + """ + arr = [] + for v in data: + if v[pid] == root: + kkkk=list_to_tree(data,pk,pid,child,v[pk],childstatus) + if childstatus: + # print(kkkk) + v[child]=kkkk + else: + if kkkk: + v[child]=kkkk + arr.append(v) + return arr \ No newline at end of file diff --git a/common/globals.py 
b/common/globals.py new file mode 100644 index 0000000..64ee2a0 --- /dev/null +++ b/common/globals.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +from threading import local +##普通全局变量 请求结束后面删除 +VAR = local() +HEADER = local() +G = local() + + + diff --git a/common/request.py b/common/request.py new file mode 100644 index 0000000..3b2582a --- /dev/null +++ b/common/request.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +from kcweb.common import globals as kcwglobals +from urllib import parse +import json +class args: + "获取url" + def get(name): + params = parse.parse_qs(parse.urlparse(kcwglobals.HEADER.URL).query) + try: + k=params[name][0] + except: + k=None + return k +class froms: + "获取from" + def get(name): + data=kcwglobals.HEADER.BODY_DATA + params = parse.parse_qs(parse.urlparse("?"+str(data)).query) + # print(params) + try: + k=parse.unquote(params[name][0]) + except: + k=None + return k +class HEADER: + def Method(): + return kcwglobals.HEADER.Method + def URL(): + return kcwglobals.HEADER.URL + def PATH_INFO(): + return kcwglobals.HEADER.PATH_INFO + def SERVER_PROTOCOL(): + return kcwglobals.HEADER.SERVER_PROTOCOL + def HTTP_HOST(): + return kcwglobals.HEADER.HTTP_HOST +def get_data(): + "获取请求参数体" + return kcwglobals.HEADER.BODY_DATA +def get_json(): + "获取请求参数体json" + try: + return json.loads(kcwglobals.HEADER.BODY_DATA) + except: + return None diff --git a/common/session.py b/common/session.py new file mode 100644 index 0000000..373ca2f --- /dev/null +++ b/common/session.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +from kcweb.config import session as kcwsession +from kcweb.common import globals as kcwglobals +import time,random,hashlib +from kcweb.utill.cache import cache as kcwcache +from datetime import datetime +def __md5(strs): + m = hashlib.md5() + m.update(strs.encode()) + return m.hexdigest() +def set(name,value,expire=None): + "设置session" + if not expire: + expire=kcwsession['expire'] + HTTP_COOKIE=kcwglobals.HEADER.HTTP_COOKIE + 
SESSIONID="SESSIONID"+__md5(str(name)+str(kcwsession['prefix']))[0:8] ####### + try: + HTTP_COOKIE=HTTP_COOKIE.split(";") + except: + token=None + else: + token=None + for k in HTTP_COOKIE: + if SESSIONID in k: + token=k.split("=")[1] + if not token: + strs="kcw"+str(time.time())+str(random.randint(0,9)) + token=__md5(strs) + # print(token) + kcwglobals.set_cookie=SESSIONID+"="+token+";expires="+datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')+"; Max-Age=%d;Path=/" % (int(expire)*10) + kcwcache.cache().set_config(kcwsession).set_cache(token,value,expire) + return True +def get(name): + "获取session" + HTTP_COOKIE=kcwglobals.HEADER.HTTP_COOKIE + try: + HTTP_COOKIE=HTTP_COOKIE.split(";") + except: + return None + SESSIONID="SESSIONID"+__md5(str(name)+str(kcwsession['prefix']))[0:8] ######### + token='' + for k in HTTP_COOKIE: + if SESSIONID in k: + token=k.split("=")[1] + v=kcwcache.cache().set_config(kcwsession).get_cache(token) + return v +def rm(name): + "删除session" + HTTP_COOKIE=kcwglobals.HEADER.HTTP_COOKIE + try: + HTTP_COOKIE=HTTP_COOKIE.split(";") + except: + return None + SESSIONID="SESSIONID"+__md5(str(name)+str(kcwsession['prefix']))[0:8] ####### + token='' + for k in HTTP_COOKIE: + if SESSIONID in k: + token=k.split("=")[1] + kcwcache.cache().set_config(kcwsession).del_cache(token) + kcwglobals.set_cookie=SESSIONID+"="+token+";expires="+datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')+"; Max-Age=2" + return True + diff --git a/config/__init__.py b/config/__init__.py new file mode 100644 index 0000000..554752a --- /dev/null +++ b/config/__init__.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# 应用配置 +app={} +app['app_debug']=True #是否开启调试模式 +app['tpl_folder']='./tpl' #设置模板文件目录名 注意:不能配置目录路径 +app['before_request']='' #设置请求前执行的函数 +app['after_request']='' #设置请求后执行的函数 +app['staticpath']='static' +# redis配置 +redis={} +redis['host']='127.0.0.1' #服务器地址 +redis['port']=6379 #端口 +redis['password']='' #密码 +redis['db']=0 #Redis数据库 注:Redis用0或1或2等表示 
+redis['pattern']=True # True连接池链接 False非连接池链接 +redis['ex']=0 #过期时间 (秒) + +#缓存配置 +cache={} +cache['type']='File' #驱动方式 支持 File Redis +cache['path']='runtime/cachepath' #缓存保存目录 +cache['expire']=120 #缓存有效期 0表示永久缓存 +cache['host']=redis['host'] #Redis服务器地址 +cache['port']=redis['port'] #Redis 端口 +cache['password']=redis['password'] #Redis登录密码 +cache['db']=1 #Redis数据库 注:Redis用1或2或3等表示 + +# session配置 +session={} +session['type']='File' #session 存储类型 支持 file、Redis +session['path']='runtime/session' #session缓存目录 +session['expire']=86400 #session默认有效期 该时间是指session在服务的保留时间,通常情况下浏览器上会保留该值的10倍 +session['prefix']="KCW" # SESSION 前缀 +session['host']=redis['host'] #Redis服务器地址 +session['port']=redis['port'] #Redis 端口 +session['password']=redis['password'] #Redis登录密码 +session['db']=2 #Redis数据库 注:Redis用1或2或3等表示 + + +# 默认数据库配置 +database={} +database['type']='mysql' # 数据库类型 目前支持mysql和sqlite +database['host']=['127.0.0.1']#服务器地址 [地址1,地址2,地址3...] 多个地址分布式(主从服务器)下有效 +database['port']=[3306] #端口 [端口1,端口2,端口3...] +database['user']=['root'] #用户名 [用户名1,用户名2,用户名3...] +database['password']=['root'] #密码 [密码1,密码2,密码3...] +database['db']=['test'] #数据库名 [数据库名1,数据库名2,数据库名3...] 
+database['charset']='utf8' #数据库编码默认采用utf8 +database['pattern']=False # True数据库长连接模式 False数据库短连接模式 注:建议web应用使用短连接,cli应用使用长连接 +database['cli']=False # 是否以cli方式运行 +database['dbObjcount']=1 # 连接池数量(单个数据库地址链接数量),数据库链接实例数量 mysql长链接模式下有效 +database['deploy']=0 # 数据库部署方式:0 集中式(单一服务器),1 分布式(主从服务器) mysql数据库有效 +database['master_num']=1 #主服务器数量 不能超过host服务器数量 (等于服务器数量表示读写不分离:主主复制。 小于服务器表示读写分离:主从复制。) mysql数据库有效 +database['master_dql']=False #主服务器是否可以执行dql语句 是否可以执行select语句 主服务器数量大于等于host服务器数量时必须设置True +database['break']=0 #断线重连次数,0表示不重连。 注:cli模式下 10秒进行一次重连并且连接次数是当前配置的300倍 + +#sqlite配置 +sqlite={} +sqlite['db']='kcwdb' # 数据库文件存放地址 + +#mongodb配置 +mongo={} +mongo['host']='127.0.0.1' +mongo['port']='27017' +mongo['user']='' +mongo['password']='' +mongo['db']='test' +mongo['retryWrites']=False #是否支持重新写入 + + + +#路由配置 +route={} +route['default']=True +route['modular']='' +route['edition']='' +route['files']='index' #默认路由文件 +route['funct']='index' #默认路由函数 +route['methods']=['POST','GET'] #默认请求方式 +route['children']=[] +#email配置 +email={} +email['sender']='' #发件人邮箱账号 +email['pwd']='' #发件人邮箱密码(如申请的smtp给的口令) +email['sendNick']='' #发件人昵称 +email['theme']='' #默认主题 +email['recNick']='' #默认收件人昵称 + +kcweb={} +kcweb['name']='kcweb' #项目的名称 +kcweb['version']='2.40.7' #项目版本 +kcweb['description']='基于python后端开发框架' #项目的简单描述 +kcweb['long_description']='' #项目详细描述 +kcweb['license']='MIT' #开源协议 mit开源 +kcweb['url']='http://intapp.kwebapp.cn/index/index/doc/docde/1' +kcweb['author']='禄可集团-坤坤' #名字 +kcweb['author_email']='fk1402936534@qq.com' #邮件地址 +kcweb['maintainer']='坤坤' #维护人员的名字 +kcweb['maintainer_email']='fk1402936534@qq.com' #维护人员的邮件地址 + +#其他配置 +other={} + diff --git a/create.py b/create.py new file mode 100644 index 0000000..036c060 --- /dev/null +++ b/create.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +import os,re,traceback,shutil,platform,sys +from mako.template import Template as kcwTemplate +def Template(path,**context): + body='' + with open(str(path), 'r',encoding='utf-8') as f: + content=f.read() 
+ t=kcwTemplate(content) + body=t.render(**context) + return body +class create: + appname=None + modular=None + path=os.path.split(os.path.realpath(__file__))[0] #当前文件目录 + def __init__(self,appname="application",modular="api"): + self.appname=appname + self.modular=modular + if not os.path.exists(self.appname): + os.makedirs(self.appname) + if not os.path.exists(self.appname+"/common"): + os.makedirs(self.appname+"/common") + f=open(self.appname+"/common/__init__.py","w+",encoding='utf-8') + content=Template(self.path+"/application/common/__init__.py",appname=appname,modular=modular) + f.write(content) + f.close() + f=open(self.appname+"/common/autoload.py","w+",encoding='utf-8') + content=Template(self.path+"/application/common/autoload.py",appname=appname,modular=modular) + f.write(content) + f.close() + if not os.path.exists(self.appname+"/config"): + os.makedirs(self.appname+"/config") + f=open(self.appname+"/config/__init__.py","w+",encoding='utf-8') + content=Template(self.path+"/application/config/__init__.py",appname=appname,modular=modular) + f.write(content) + f.close() + f=open(self.appname+"/config/other.py","w+",encoding='utf-8') + content=Template(self.path+"/application/config/other.py",appname=appname,modular=modular) + f.write(content) + f.close() + if not os.path.exists(self.appname+"/"+self.modular): #创建模块 + os.makedirs(self.appname+"/"+self.modular) + self.zxmodular("") + #在应用目录下创建初始化文件 + lists=os.listdir(self.appname) + modulars=[] + filters=['__init__','__pycache__','common','config','runtime','log'] + for files in lists: + if not os.path.isfile(self.appname+"/"+files): + if files not in filters: + modulars.append(files) + f=open(self.appname+"/__init__.py","w+",encoding='utf-8') + content=Template(self.path+"/application/__init__.py",appname=appname,tuple_modular=modulars) + f.write(content) + f.close() + if "Windows" in platform.platform(): + pythonname="python" + else: + pythonname="python3" + sys.argv[0]=re.sub('.py','',sys.argv[0]) + 
content=('# #gunicorn -b 0.0.0.0:39010 '+self.appname+':app\n'+ + 'from kcweb import web\n'+ + 'import '+self.appname+' as application\n'+ + 'from '+self.appname+'.common import *\n'+ + 'Queues.start() #开启队列监听\n'+ + 'app=web(__name__,application)\n'+ + 'if __name__ == "__main__":\n'+ + ' #host监听ip port端口 name python解释器名字 (windows一般是python linux一般是python3)\n'+ + ' app.run(host="0.0.0.0",port="39001",name="'+pythonname+'")') + f=open("./"+sys.argv[0]+".py","w+",encoding='utf-8') + f.write(content) + f.close() + def zxmodular(self,sourcep): + "处理模块文件" + path1=self.path+"/application/api"+sourcep + path2=self.appname+"/"+self.modular+sourcep + lists=os.listdir(path1) + for files in lists: + if os.path.isfile(path1+"/"+files): + if ".py" in files: + content=Template(path1+"/"+files,appname=self.appname,modular=self.modular) + f=open(path2+"/"+files,"w+",encoding='utf-8') + f.write(content) + f.close() + else: + f=open(path1+"/"+files,"r",encoding='utf-8') + content=f.read() + f.close() + f=open(path2+"/"+files,"w+",encoding='utf-8') + f.write(content) + f.close() + elif files != '__pycache__': + if not os.path.exists(path2+"/"+files): + os.makedirs(path2+"/"+files) + self.zxmodular(sourcep+"/"+files) + diff --git a/tpl/error.html b/tpl/error.html new file mode 100644 index 0000000..e23605b --- /dev/null +++ b/tpl/error.html @@ -0,0 +1,27 @@ + + + +${title} + + + + + + +
+ % if e: +

${e}

+ % endif + % if isinstance(data,str): + ${data} + % elif isinstance(data,list): +
    + % for i in data: +
  • ${i}
  • + % endfor +
+ % endif + +
+ + \ No newline at end of file diff --git a/utill/Queues b/utill/Queues new file mode 100644 index 0000000000000000000000000000000000000000..59a2485c5ea3ecbb2fa749a1825af7000325d298 GIT binary patch literal 12288 zcmeI2O>9(E6o6-FLrY;=V`7k%Z)1c=%>Dma5UtM{K}w-@s)7pMpL4O1g0y9!OQT8g zCvjz5m>8mo#soHQBqYWdL_jdcts57BuyW(VrDrU?wl%NDExea@+L_$<&AHz_=iEDK zpPm?-o`)hGo1Jm<1#jHbXf_*<6-A@b9BDKfefYg}HSw4Jr6>5a_xwLDWIS^J#TJfQ zH+-}){0%kK31qSx5Tfh0?hnxH7oj(F|$B*Dg z&qH%?`~(~gtDp9jlkL%NyXcNSKGrT)joo_G&A@iNbK4s`ngg4+Y-zqZ^*3|k1e}1m z#m99k2#c>3Tc=~OYrNZjvOQUh@9h@jQ)6SrzR6vCMkn_dyW9JV(W&m1FgxvGE)Qlz zn2N#SScDwz()+SyZ#y%0xahW@!+G4E%k~0hXOGRUe$mA^KRxpgYianl*6^)H|M02C z$XCOsMqVA+I{fSKhr|0?Keyg%?QHdJ`g+rAo5aT7H-51ZHWov_41F*(F|=vJ_Z!~W zuw(Gg!7m4o4sIK`Ht_Mlfr0zh|Fr&{^_{yQiR&rW25JMffjhuJ=iUvCJrA~;54QRZ zv&zUQlpsa{5sW(#n9`O;8725z=RUl7=F@BE&Ru`|RR4{WpWQt5?)5Y8-dK45;Mu+N zC12z`(CViwBxfx1fl41WI3rCo;8_Zs2q~6x*8O(+TNv#idFldCnD$6_Iq>*$F`<5VRLoad06; z&J0sjGhwKXiYzCra2o9zv~$66qa}}2#ALWe6c>soEx;}3EO)Z-W9c%4Y7&yP zy?8V+1|p5tr2tYVW*MQCkgMfO8KSiITuCaqP8bW2$f?u}1WVE>R^{aE!qxInhKLX_ zaw)A6l%dO*AVemZ)q)u%_!ta6DLu9Pok2tdM< zmNwonrxleZMM8u0GEoWwcx5D4$`>+38by+z1T;gE4M7Hnel2BMP!+3J=Sul}hUg%I zVG{86knk}%Cp7Vb0L8p@D^g{}5??6~WQg~;ERiLTDF)!EkCAwyl2^<}%>K&h{7~-C z5G^0BzO zG9+nZ9A+zE{Zf?-#mbfh3=k`+qoFdAOJz4hA{Wv-pR^)M0Y(KhVYeg(%_bkJlkH;p zbcSHYYMeghSoX$5qLndB;{+{Q0a=}F7t6^E2`*u8LkKGH9we4BE-j6oAj*pB+PPRx zWC%+N`;kO|37k@936~_LV2l^ex@srim-{lLSG&$Bhz^^hBHm)3lpI1J3>c-=)45RY z&5$UJRxu_UsA7_O;&FYwO1)x^T2+g^P>yE^=eTe6s+X~7h?9Z|&Xf`yt0)o2Dwp`X za!-Z`f=NP+N=FcMj(;{?F~J!>(Jck)fTN75) z&c8V^b8wdmo=`y|#U@KU1m`hvP2{*#WM%oR=Dghd+kA86j|P6#S8bp+P#dTX)COt; gwSn3|ZJ;(#8>kJ`25JL$kb$9Q-1): + s=re.sub('/','',kk[0]) + if s==requeststr[0:len(s)]: + data=True + break + else: + routestr= re.sub('/','',k) + if routestr==requeststr: + data=True + break + for k in config.oauth['unwanted']: + if k == '*': + data=False + kk=k.split("*") + if(len(kk)>1): + s=re.sub('/','',kk[0]) + if s==requeststr[0:len(s)]: + data=False + break + else: + routestr= 
re.sub('/','',k) + if routestr==requeststr: + data=False + break + return data \ No newline at end of file diff --git a/utill/cache/cache.py b/utill/cache/cache.py new file mode 100644 index 0000000..17c87f1 --- /dev/null +++ b/utill/cache/cache.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +import os,sys,time,hashlib,json +from kcweb import config +from kcweb.utill import redis +from kcweb.utill.db.mysql import mysql + +import time,hashlib +def md5(strs): + """md5加密""" + if not strs: + return strs + m = hashlib.md5() + b = strs.encode(encoding='utf-8') + m.update(b) + return m.hexdigest() +def times(): + """时间戳 精确到秒""" + return int(time.time()) +def json_decode(jsonstr): + """json字符串转python类型""" + try: + return eval(jsonstr) + except Exception: + return {} +# def json_decode(strs): +# """json字符串转python类型""" +# try: +# return json.loads(strs) +# except Exception: +# return {} +class cache: + "开发完善中..." + __name=None + __values=None + __cachepath='' #os.path.split(os.path.realpath(__file__))[0]+'/../../../' + __config=config.cache + __redisobj=None + __mysqlobj=None + def __setmysqlonj(self): + conf=config.database + if 'host' in self.__config and self.__config['host']: + conf['host']=[self.__config['host']] + if 'port' in self.__config and self.__config['port']: + conf['port']=[self.__config['port']] + if 'user' in self.__config and self.__config['user']: + conf['user']=[self.__config['user']] + if 'password' in self.__config and self.__config['password']: + conf['password']=[self.__config['password']] + if 'db' in self.__config and self.__config['db']: + conf['db']=[self.__config['db']] + db=mysql() + self.__mysqlobj=db.connect(conf) + def __setredisobj(self): + "设置redis链接实例" + conf=config.redis + if 'host' in self.__config and self.__config['host']: + conf['host']=self.__config['host'] + if 'port' in self.__config and self.__config['port']: + conf['port']=self.__config['port'] + if 'password' in self.__config and self.__config['password']: + 
conf['password']=self.__config['password'] + if 'db' in self.__config and self.__config['db']: + conf['db']=self.__config['db'] + if conf['pattern']: + if conf['password']: + redis_pool=redis.ConnectionPool(host=conf['host'],password=conf['password'],port=conf['port'],db=conf['db']) + else: + redis_pool=redis.ConnectionPool(host=conf['host'],port=conf['port'],db=conf['db']) + self.__redisobj=redis.Redis(connection_pool=redis_pool) + else: + if conf['password']: + self.__redisobj=redis.Redis(host=conf['host'],password=conf['password'],port=conf['port'],db=conf['db']) + else: + self.__redisobj=redis.Redis(host=conf['host'],port=conf['port'],db=conf['db']) + def set_cache(self,name,values,expire = 'no'): + """设置缓存 + + 参数 name:缓存名 + + 参数 values:缓存值 + + 参数 expire:缓存有效期 0表示永久 单位 秒 + + return Boolean类型 + """ + # print(name) + # exit() + self.__name=name + self.__values=values + if expire != 'no': + self.__config['expire']=int(expire) + return self.__seltype('set') + def get_cache(self,name): + """获取缓存 + + return 或者的值 + """ + self.__name=name + return self.__seltype('get') + def del_cache(self,name): + """删除缓存 + + return Boolean类型 + """ + self.__name=name + return self.__seltype('del') + def set_config(self,congig): + """设置缓存配置 + """ + self.__config=congig + return self + + + def __seltype(self,types): + """选择缓存""" + # m = hashlib.md5() + # b = self.__name.encode(encoding='utf-8') + # m.update(b) + self.__name=md5(self.__name) + if self.__config['type'] == 'File': + if types == 'set': + return self.__setfilecache() + elif types=='get': + return self.__getfilecache() + elif types=='del': + return self.__delfilecache() + elif self.__config['type'] == 'Redis': + self.__setredisobj() + if types == 'set': + return self.__setrediscache() + elif types=='get': + return self.__getrediscache() + elif types=='del': + return self.__delrediscache() + elif self.__config['type'] == 'MySql': + self.__setmysqlonj() + if types == 'set': + return self.__setmysqlcache() + elif types == 'get': 
+ return self.__getmysqlcache() + elif types == 'del': + return self.__delmysqlcache() + def __setmysqlcache(self): ######################################################################################## + """设置mysql缓存 + + return Boolean类型 + """ + data=[str(self.__values)] + strs="[" + for k in data: + strs=strs+k + strs=strs+"]" + k=self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).count('id') + self.__setmysqlonj() + if k: + return self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).update({"val":strs,"expire":self.__config['expire'],"time":times()}) + else: + return self.__mysqlobj.table('fanshukeji_core_cache').insert({"name":self.__name,"val":strs,"expire":self.__config['expire'],"time":times()}) + def __getmysqlcache(self): + """获取mysql缓存 + + return 缓存的值 + """ + data=self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).find() + if data : + if data['expire']>0 and times()-data['time']>data['expire']: + self.__setmysqlonj() + self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).delete() + return False + else: + return eval(data['val'])[0] + else: + return False + def __delmysqlcache(self): + """删除mysql缓存 + + return Boolean类型 + """ + return self.__mysqlobj.table('fanshukeji_core_cache').where("name",self.__name).delete() + def __setrediscache(self): + """设置redis缓存 + + return Boolean类型 + """ + # print(self.__redisobj) + data=[self.__values] + try: + if self.__config['expire']: + self.__redisobj.set(self.__name,str(data),self.__config['expire']) + else: + self.__redisobj.set(self.__name,str(data)) + except: + return False + return True + def __getrediscache(self): + """获取redis缓存 + + return 缓存的值 + """ + lists=self.__redisobj.get(self.__name) + if lists: + data=eval(lists) + return data[0] + else: + return False + def __delrediscache(self): + """删除redis缓存 + + return int类型 + """ + return self.__redisobj.delete(self.__name) + def __setfilecache(self): + """设置文件缓存 + + return Boolean类型 
+ """ + data={ + 'expire':self.__config['expire'], + 'time':times(), + 'values':self.__values + } + if not os.path.exists(self.__config['path']): + os.makedirs(self.__config['path']) #多层创建目录 + f=open(self.__config['path']+"/"+self.__name,"w") + f.write(str(data)) + f.close() + return True + def __getfilecache(self): + """获取文件缓存 + + return 缓存的值 + """ + try: + f=open(self.__config['path']+"/"+self.__name,"r") + except Exception: + return None + json_str=f.read() + f.close() + ar=json_decode(json_str) + + if ar['expire'] > 0: + if (times()-ar['time']) > ar['expire']: + + self.__delfilecache() + return None + else: + return ar['values'] + else: + return ar['values'] + def __delfilecache(self): + """删除文件缓存 + + return Boolean类型 + """ + if not os.path.exists(self.__config['path']+"/"+self.__name): + return True + try: + os.remove(self.__config['path']+"/"+self.__name) + except: + return False + return True \ No newline at end of file diff --git a/utill/dateutil/__init__.py b/utill/dateutil/__init__.py new file mode 100644 index 0000000..0defb82 --- /dev/null +++ b/utill/dateutil/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +try: + from ._version import version as __version__ +except ImportError: + __version__ = 'unknown' + +__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', + 'utils', 'zoneinfo'] diff --git a/utill/dateutil/_common.py b/utill/dateutil/_common.py new file mode 100644 index 0000000..4eb2659 --- /dev/null +++ b/utill/dateutil/_common.py @@ -0,0 +1,43 @@ +""" +Common code used in multiple modules. 
+""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __hash__(self): + return hash(( + self.weekday, + self.n, + )) + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +# vim:ts=4:sw=4:et diff --git a/utill/dateutil/_version.py b/utill/dateutil/_version.py new file mode 100644 index 0000000..670d7ab --- /dev/null +++ b/utill/dateutil/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '2.8.0' diff --git a/utill/dateutil/easter.py b/utill/dateutil/easter.py new file mode 100644 index 0000000..53b7c78 --- /dev/null +++ b/utill/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. 
+ + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms `_ + + and + + `The Calendar FAQ: Easter `_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/utill/dateutil/parser/__init__.py b/utill/dateutil/parser/__init__.py new file mode 100644 index 0000000..216762c --- /dev/null +++ b/utill/dateutil/parser/__init__.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- 
+from ._parser import parse, parser, parserinfo +from ._parser import DEFAULTPARSER, DEFAULTTZPARSER +from ._parser import UnknownTimezoneWarning + +from ._parser import __doc__ + +from .isoparser import isoparser, isoparse + +__all__ = ['parse', 'parser', 'parserinfo', + 'isoparse', 'isoparser', + 'UnknownTimezoneWarning'] + + +### +# Deprecate portions of the private interface so that downstream code that +# is improperly relying on it is given *some* notice. + + +def __deprecated_private_func(f): + from functools import wraps + import warnings + + msg = ('{name} is a private function and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=f.__name__) + + @wraps(f) + def deprecated_func(*args, **kwargs): + warnings.warn(msg, DeprecationWarning) + return f(*args, **kwargs) + + return deprecated_func + +def __deprecate_private_class(c): + import warnings + + msg = ('{name} is a private class and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=c.__name__) + + class private_class(c): + __doc__ = c.__doc__ + + def __init__(self, *args, **kwargs): + warnings.warn(msg, DeprecationWarning) + super(private_class, self).__init__(*args, **kwargs) + + private_class.__name__ = c.__name__ + + return private_class + + +from ._parser import _timelex, _resultbase +from ._parser import _tzparser, _parsetz + +_timelex = __deprecate_private_class(_timelex) +_tzparser = __deprecate_private_class(_tzparser) +_resultbase = __deprecate_private_class(_resultbase) +_parsetz = __deprecated_private_func(_parsetz) diff --git a/utill/dateutil/parser/_parser.py b/utill/dateutil/parser/_parser.py new file mode 100644 index 0000000..0da0f3e --- /dev/null +++ b/utill/dateutil/parser/_parser.py @@ -0,0 +1,1580 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. 
+ +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: + +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import re +import string +import time +import warnings + +from calendar import monthrange +from io import StringIO + +import six +from six import integer_types, text_type + +from decimal import Decimal + +from warnings import warn + +from .. import relativedelta +from .. import tz + +__all__ = ["parse", "parserinfo"] + + +# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth +# making public and/or figuring out if there is something we can +# take off their plate. 
+class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([.,])") + + def __init__(self, instream): + if six.PY2: + # In Python 2, we can't duck type properly because unicode has + # a 'decode' function, and we'd be double-decoding + if isinstance(instream, (bytes, bytearray)): + instream = instream.decode() + else: + if getattr(instream, 'decode', None) is not None: + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + elif getattr(instream, 'read', None) is None: + raise TypeError('Parser must be a string or character stream, not ' + '{itype}'.format(itype=instream.__class__.__name__)) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. + + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. 
+ if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. + token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' 
+ else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. 
+ + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. + """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), # TODO: "Tues" + ("Wed", "Wednesday"), + ("Thu", "Thursday"), # TODO: "Thurs" + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), # TODO: "Febr" + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z", "z"] + PERTAIN = ["of"] + TZOFFSET = {} + # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", + # "Anno Domini", "Year of Our Lord"] + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + try: + return 
self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + """ + Converts two-digit years to year within [-50, 49] + range of self._year (current local time) + """ + + # Function contract is that the year is always positive + assert year >= 0 + + if year < 100 and not century_specified: + # assume current century to start + year += self._century + + if year >= self._year + 50: # if too far in future + year -= 100 + elif year < self._year - 50: # if too far in past + year += 100 + + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if ((res.tzoffset == 0 and not res.tzname) or + (res.tzname == 'Z' or res.tzname == 'z')): + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.dstridx = None + self.mstridx = None + self.ystridx = None + + @property + def has_year(self): + return self.ystridx is not None + + @property + def has_month(self): + return self.mstridx is not None + + @property + def has_day(self): + return self.dstridx is not None + + def could_be_day(self, value): + if self.has_day: + return False + elif 
not self.has_month: + return 1 <= value <= 31 + elif not self.has_year: + # Be permissive, assume leapyear + month = self[self.mstridx] + return 1 <= value <= monthrange(2000, month)[1] + else: + month = self[self.mstridx] + year = self[self.ystridx] + return 1 <= value <= monthrange(year, month)[1] + + def append(self, val, label=None): + if hasattr(val, '__len__'): + if val.isdigit() and len(val) > 2: + self.century_specified = True + if label not in [None, 'Y']: # pragma: no cover + raise ValueError(label) + label = 'Y' + elif val > 100: + self.century_specified = True + if label not in [None, 'Y']: # pragma: no cover + raise ValueError(label) + label = 'Y' + + super(self.__class__, self).append(int(val)) + + if label == 'M': + if self.has_month: + raise ValueError('Month is already set') + self.mstridx = len(self) - 1 + elif label == 'D': + if self.has_day: + raise ValueError('Day is already set') + self.dstridx = len(self) - 1 + elif label == 'Y': + if self.has_year: + raise ValueError('Year is already set') + self.ystridx = len(self) - 1 + + def _resolve_from_stridxs(self, strids): + """ + Try to resolve the identities of year/month/day elements using + ystridx, mstridx, and dstridx, if enough of these are specified. 
+ """ + if len(self) == 3 and len(strids) == 2: + # we can back out the remaining stridx value + missing = [x for x in range(3) if x not in strids.values()] + key = [x for x in ['y', 'm', 'd'] if x not in strids] + assert len(missing) == len(key) == 1 + key = key[0] + val = missing[0] + strids[key] = val + + assert len(self) == len(strids) # otherwise this should not be called + out = {key: self[strids[key]] for key in strids} + return (out.get('y'), out.get('m'), out.get('d')) + + def resolve_ymd(self, yearfirst, dayfirst): + len_ymd = len(self) + year, month, day = (None, None, None) + + strids = (('y', self.ystridx), + ('m', self.mstridx), + ('d', self.dstridx)) + + strids = {key: val for key, val in strids if val is not None} + if (len(self) == len(strids) > 0 or + (len(self) == 3 and len(strids) == 2)): + return self._resolve_from_stridxs(strids) + + mstridx = self.mstridx + + if len_ymd > 3: + raise ValueError("More than three YMD values") + elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): + # One member, or two members with a month string + if mstridx is not None: + month = self[mstridx] + # since mstridx is 0 or 1, self[mstridx-1] always + # looks up the other element + other = self[mstridx - 1] + else: + other = self[0] + + if len_ymd > 1 or mstridx is None: + if other > 31: + year = other + else: + day = other + + elif len_ymd == 2: + # Two members with numbers + if self[0] > 31: + # 99-01 + year, month = self + elif self[1] > 31: + # 01-99 + month, year = self + elif dayfirst and self[1] <= 12: + # 13-01 + day, month = self + else: + # 01-13 + month, day = self + + elif len_ymd == 3: + # Three members + if mstridx == 0: + if self[1] > 31: + # Apr-2003-25 + month, year, day = self + else: + month, day, year = self + elif mstridx == 1: + if self[0] > 31 or (yearfirst and self[2] <= 31): + # 99-Jan-01 + year, month, day = self + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. 
+ day, month, year = self + + elif mstridx == 2: + # WTF!? + if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if (self[0] > 31 or + self.ystridx == 0 or + (yearfirst and self[1] <= 12 and self[2] <= 31)): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, + ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param \\*\\*kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises TypeError: + Raised for non-string or character stream input. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
+ """ + + if default is None: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ValueError("Unknown string format:", timestr) + + if len(res) == 0: + raise ValueError("String does not contain a date:", timestr) + + ret = self._build_naive(res, default) + + if not ignoretz: + ret = self._build_tzaware(ret, res, tzinfos) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm","any_unused_tokens"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. + + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". 
+ + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + skipped_idxs = [] + + # year/month/day list + ymd = _ymd() + + len_l = len(l) + i = 0 + try: + while i < len_l: + + # Check if it's a number + value_repr = l[i] + try: + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Numeric token + i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) + + # Check weekday + elif info.weekday(l[i]) is not None: + value = info.weekday(l[i]) + res.weekday = value + + # Check month name + elif info.month(l[i]) is not None: + value = info.month(l[i]) + ymd.append(value, 'M') + + if i + 1 < len_l: + if l[i + 1] in ('-', '/'): + # Jan-01[-99] + sep = l[i + 1] + ymd.append(l[i + 2]) + + if i + 3 < len_l and l[i + 3] == sep: + # Jan-01-99 + ymd.append(l[i + 4]) + i += 2 + + i += 2 + + elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and + info.pertain(l[i + 2])): + # Jan of 01 + # In this case, 01 is clearly year + if l[i + 4].isdigit(): + # Convert it here to become unambiguous + value = int(l[i + 4]) + year = str(info.convertyear(value)) + ymd.append(year, 'Y') + else: + # Wrong guess + pass + # TODO: not hit in tests + i += 4 + + # Check am/pm + elif info.ampm(l[i]) is not None: + value = info.ampm(l[i]) + val_is_ampm = 
self._ampm_valid(res.hour, res.ampm, fuzzy) + + if val_is_ampm: + res.hour = self._adjust_ampm(res.hour, value) + res.ampm = value + + elif fuzzy: + skipped_idxs.append(i) + + # Check for a timezone name + elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i + 1 < len_l and l[i + 1] in ('+', '-'): + l[i + 1] = ('+', '-')[l[i + 1] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. + res.tzname = None + + # Check for a numbered timezone + elif res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + len_li = len(l[i + 1]) + + # TODO: check that l[i + 1] is integer? + if len_li == 4: + # -0300 + hour_offset = int(l[i + 1][:2]) + min_offset = int(l[i + 1][2:]) + elif i + 2 < len_l and l[i + 2] == ':': + # -03:00 + hour_offset = int(l[i + 1]) + min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
+ i += 2 + elif len_li <= 2: + # -[0]3 + hour_offset = int(l[i + 1][:2]) + min_offset = 0 + else: + raise ValueError(timestr) + + res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) + + # Look for a timezone name between parenthesis + if (i + 5 < len_l and + info.jump(l[i + 2]) and l[i + 3] == '(' and + l[i + 5] == ')' and + 3 <= len(l[i + 4]) and + self._could_be_tzname(res.hour, res.tzname, + None, l[i + 4])): + # -0300 (BRST) + res.tzname = l[i + 4] + i += 4 + + i += 1 + + # Check jumps + elif not (info.jump(l[i]) or fuzzy): + raise ValueError(timestr) + + else: + skipped_idxs.append(i) + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) + + res.century_specified = ymd.century_specified + res.year = year + res.month = month + res.day = day + + except (IndexError, ValueError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + skipped_tokens = self._recombine_skipped(l, skipped_idxs) + return res, tuple(skipped_tokens) + else: + return res, None + + def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): + # Token is a number + value_repr = tokens[idx] + try: + value = self._to_decimal(value_repr) + except Exception as e: + six.raise_from(ValueError('Unknown numeric token'), e) + + len_li = len(value_repr) + + len_l = len(tokens) + + if (len(ymd) == 3 and len_li in (2, 4) and + res.hour is None and + (idx + 1 >= len_l or + (tokens[idx + 1] != ':' and + info.hms(tokens[idx + 1]) is None))): + # 19990101T23[59] + s = tokens[idx] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = tokens[idx] + + if not ymd and '.' not in tokens[idx]: + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + + # TODO: Check if res attributes already set. 
+ res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = self._parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = tokens[idx] + ymd.append(s[:4], 'Y') + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) + (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) + if hms is not None: + # TODO: checking that hour/minute/second are not + # already set? + self._assign_hms(res, value_repr, hms) + + elif idx + 2 < len_l and tokens[idx + 1] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? + (res.minute, res.second) = self._parse_min_sec(value) + + if idx + 4 < len_l and tokens[idx + 3] == ':': + res.second, res.microsecond = self._parsems(tokens[idx + 4]) + + idx += 2 + + idx += 2 + + elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): + sep = tokens[idx + 1] + ymd.append(value_repr) + + if idx + 2 < len_l and not info.jump(tokens[idx + 2]): + if tokens[idx + 2].isdigit(): + # 01-01[-01] + ymd.append(tokens[idx + 2]) + else: + # 01-Jan[-01] + value = info.month(tokens[idx + 2]) + + if value is not None: + ymd.append(value, 'M') + else: + raise ValueError() + + if idx + 3 < len_l and tokens[idx + 3] == sep: + # We have three members + value = info.month(tokens[idx + 4]) + + if value is not None: + ymd.append(value, 'M') + else: + ymd.append(tokens[idx + 4]) + idx += 2 + + idx += 1 + idx += 1 + + elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): + if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: + # 12 am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) + idx += 1 + else: + # Year, month or day + 
ymd.append(value) + idx += 1 + + elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): + # 12am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) + idx += 1 + + elif ymd.could_be_day(value): + ymd.append(value) + + elif not fuzzy: + raise ValueError() + + return idx + + def _find_hms_idx(self, idx, tokens, info, allow_jump): + len_l = len(tokens) + + if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: + # There is an "h", "m", or "s" label following this token. We take + # assign the upcoming label to the current token. + # e.g. the "12" in 12h" + hms_idx = idx + 1 + + elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and + info.hms(tokens[idx+2]) is not None): + # There is a space and then an "h", "m", or "s" label. + # e.g. the "12" in "12 h" + hms_idx = idx + 2 + + elif idx > 0 and info.hms(tokens[idx-1]) is not None: + # There is a "h", "m", or "s" preceeding this token. Since neither + # of the previous cases was hit, there is no label following this + # token, so we use the previous label. + # e.g. the "04" in "12h04" + hms_idx = idx-1 + + elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and + info.hms(tokens[idx-2]) is not None): + # If we are looking at the final token, we allow for a + # backward-looking check to skip over a space. + # TODO: Are we sure this is the right condition here? 
+ hms_idx = idx - 2 + + else: + hms_idx = None + + return hms_idx + + def _assign_hms(self, res, value_repr, hms): + # See GH issue #427, fixing float rounding + value = self._to_decimal(value_repr) + + if hms == 0: + # Hour + res.hour = int(value) + if value % 1: + res.minute = int(60*(value % 1)) + + elif hms == 1: + (res.minute, res.second) = self._parse_min_sec(value) + + elif hms == 2: + (res.second, res.microsecond) = self._parsems(value_repr) + + def _could_be_tzname(self, hour, tzname, tzoffset, token): + return (hour is not None and + tzname is None and + tzoffset is None and + len(token) <= 5 and + (all(x in string.ascii_uppercase for x in token) + or token in self.info.UTCZONE)) + + def _ampm_valid(self, hour, ampm, fuzzy): + """ + For fuzzy parsing, 'a' or 'am' (both valid English words) + may erroneously trigger the AM/PM flag. Deal with that + here. + """ + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. + if fuzzy and ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with AM or PM flag.') + elif not 0 <= hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for 12-hour clock.') + + return val_is_ampm + + def _adjust_ampm(self, hour, ampm): + if hour < 12 and ampm == 1: + hour += 12 + elif hour == 12 and ampm == 0: + hour = 0 + return hour + + def _parse_min_sec(self, value): + # TODO: Every usage of this function sets res.second to the return + # value. Are there any cases where second will be returned as None and + # we *dont* want to set res.second = None? 
+ minute = int(value) + second = None + + sec_remainder = value % 1 + if sec_remainder: + second = int(60 * sec_remainder) + return (minute, second) + + def _parsems(self, value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + def _parse_hms(self, idx, tokens, info, hms_idx): + # TODO: Is this going to admit a lot of false-positives for when we + # just happen to have digits and "h", "m" or "s" characters in non-date + # text? I guess hex hashes won't have that problem, but there's plenty + # of random junk out there. + if hms_idx is None: + hms = None + new_idx = idx + elif hms_idx > idx: + hms = info.hms(tokens[hms_idx]) + new_idx = hms_idx + else: + # Looking backwards, increment one. + hms = info.hms(tokens[hms_idx]) + 1 + new_idx = idx + + return (new_idx, hms) + + def _recombine_skipped(self, tokens, skipped_idxs): + """ + >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] + >>> skipped_idxs = [0, 1, 2, 5] + >>> _recombine_skipped(tokens, skipped_idxs) + ["foo bar", "baz"] + """ + skipped_tokens = [] + for i, idx in enumerate(sorted(skipped_idxs)): + if i > 0 and idx - 1 == skipped_idxs[i - 1]: + skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] + else: + skipped_tokens.append(tokens[idx]) + + return skipped_tokens + + def _build_tzinfo(self, tzinfos, tzname, tzoffset): + if callable(tzinfos): + tzdata = tzinfos(tzname, tzoffset) + else: + tzdata = tzinfos.get(tzname) + # handle case where tzinfo is paased an options that returns None + # eg tzinfos = {'BRST' : None} + if isinstance(tzdata, datetime.tzinfo) or tzdata is None: + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(tzname, tzdata) + return tzinfo + + def _build_tzaware(self, naive, res, tzinfos): + if (callable(tzinfos) or (tzinfos and res.tzname 
in tzinfos)): + tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) + aware = naive.replace(tzinfo=tzinfo) + aware = self._assign_tzname(aware, res.tzname) + + elif res.tzname and res.tzname in time.tzname: + aware = naive.replace(tzinfo=tz.tzlocal()) + + # Handle ambiguous local datetime + aware = self._assign_tzname(aware, res.tzname) + + # This is mostly relevant for winter GMT zones parsed in the UK + if (aware.tzname() != res.tzname and + res.tzname in self.info.UTCZONE): + aware = aware.replace(tzinfo=tz.tzutc()) + + elif res.tzoffset == 0: + aware = naive.replace(tzinfo=tz.tzutc()) + + elif res.tzoffset: + aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + elif not res.tzname and not res.tzoffset: + # i.e. no timezone information was found. + aware = naive + + elif res.tzname: + # tz-like string was parsed but we don't know what to do + # with it + warnings.warn("tzname {tzname} identified but not understood. " + "Pass `tzinfos` argument in order to correctly " + "return a timezone-aware datetime. In a future " + "version, this will raise an " + "exception.".format(tzname=res.tzname), + category=UnknownTimezoneWarning) + aware = naive + + return aware + + def _build_naive(self, res, default): + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back + # to the end of the month. 
+ cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + naive = default.replace(**repl) + + if res.weekday is not None and not res.day: + naive = naive + relativedelta.relativedelta(weekday=res.weekday) + + return naive + + def _assign_tzname(self, dt, tzname): + if dt.tzname() != tzname: + new_dt = tz.enfold(dt, fold=1) + if new_dt.tzname() == tzname: + return new_dt + + return dt + + def _to_decimal(self, val): + try: + decimal_value = Decimal(val) + # See GH 662, edge case, infinite value should not be converted via `_to_decimal` + if not decimal_value.is_finite(): + raise ValueError("Converted decimal value is infinite or NaN") + except Exception as e: + msg = "Could not convert %s to decimal" % val + six.raise_from(ValueError(msg), e) + else: + return decimal_value + + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. + If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. + + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. 
This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". 
+ + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
+ """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] + used_idxs = list() + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + + for ii in range(j): + used_idxs.append(ii) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. 
+ signal = (1, -1)[l[i] == '+'] + used_idxs.append(i) + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) * signal) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i]) * 3600 + + int(l[i + 2]) * 60) * signal) + used_idxs.append(i) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2]) * 3600 * signal) + else: + return None + used_idxs.append(i) + i += 1 + if res.dstabbr: + break + else: + break + + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789+-"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + used_idxs.append(i) + i += 2 + if l[i] == '-': + value = int(l[i + 1]) * -1 + used_idxs.append(i) + i += 1 + else: + value = int(l[i]) + used_idxs.append(i) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i]) - 1) % 7 + else: + x.day = int(l[i]) + used_idxs.append(i) + i += 2 + x.time = int(l[i]) + used_idxs.append(i) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + used_idxs.append(i) + i += 1 + else: + signal = 1 + used_idxs.append(i) + res.dstoffset = (res.stdoffset + int(l[i]) * signal) + + # This was a made-up format that is not in normal use + warn(('Parsed time zone "%s"' % tzstr) + + 'is in a non-standard dateutil-specific format, which ' + + 'is now deprecated; support for parsing this format ' + + 'will be removed in future versions. 
It is recommended ' + + 'that you switch to a standard format like the GNU ' + + 'TZ variable format.', tz.DeprecatedTzFormatWarning) + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + used_idxs.append(i) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + used_idxs.append(i) + i += 1 + x.month = int(l[i]) + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.weekday = (int(l[i]) - 1) % 7 + else: + # year day (zero based) + x.yday = int(l[i]) + 1 + + used_idxs.append(i) + i += 1 + + if i < len_l and l[i] == '/': + used_idxs.append(i) + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 + used_idxs.append(i) + i += 2 + if i + 1 < len_l and l[i + 1] == ':': + used_idxs.append(i) + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2]) * 3600) + else: + return None + used_idxs.append(i) + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + unused_idxs = set(range(len_l)).difference(used_idxs) + res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + +class UnknownTimezoneWarning(RuntimeWarning): + """Raised when the parser finds a timezone it cannot parse into a tzinfo""" +# vim:ts=4:sw=4:et diff --git 
a/utill/dateutil/parser/isoparser.py b/utill/dateutil/parser/isoparser.py new file mode 100644 index 0000000..e3cf6d8 --- /dev/null +++ b/utill/dateutil/parser/isoparser.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- +""" +This module offers a parser for ISO-8601 strings + +It is intended to support all valid date, time and datetime formats per the +ISO-8601 specification. + +..versionadded:: 2.7.0 +""" +from datetime import datetime, timedelta, time, date +import calendar +from dateutil import tz + +from functools import wraps + +import re +import six + +__all__ = ["isoparse", "isoparser"] + + +def _takes_ascii(f): + @wraps(f) + def func(self, str_in, *args, **kwargs): + # If it's a stream, read the whole thing + str_in = getattr(str_in, 'read', lambda: str_in)() + + # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII + if isinstance(str_in, six.text_type): + # ASCII is the same in UTF-8 + try: + str_in = str_in.encode('ascii') + except UnicodeEncodeError as e: + msg = 'ISO-8601 strings should contain only ASCII characters' + six.raise_from(ValueError(msg), e) + + return f(self, str_in, *args, **kwargs) + + return func + + +class isoparser(object): + def __init__(self, sep=None): + """ + :param sep: + A single character that separates date and time portions. If + ``None``, the parser will accept any single character. + For strict ISO-8601 adherence, pass ``'T'``. + """ + if sep is not None: + if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): + raise ValueError('Separator must be a single, non-numeric ' + + 'ASCII character') + + sep = sep.encode('ascii') + + self._sep = sep + + @_takes_ascii + def isoparse(self, dt_str): + """ + Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. + + An ISO-8601 datetime string consists of a date portion, followed + optionally by a time portion - the date and time portions are separated + by a single character separator, which is ``T`` in the official + standard. 
Incomplete date formats (such as ``YYYY-MM``) may *not* be + combined with a time portion. + + Supported date formats are: + + Common: + + - ``YYYY`` + - ``YYYY-MM`` or ``YYYYMM`` + - ``YYYY-MM-DD`` or ``YYYYMMDD`` + + Uncommon: + + - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) + - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day + + The ISO week and day numbering follows the same logic as + :func:`datetime.date.isocalendar`. + + Supported time formats are: + + - ``hh`` + - ``hh:mm`` or ``hhmm`` + - ``hh:mm:ss`` or ``hhmmss`` + - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) + + Midnight is a special case for `hh`, as the standard supports both + 00:00 and 24:00 as a representation. The decimal separator can be + either a dot or a comma. + + + .. caution:: + + Support for fractional components other than seconds is part of the + ISO-8601 standard, but is not currently implemented in this parser. + + Supported time zone offset formats are: + + - `Z` (UTC) + - `±HH:MM` + - `±HHMM` + - `±HH` + + Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, + with the exception of UTC, which will be represented as + :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such + as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. + + :param dt_str: + A string or stream containing only an ISO-8601 datetime string + + :return: + Returns a :class:`datetime.datetime` representing the string. + Unspecified components default to their lowest value. + + .. warning:: + + As of version 2.7.0, the strictness of the parser should not be + considered a stable part of the contract. Any valid ISO-8601 string + that parses correctly with the default settings will continue to + parse correctly in future versions, but invalid strings that + currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not + guaranteed to continue failing in future versions if they encode + a valid date. + + .. 
versionadded:: 2.7.0 + """ + components, pos = self._parse_isodate(dt_str) + + if len(dt_str) > pos: + if self._sep is None or dt_str[pos:pos + 1] == self._sep: + components += self._parse_isotime(dt_str[pos + 1:]) + else: + raise ValueError('String contains unknown ISO components') + + if len(components) > 3 and components[3] == 24: + components[3] = 0 + return datetime(*components) + timedelta(days=1) + + return datetime(*components) + + @_takes_ascii + def parse_isodate(self, datestr): + """ + Parse the date portion of an ISO string. + + :param datestr: + The string portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.date` object + """ + components, pos = self._parse_isodate(datestr) + if pos < len(datestr): + raise ValueError('String contains unknown ISO ' + + 'components: {}'.format(datestr)) + return date(*components) + + @_takes_ascii + def parse_isotime(self, timestr): + """ + Parse the time portion of an ISO string. + + :param timestr: + The time portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.time` object + """ + components = self._parse_isotime(timestr) + if components[0] == 24: + components[0] = 0 + return time(*components) + + @_takes_ascii + def parse_tzstr(self, tzstr, zero_as_utc=True): + """ + Parse a valid ISO time zone string. + + See :func:`isoparser.isoparse` for details on supported formats. + + :param tzstr: + A string representing an ISO time zone offset + + :param zero_as_utc: + Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones + + :return: + Returns :class:`dateutil.tz.tzoffset` for offsets and + :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is + specified) offsets equivalent to UTC. 
+ """ + return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) + + # Constants + _DATE_SEP = b'-' + _TIME_SEP = b':' + _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') + + def _parse_isodate(self, dt_str): + try: + return self._parse_isodate_common(dt_str) + except ValueError: + return self._parse_isodate_uncommon(dt_str) + + def _parse_isodate_common(self, dt_str): + len_str = len(dt_str) + components = [1, 1, 1] + + if len_str < 4: + raise ValueError('ISO string too short') + + # Year + components[0] = int(dt_str[0:4]) + pos = 4 + if pos >= len_str: + return components, pos + + has_sep = dt_str[pos:pos + 1] == self._DATE_SEP + if has_sep: + pos += 1 + + # Month + if len_str - pos < 2: + raise ValueError('Invalid common month') + + components[1] = int(dt_str[pos:pos + 2]) + pos += 2 + + if pos >= len_str: + if has_sep: + return components, pos + else: + raise ValueError('Invalid ISO format') + + if has_sep: + if dt_str[pos:pos + 1] != self._DATE_SEP: + raise ValueError('Invalid separator in ISO string') + pos += 1 + + # Day + if len_str - pos < 2: + raise ValueError('Invalid common day') + components[2] = int(dt_str[pos:pos + 2]) + return components, pos + 2 + + def _parse_isodate_uncommon(self, dt_str): + if len(dt_str) < 4: + raise ValueError('ISO string too short') + + # All ISO formats start with the year + year = int(dt_str[0:4]) + + has_sep = dt_str[4:5] == self._DATE_SEP + + pos = 4 + has_sep # Skip '-' if it's there + if dt_str[pos:pos + 1] == b'W': + # YYYY-?Www-?D? 
+ pos += 1 + weekno = int(dt_str[pos:pos + 2]) + pos += 2 + + dayno = 1 + if len(dt_str) > pos: + if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: + raise ValueError('Inconsistent use of dash separator') + + pos += has_sep + + dayno = int(dt_str[pos:pos + 1]) + pos += 1 + + base_date = self._calculate_weekdate(year, weekno, dayno) + else: + # YYYYDDD or YYYY-DDD + if len(dt_str) - pos < 3: + raise ValueError('Invalid ordinal day') + + ordinal_day = int(dt_str[pos:pos + 3]) + pos += 3 + + if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): + raise ValueError('Invalid ordinal day' + + ' {} for year {}'.format(ordinal_day, year)) + + base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) + + components = [base_date.year, base_date.month, base_date.day] + return components, pos + + def _calculate_weekdate(self, year, week, day): + """ + Calculate the day of corresponding to the ISO year-week-day calendar. + + This function is effectively the inverse of + :func:`datetime.date.isocalendar`. 
+ + :param year: + The year in the ISO calendar + + :param week: + The week in the ISO calendar - range is [1, 53] + + :param day: + The day in the ISO calendar - range is [1 (MON), 7 (SUN)] + + :return: + Returns a :class:`datetime.date` + """ + if not 0 < week < 54: + raise ValueError('Invalid week: {}'.format(week)) + + if not 0 < day < 8: # Range is 1-7 + raise ValueError('Invalid weekday: {}'.format(day)) + + # Get week 1 for the specific year: + jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it + week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) + + # Now add the specific number of weeks and days to get what we want + week_offset = (week - 1) * 7 + (day - 1) + return week_1 + timedelta(days=week_offset) + + def _parse_isotime(self, timestr): + len_str = len(timestr) + components = [0, 0, 0, 0, None] + pos = 0 + comp = -1 + + if len(timestr) < 2: + raise ValueError('ISO time too short') + + has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP + + while pos < len_str and comp < 5: + comp += 1 + + if timestr[pos:pos + 1] in b'-+Zz': + # Detect time zone boundary + components[-1] = self._parse_tzstr(timestr[pos:]) + pos = len_str + break + + if comp < 3: + # Hour, minute, second + components[comp] = int(timestr[pos:pos + 2]) + pos += 2 + if (has_sep and pos < len_str and + timestr[pos:pos + 1] == self._TIME_SEP): + pos += 1 + + if comp == 3: + # Fraction of a second + frac = self._FRACTION_REGEX.match(timestr[pos:]) + if not frac: + continue + + us_str = frac.group(1)[:6] # Truncate to microseconds + components[comp] = int(us_str) * 10**(6 - len(us_str)) + pos += len(frac.group()) + + if pos < len_str: + raise ValueError('Unused components in ISO string') + + if components[0] == 24: + # Standard supports 00:00 and 24:00 as representations of midnight + if any(component != 0 for component in components[1:4]): + raise ValueError('Hour may only be 24 at 24:00:00.000') + + return components + + def _parse_tzstr(self, tzstr, 
zero_as_utc=True): + if tzstr == b'Z' or tzstr == b'z': + return tz.tzutc() + + if len(tzstr) not in {3, 5, 6}: + raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') + + if tzstr[0:1] == b'-': + mult = -1 + elif tzstr[0:1] == b'+': + mult = 1 + else: + raise ValueError('Time zone offset requires sign') + + hours = int(tzstr[1:3]) + if len(tzstr) == 3: + minutes = 0 + else: + minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) + + if zero_as_utc and hours == 0 and minutes == 0: + return tz.tzutc() + else: + if minutes > 59: + raise ValueError('Invalid minutes in time zone offset') + + if hours > 23: + raise ValueError('Invalid hours in time zone offset') + + return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) + + +DEFAULT_ISOPARSER = isoparser() +isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/utill/dateutil/relativedelta.py b/utill/dateutil/relativedelta.py new file mode 100644 index 0000000..c65c66e --- /dev/null +++ b/utill/dateutil/relativedelta.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is designed to be applied to an existing datetime and + can replace specific components of that datetime, or represents an interval + of time. + + It is based on the specification of the excellent work done by M.-A. Lemburg + in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. 
The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding aritmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc) available in the + relativedelta module. These instances may receive a parameter N, + specifying the Nth weekday, which could be positive or negative + (like MO(+1) or MO(-2)). Not specifying it is the same as specifying + +1. You can also use an integer, where 0=MO. This argument is always + relative e.g. if the calculated date is already Monday, using MO(1) + or MO(-1) won't change the day. To effectively make it absolute, use + it in combination with the day argument (e.g. day=1, MO(1) for first + Monday of the month). + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + There are relative and absolute forms of the keyword + arguments. The plural is relative, and the singular is + absolute. 
For each argument in the order below, the absolute form + is applied first (by setting each attribute to that value) and + then the relative form (by adding the value to the attribute). + + The order of attributes considered when this relativedelta is + added to a datetime is: + + 1. Year + 2. Month + 3. Day + 4. Hours + 5. Minutes + 6. Seconds + 7. Microseconds + + Finally, weekday is applied, using the rule described above. + + For example + + >>> from datetime import datetime + >>> from dateutil.relativedelta import relativedelta, MO + >>> dt = datetime(2018, 4, 9, 13, 37, 0) + >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) + >>> dt + delta + datetime.datetime(2018, 4, 2, 14, 37) + + First, the day is set to 1 (the first of the month), then 25 hours + are added, to get to the 2nd day and 14th hour, finally the + weekday is applied, but since the 2nd is already a Monday there is + no effect. + + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + if dt1 and dt2: + # datetime is a subclass of date. 
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + 
self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. " + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or 
self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=+1, hours=+14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. + """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + 
self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += 
self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + 
hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == 
other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/utill/dateutil/rrule.py b/utill/dateutil/rrule.py new file mode 100644 index 0000000..20a0c4a --- /dev/null +++ b/utill/dateutil/rrule.py @@ -0,0 +1,1736 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. 
+""" +import itertools +import datetime +import calendar +import re +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread, range +import heapq + +from ._common import weekday as weekdaybase +from .tz import tzutc, tzlocal + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. 
+ """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. + """ + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = 
advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penality. + def count(self): + """ Returns the number of recurrences in this set. It will have go + trough the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. + + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. 
+ + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. 
+ + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + If given, this determines how many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param until: + If given, this must be a datetime instance specifying the upper-bound + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. 
note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. 
It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. 
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + 
self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + 
else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. 
+ original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: 
ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res 
>= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + 
filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. 
+ + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). 
+ + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. 
+ rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. 
If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. + if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. 
+ dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. 
""" + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + + + +class _rrulestr(object): + """ Parses a string representation of a recurrence rule or set of + recurrence rules. + + :param s: + Required, a string defining one or more recurrence rules. + + :param dtstart: + If given, used as the default recurrence start if not specified in the + rule string. + + :param cache: + If set ``True`` caching of results will be enabled, improving + performance of multiple queries considerably. + + :param unfold: + If set ``True`` indicates that a rule string is split over more + than one line and should be joined before processing. + + :param forceset: + If set ``True`` forces a :class:`dateutil.rrule.rruleset` to + be returned. + + :param compatible: + If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime.datetime` object is returned. + + :param tzids: + If given, a callable or mapping used to retrieve a + :class:`datetime.tzinfo` from a string representation. + Defaults to :func:`dateutil.tz.gettz`. 
+ + :param tzinfos: + Additional time zone names / aliases which may be present in a string + representation. See :func:`dateutil.parser.parse` for more + information. + + :return: + Returns a :class:`dateutil.rrule.rruleset` or + :class:`dateutil.rrule.rrule` + """ + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. 
+ splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_date_value(self, date_value, parms, rule_tzids, + ignoretz, tzids, tzinfos): + global parser + if not parser: + from dateutil import parser + + datevals = [] + value_found = False + TZID = None + + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = rule_tzids[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, mapping, or None, ' + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + + # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found + # only once. 
+ if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: + raise ValueError("unsupported parm: " + parm) + else: + if value_found: + msg = ("Duplicate value parameter found in: " + parm) + raise ValueError(msg) + value_found = True + + for datestr in date_value.split(','): + date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) + if TZID is not None: + if date.tzinfo is None: + date = date.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART/EXDATE specifies multiple timezone') + datevals.append(date) + + return datevals + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + 
rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + exdatevals.extend( + self._parse_date_value(value, parms, + TZID_NAMES, ignoretz, + tzids, tzinfos) + ) + elif name == "DTSTART": + dtvals = self._parse_date_value(value, parms, TZID_NAMES, + ignoretz, tzids, tzinfos) + if len(dtvals) != 1: + raise ValueError("Multiple DTSTART values specified:" + + value) + dtstart = dtvals[0] + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + rset.exdate(value) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/utill/dateutil/tz/__init__.py b/utill/dateutil/tz/__init__.py new file mode 100644 index 0000000..5a2d9cd --- /dev/null +++ b/utill/dateutil/tz/__init__.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +from .tz import * +from .tz import __doc__ + +#: Convenience constant providing a :class:`tzutc()` instance +#: +#: .. 
versionadded:: 2.7.0 +UTC = tzutc() + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", + "enfold", "datetime_ambiguous", "datetime_exists", + "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] + + +class DeprecatedTzFormatWarning(Warning): + """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/utill/dateutil/tz/_common.py b/utill/dateutil/tz/_common.py new file mode 100644 index 0000000..594e082 --- /dev/null +++ b/utill/dateutil/tz/_common.py @@ -0,0 +1,419 @@ +from six import PY2 + +from functools import wraps + +from datetime import datetime, timedelta, tzinfo + + +ZERO = timedelta(0) + +__all__ = ['tzname_in_python2', 'enfold'] + + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + if PY2: + @wraps(namefunc) + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None: + name = name.encode() + + return name + + return adjust_encoding + else: + return namefunc + + +# The following is adapted from Alexander Belopolsky's tz library +# https://github.com/abalkin/tz +if hasattr(datetime, 'fold'): + # This is the pre-python 3.6 fold situation + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. 
versionadded:: 2.6.0 + """ + return dt.replace(fold=fold) + +else: + class _DatetimeWithFold(datetime): + """ + This is a class designed to provide a PEP 495-compliant interface for + Python versions before 3.6. It is used only for dates in a fold, so + the ``fold`` attribute is fixed at ``1``. + + .. versionadded:: 2.6.0 + """ + __slots__ = () + + def replace(self, *args, **kwargs): + """ + Return a datetime with the same attributes, except for those + attributes given new values by whichever keyword arguments are + specified. Note that tzinfo=None can be specified to create a naive + datetime from an aware datetime with no conversion of date and time + data. + + This is reimplemented in ``_DatetimeWithFold`` because pypy3 will + return a ``datetime.datetime`` even if ``fold`` is unchanged. + """ + argnames = ( + 'year', 'month', 'day', 'hour', 'minute', 'second', + 'microsecond', 'tzinfo' + ) + + for arg, argname in zip(args, argnames): + if argname in kwargs: + raise TypeError('Duplicate argument: {}'.format(argname)) + + kwargs[argname] = arg + + for argname in argnames: + if argname not in kwargs: + kwargs[argname] = getattr(self, argname) + + dt_class = self.__class__ if kwargs.get('fold', 1) else datetime + + return dt_class(**kwargs) + + @property + def fold(self): + return 1 + + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. 
versionadded:: 2.6.0 + """ + if getattr(dt, 'fold', 0) == fold: + return dt + + args = dt.timetuple()[:6] + args += (dt.microsecond, dt.tzinfo) + + if fold: + return _DatetimeWithFold(*args) + else: + return datetime(*args) + + +def _validate_fromutc_inputs(f): + """ + The CPython version of ``fromutc`` checks that the input is a ``datetime`` + object and that ``self`` is attached as its ``tzinfo``. + """ + @wraps(f) + def fromutc(self, dt): + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + return f(self, dt) + + return fromutc + + +class _tzinfo(tzinfo): + """ + Base class for all ``dateutil`` ``tzinfo`` objects. + """ + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + + dt = dt.replace(tzinfo=self) + + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) + + return same_dt and not same_offset + + def _fold_status(self, dt_utc, dt_wall): + """ + Determine the fold status of a "wall" datetime, given a representation + of the same datetime as a (naive) UTC datetime. This is calculated based + on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all + datetimes, and that this offset is the actual number of hours separating + ``dt_utc`` and ``dt_wall``. + + :param dt_utc: + Representation of the datetime as UTC + + :param dt_wall: + Representation of the datetime as "wall time". This parameter must + either have a `fold` attribute or have a fold-naive + :class:`datetime.tzinfo` attached, otherwise the calculation may + fail. 
+ """ + if self.is_ambiguous(dt_wall): + delta_wall = dt_wall - dt_utc + _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) + else: + _fold = 0 + + return _fold + + def _fold(self, dt): + return getattr(dt, 'fold', 0) + + def _fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurence, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + + # Re-implement the algorithm from Python's datetime.py + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # The original datetime.py code assumes that `dst()` defaults to + # zero during ambiguous times. PEP 495 inverts this presumption, so + # for pre-PEP 495 versions of python, we need to tweak the algorithm. + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + + dt += delta + # Set fold=1 so we can default to being in the fold for + # ambiguous dates. + dtdst = enfold(dt, fold=1).dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurance, chronologically, of the ambiguous datetime). 
+ + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + dt_wall = self._fromutc(dt) + + # Calculate the fold status given the two datetimes. + _fold = self._fold_status(dt, dt_wall) + + # Set the default fold value for ambiguous dates + return enfold(dt_wall, fold=_fold) + + +class tzrangebase(_tzinfo): + """ + This is an abstract base class for time zones represented by an annual + transition into and out of DST. Child classes should implement the following + methods: + + * ``__init__(self, *args, **kwargs)`` + * ``transitions(self, year)`` - this is expected to return a tuple of + datetimes representing the DST on and off transitions in standard + time. + + A fully initialized ``tzrangebase`` subclass should also provide the + following attributes: + * ``hasdst``: Boolean whether or not the zone uses DST. + * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects + representing the respective UTC offsets. + * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short + abbreviations in DST and STD, respectively. + * ``_hasdst``: Whether or not the zone has DST. + + .. 
versionadded:: 2.6.0 + """ + def __init__(self): + raise NotImplementedError('tzrangebase is an abstract base class') + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_base_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def fromutc(self, dt): + """ Given a datetime in UTC, return local time """ + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # Get transitions - if there are none, fixed offset + transitions = self.transitions(dt.year) + if transitions is None: + return dt + self.utcoffset(dt) + + # Get the transition times in UTC + dston, dstoff = transitions + + dston -= self._std_offset + dstoff -= self._std_offset + + utc_transitions = (dston, dstoff) + dt_utc = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt_utc, utc_transitions) + + if isdst: + dt_wall = dt + self._dst_offset + else: + dt_wall = dt + self._std_offset + + _fold = int(not isdst and self.is_ambiguous(dt_wall)) + + return enfold(dt_wall, fold=_fold) + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. 
versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ diff --git a/utill/dateutil/tz/_factories.py b/utill/dateutil/tz/_factories.py new file mode 100644 index 0000000..d2560eb --- /dev/null +++ b/utill/dateutil/tz/_factories.py @@ -0,0 +1,73 @@ +from datetime import timedelta +import weakref +from collections import OrderedDict + + +class _TzSingleton(type): + def __init__(cls, *args, **kwargs): + cls.__instance = None + super(_TzSingleton, cls).__init__(*args, **kwargs) + + def __call__(cls): + if cls.__instance is None: + cls.__instance = super(_TzSingleton, cls).__call__() + return cls.__instance + + +class _TzFactory(type): + def instance(cls, *args, **kwargs): + """Alternate constructor that returns a fresh instance""" + return type.__call__(cls, *args, **kwargs) + + +class _TzOffsetFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = weakref.WeakValueDictionary() + cls.__strong_cache = OrderedDict() + cls.__strong_cache_size = 8 + + def 
__call__(cls, name, offset): + if isinstance(offset, timedelta): + key = (name, offset.total_seconds()) + else: + key = (name, offset) + + instance = cls.__instances.get(key, None) + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(name, offset)) + + cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) + + # Remove an item if the strong cache is overpopulated + # TODO: Maybe this should be under a lock? + if len(cls.__strong_cache) > cls.__strong_cache_size: + cls.__strong_cache.popitem(last=False) + + return instance + + +class _TzStrFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = weakref.WeakValueDictionary() + cls.__strong_cache = OrderedDict() + cls.__strong_cache_size = 8 + + def __call__(cls, s, posix_offset=False): + key = (s, posix_offset) + instance = cls.__instances.get(key, None) + + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(s, posix_offset)) + + cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) + + + # Remove an item if the strong cache is overpopulated + # TODO: Maybe this should be under a lock? + if len(cls.__strong_cache) > cls.__strong_cache_size: + cls.__strong_cache.popitem(last=False) + + return instance + diff --git a/utill/dateutil/tz/tz.py b/utill/dateutil/tz/tz.py new file mode 100644 index 0000000..d05414e --- /dev/null +++ b/utill/dateutil/tz/tz.py @@ -0,0 +1,1836 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format +files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, +etc), TZ environment string (in all known formats), given ranges (with help +from relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. 
+""" +import datetime +import struct +import time +import sys +import os +import bisect +import weakref +from collections import OrderedDict + +import six +from six import string_types +from six.moves import _thread +from ._common import tzname_in_python2, _tzinfo +from ._common import tzrangebase, enfold +from ._common import _validate_fromutc_inputs + +from ._factories import _TzSingleton, _TzOffsetFactory +from ._factories import _TzStrFactory +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +# For warning about rounding tzinfo +from warnings import warn + +ZERO = datetime.timedelta(0) +EPOCH = datetime.datetime.utcfromtimestamp(0) +EPOCHORDINAL = EPOCH.toordinal() + + +@six.add_metaclass(_TzSingleton) +class tzutc(datetime.tzinfo): + """ + This is a tzinfo object that represents the UTC time zone. + + **Examples:** + + .. doctest:: + + >>> from datetime import * + >>> from dateutil.tz import * + + >>> datetime.now() + datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) + + >>> datetime.now(tzutc()) + datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) + + >>> datetime.now(tzutc()).tzname() + 'UTC' + + .. versionchanged:: 2.7.0 + ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will + always return the same object. + + .. doctest:: + + >>> from dateutil.tz import tzutc, UTC + >>> tzutc() is tzutc() + True + >>> tzutc() is UTC + True + """ + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. 
versionadded:: 2.6.0 + """ + return False + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Fast track version of fromutc() returns the original ``dt`` object for + any valid :py:class:`datetime.datetime` object. + """ + return dt + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +@six.add_metaclass(_TzOffsetFactory) +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. + :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object). + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = offset.total_seconds() + except (TypeError, AttributeError): + pass + + self._offset = datetime.timedelta(seconds=_get_supported_offset(offset)) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + @_validate_fromutc_inputs + def fromutc(self, dt): + return dt + self._offset + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. 
versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(self._offset.total_seconds())) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. + """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + self._tznames = tuple(time.tzname) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._tznames[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. 
It is unstable when deciding if + # the hour near to a change is DST or not. + # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if isinstance(other, tzlocal): + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + elif isinstance(other, tzutc): + return (not self._hasdst and + self._tznames[0] in {'UTC', 'GMT'} and + self._std_offset == ZERO) + elif isinstance(other, tzoffset): + return (not self._hasdst and + self._tznames[0] == other._name and + self._std_offset == other._offset) + else: + return NotImplemented + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + 
return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + `_ for more information. + Time zone files can be compiled from the `IANA Time Zone database files + `_ with the `zic time zone compiler + `_ + + .. 
note:: + + Only construct a ``tzfile`` directly if you have a specific timezone + file on disk that you want to read into a Python ``tzinfo`` object. + If you want to get a ``tzfile`` representing a specific IANA zone, + (e.g. ``'America/New_York'``), you should call + :func:`dateutil.tz.gettz` with the zone identifier. + + + **Examples:** + + Using the US Eastern time zone as an example, we can see that a ``tzfile`` + provides time zone information for the standard Daylight Saving offsets: + + .. testsetup:: tzfile + + from dateutil.tz import gettz + from datetime import datetime + + .. doctest:: tzfile + + >>> NYC = gettz('America/New_York') + >>> NYC + tzfile('/usr/share/zoneinfo/America/New_York') + + >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST + 2016-01-03 00:00:00-05:00 + + >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT + 2016-07-07 00:00:00-04:00 + + + The ``tzfile`` structure contains a fully history of the time zone, + so historical dates will also have the right offsets. For example, before + the adoption of the UTC standards, New York used local solar mean time: + + .. doctest:: tzfile + + >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT + 1901-04-12 00:00:00-04:56 + + And during World War II, New York was on "Eastern War Time", which was a + state of permanent daylight saving time: + + .. 
doctest:: tzfile + + >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT + 1944-02-07 00:00:00-04:00 + + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _nullcontext(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). 
+ typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list_utc = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. 
+ # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but seek for correct file position) + if leapcnt: + fileobj.seek(leapcnt * 8, os.SEEK_CUR) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + gmtoff = _get_supported_offset(gmtoff) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. 
+ out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list_utc: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + lastdst = None + lastoffset = None + lastdstoffset = None + lastbaseoffset = None + out.trans_list = [] + + for i, tti in enumerate(out.trans_idx): + offset = tti.offset + dstoffset = 0 + + if lastdst is not None: + if tti.isdst: + if not lastdst: + dstoffset = offset - lastoffset + + if not dstoffset and lastdstoffset: + dstoffset = lastdstoffset + + tti.dstoffset = datetime.timedelta(seconds=dstoffset) + lastdstoffset = dstoffset + + # If a time zone changes its base offset during a DST transition, + # then you need to adjust by the previous base offset to get the + # transition time in local time. Otherwise you use the current + # base offset. Ideally, I would have some mathematical proof of + # why this is true, but I haven't really thought about it enough. 
+ baseoffset = offset - dstoffset + adjustment = baseoffset + if (lastbaseoffset is not None and baseoffset != lastbaseoffset + and tti.isdst != lastdst): + # The base DST has changed + adjustment = lastbaseoffset + + lastdst = tti.isdst + lastoffset = offset + lastbaseoffset = baseoffset + + out.trans_list.append(out.trans_list_utc[i] + adjustment) + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + out.trans_list_utc = tuple(out.trans_list_utc) + + return out + + def _find_last_transition(self, dt, in_utc=False): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. + trans_list = self._trans_list_utc if in_utc else self._trans_list + idx = bisect.bisect_right(trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) >= len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def fromutc(self, dt): + """ + The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. + + :param dt: + A :py:class:`datetime.datetime` object. + + :raises TypeError: + Raised if ``dt`` is not a :py:class:`datetime.datetime` object. + + :raises ValueError: + Raised if this is called with a ``dt`` which does not have this + ``tzinfo`` attached. + + :return: + Returns a :py:class:`datetime.datetime` object representing the + wall time in ``self``'s time zone. 
+ """ + # These isinstance checks are in datetime.tzinfo, so we'll preserve + # them, even if we don't care about duck typing. + if not isinstance(dt, datetime.datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # First treat UTC as wall time and get the transition we're in. + idx = self._find_last_transition(dt, in_utc=True) + tti = self._get_ttinfo(idx) + + dt_out = dt + datetime.timedelta(seconds=tti.offset) + + fold = self.is_ambiguous(dt_out, idx=idx) + + return enfold(dt_out, fold=int(fold)) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # If it's ambiguous and we're in a fold, shift to a different index. 
+ idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. + return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. 
If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To + specify, for example, that DST starts at 2AM on the 2nd Sunday in + March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent + representing the time and time of year that daylight savings time + ends, with the same specification method as in ``start``. One note is + that this should point to the first time in the *standard* zone, so if + a transition occurs at 2AM in the DST zone and the clocks are set back + 1 hour to 1AM, set the ``hours`` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = stdoffset.total_seconds() + except (TypeError, AttributeError): + pass + + try: + dstoffset = dstoffset.total_seconds() + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +@six.add_metaclass(_TzStrFactory) +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: + :class:`unicode`) or a stream emitting unicode characters + (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. 
caution:: + + Prior to version 2.7.0, this function also supported time zones + in the format: + + * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` + * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` + + This format is non-standard and has been deprecated; this function + will raise a :class:`DeprecatedTZFormatWarning` until + support is removed in a future version. + + .. _`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil.parser import _parser as parser + + self._s = s + + res = parser._parsetz(s) + if res is None or res.any_unused_tokens: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. 
+ if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. + delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + self._cache_lock = _thread.allocate_lock() + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + with self._cache_lock: + return self._cachecomp[self._cachedate.index( + (dt, self._fold(dt)))] + except ValueError: + pass + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + + with self._cache_lock: + self._cachedate.insert(0, (dt, self._fold(dt))) + self._cachecomp.insert(0, lastcomp) + + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + + return lastcomp + + def _find_compdt(self, comp, dt): + if comp.tzoffsetdiff < ZERO and self._fold(dt): + dt -= comp.tzoffsetdiff + + compdt = comp.rrule.before(dt, inc=True) + + return compdt + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + """ + This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure + as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects. + + :param `fileobj`: + A file or stream in iCalendar format, which should be UTF-8 encoded + with CRLF endings. + + .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 + """ + def __init__(self, fileobj): + global rrule + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + else: + self._s = getattr(fileobj, 'name', repr(fileobj)) + fileobj = _nullcontext(fileobj) + + self._vtz = {} + + with fileobj as fobj: + self._parse_rfc(fobj.read()) + + def keys(self): + """ + Retrieves the available time zones as a list. + """ + return list(self._vtz.keys()) + + def get(self, tzid=None): + """ + Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. + + :param tzid: + If there is exactly one time zone available, omitting ``tzid`` + or passing :py:const:`None` value returns it. 
Otherwise a valid + key (which can be retrieved from :func:`keys`) is required. + + :raises ValueError: + Raised if ``tzid`` is not specified but there are either more + or fewer than 1 zone defined. + + :returns: + Returns either a :py:class:`datetime.tzinfo` object representing + the relevant time zone or :py:const:`None` if the ``tzid`` was + not found. + """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise 
ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + # DTSTART in VTIMEZONE takes a subset of valid RRULE + # values under RFC 5545. + for parm in parms: + if parm != 'VALUE=DATE-TIME': + msg = ('Unsupported DTSTART param in ' + + 'VTIMEZONE: ' + parm) + raise ValueError(msg) + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + 
comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def __get_gettz(): + tzlocal_classes = (tzlocal,) + if tzwinlocal is not None: + tzlocal_classes += (tzwinlocal,) + + class GettzFunc(object): + """ + Retrieve a time zone object from a string representation + + This function is intended to retrieve the :py:class:`tzinfo` subclass + that best represents the time zone that would be used if a POSIX + `TZ variable`_ were set to the same value. + + If no argument or an empty string is passed to ``gettz``, local time + is returned: + + .. code-block:: python3 + + >>> gettz() + tzfile('/etc/localtime') + + This function is also the preferred way to map IANA tz database keys + to :class:`tzfile` objects: + + .. code-block:: python3 + + >>> gettz('Pacific/Kiritimati') + tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') + + On Windows, the standard is extended to include the Windows-specific + zone names provided by the operating system: + + .. code-block:: python3 + + >>> gettz('Egypt Standard Time') + tzwin('Egypt Standard Time') + + Passing a GNU ``TZ`` style string time zone specification returns a + :class:`tzstr` object: + + .. code-block:: python3 + + >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + + :param name: + A time zone name (IANA, or, on Windows, Windows keys), location of + a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone + specifier. An empty string, no argument or ``None`` is interpreted + as local time. + + :return: + Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` + subclasses. + + .. 
versionchanged:: 2.7.0 + + After version 2.7.0, any two calls to ``gettz`` using the same + input strings will return the same object: + + .. code-block:: python3 + + >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') + True + + In addition to improving performance, this ensures that + `"same zone" semantics`_ are used for datetimes in the same zone. + + + .. _`TZ variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + + .. _`"same zone" semantics`: + https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html + """ + def __init__(self): + + self.__instances = weakref.WeakValueDictionary() + self.__strong_cache_size = 8 + self.__strong_cache = OrderedDict() + self._cache_lock = _thread.allocate_lock() + + def __call__(self, name=None): + with self._cache_lock: + rv = self.__instances.get(name, None) + + if rv is None: + rv = self.nocache(name=name) + if not (name is None + or isinstance(rv, tzlocal_classes) + or rv is None): + # tzlocal is slightly more complicated than the other + # time zone providers because it depends on environment + # at construction time, so don't cache that. + # + # We also cannot store weak references to None, so we + # will also not store that. 
+ self.__instances[name] = rv + else: + # No need for strong caching, return immediately + return rv + + self.__strong_cache[name] = self.__strong_cache.pop(name, rv) + + if len(self.__strong_cache) > self.__strong_cache_size: + self.__strong_cache.popitem(last=False) + + return rv + + def set_cache_size(self, size): + with self._cache_lock: + self.__strong_cache_size = size + while len(self.__strong_cache) > size: + self.__strong_cache.popitem(last=False) + + def cache_clear(self): + with self._cache_lock: + self.__instances = weakref.WeakValueDictionary() + self.__strong_cache.clear() + + @staticmethod + def nocache(name=None): + """A non-cached version of gettz""" + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[1:] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except (WindowsError, UnicodeEncodeError): + # UnicodeEncodeError is for Python 2.7 compat + tz = None + + if not tz: + from dateutil.zoneinfo import get_zonefile_instance + tz = get_zonefile_instance().get(name) + + if not tz: + for c in name: + # name is not a tzstr unless it has at least + # one offset. 
For short values of "name", an + # explicit for loop seems to be the fastest way + # To determine if a string contains a digit + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + + return GettzFunc() + + +gettz = __get_gettz() +del __get_gettz + + +def datetime_exists(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + would fall in a gap. + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" exists in + ``tz``. + + .. versionadded:: 2.7.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + tz = dt.tzinfo + + dt = dt.replace(tzinfo=None) + + # This is essentially a test of whether or not the datetime can survive + # a round trip to UTC. + dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz) + dt_rt = dt_rt.replace(tzinfo=None) + + return dt == dt_rt + + +def datetime_ambiguous(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + is ambiguous (i.e if there are two times differentiated only by their DST + status). + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. 
versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. + is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) + if is_ambiguous_fn is not None: + try: + return tz.is_ambiguous(dt) + except Exception: + pass + + # If it doesn't come out and tell us it's ambiguous, we'll just check if + # the fold attribute has any effect on this particular date and time. + dt = dt.replace(tzinfo=tz) + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dst = wall_0.dst() == wall_1.dst() + + return not (same_offset and same_dst) + + +def resolve_imaginary(dt): + """ + Given a datetime that may be imaginary, return an existing datetime. + + This function assumes that an imaginary datetime represents what the + wall time would be in a zone had the offset transition not occurred, so + it will always fall forward by the transition's change in offset. + + .. doctest:: + + >>> from dateutil import tz + >>> from datetime import datetime + >>> NYC = tz.gettz('America/New_York') + >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) + 2017-03-12 03:30:00-04:00 + + >>> KIR = tz.gettz('Pacific/Kiritimati') + >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) + 1995-01-02 12:30:00+14:00 + + As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, + existing datetime, so a round-trip to and from UTC is sufficient to get + an extant datetime, however, this generally "falls back" to an earlier time + rather than falling forward to the STD side (though no guarantees are made + about this behavior). + + :param dt: + A :class:`datetime.datetime` which may or may not exist. + + :return: + Returns an existing :class:`datetime.datetime`. 
If ``dt`` was not + imaginary, the datetime returned is guaranteed to be the same object + passed to the function. + + .. versionadded:: 2.7.0 + """ + if dt.tzinfo is not None and not datetime_exists(dt): + + curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() + old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() + + dt += curr_offset - old_offset + + return dt + + +def _datetime_to_timestamp(dt): + """ + Convert a :class:`datetime.datetime` object to an epoch timestamp in + seconds since January 1, 1970, ignoring the time zone. + """ + return (dt.replace(tzinfo=None) - EPOCH).total_seconds() + + +if sys.version_info >= (3, 6): + def _get_supported_offset(second_offset): + return second_offset +else: + def _get_supported_offset(second_offset): + # For python pre-3.6, round to full-minutes if that's not the case. + # Python's datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 or https://bugs.python.org/issue5288 + # for some information. + old_offset = second_offset + calculated_offset = 60 * ((second_offset + 30) // 60) + return calculated_offset + + +try: + # Python 3.7 feature + from contextlib import nullcontext as _nullcontext +except ImportError: + class _nullcontext(object): + """ + Class for wrapping contexts so that they are passed through in a + with statement. + """ + def __init__(self, context): + self.context = context + + def __enter__(self): + return self.context + + def __exit__(*args, **kwargs): + pass + +# vim:ts=4:sw=4:et diff --git a/utill/dateutil/tz/win.py b/utill/dateutil/tz/win.py new file mode 100644 index 0000000..cde07ba --- /dev/null +++ b/utill/dateutil/tz/win.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +""" +This module provides an interface to the native time zone data on Windows, +including :py:class:`datetime.tzinfo` implementations. + +Attempting to import this module on a non-Windows platform will raise an +:py:obj:`ImportError`. 
+""" +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct + +from six.moves import winreg +from six import text_type + +try: + import ctypes + from ctypes import wintypes +except ValueError: + # ValueError is raised on non-Windows systems for some horrible reason. + raise ImportError("Running tzwin on non-Windows system") + +from ._common import tzrangebase + +__all__ = ["tzwin", "tzwinlocal", "tzres"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + + +def _settzkeyname(): + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + return TZKEYNAME + + +TZKEYNAME = _settzkeyname() + + +class tzres(object): + """ + Class for accessing ``tzres.dll``, which contains timezone name related + resources. + + .. versionadded:: 2.5.0 + """ + p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char + + def __init__(self, tzres_loc='tzres.dll'): + # Load the user32 DLL so we can load strings from tzres + user32 = ctypes.WinDLL('user32') + + # Specify the LoadStringW function + user32.LoadStringW.argtypes = (wintypes.HINSTANCE, + wintypes.UINT, + wintypes.LPWSTR, + ctypes.c_int) + + self.LoadStringW = user32.LoadStringW + self._tzres = ctypes.WinDLL(tzres_loc) + self.tzres_loc = tzres_loc + + def load_name(self, offset): + """ + Load a timezone name from a DLL offset (integer). + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.load_name(112)) + 'Eastern Standard Time' + + :param offset: + A positive integer value referring to a string from the tzres dll. + + .. 
note:: + + Offsets found in the registry are generally of the form + ``@tzres.dll,-114``. The offset in this case is 114, not -114. + + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. + """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(tzrangebase): + """tzinfo class based on win32's timezones available in the registry.""" + def __init__(self): + raise NotImplementedError('tzwinbase is an abstract base class') + + def __eq__(self, other): + # Compare on all relevant dimensions, including name. 
+ if not isinstance(other, tzwinbase): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr) + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZKEYNAME) as tzkey: + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + return result + + def display(self): + """ + Return the display name of the time zone. + """ + return self._display + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + + if not self.hasdst: + return None + + dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + + dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + + # Ambiguous dates default to the STD side + dstoff -= self._dst_base_offset + + return dston, dstoff + + def _get_hasdst(self): + return self._dstmonth != 0 + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzwin(tzwinbase): + """ + Time zone object created from the zone info in the Windows registry + + These are similar to :py:class:`dateutil.tz.tzrange` objects in that + the time zone data is provided in the format of a single offset rule + for either 0 or 2 time zone transitions per year. + + :param: name + The name of a Windows time zone key, e.g. "Eastern Standard Time". + The full list of keys can be retrieved with :func:`tzwin.list`. + """ + + def __init__(self, name): + self._name = name + + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._std_abbr = keydict["Std"] + self._dst_abbr = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + dstoffset = stdoffset-tup[2] # + DaylightBias * -1 + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) 
= tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + """ + Class representing the local time zone information in the Windows registry + + While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` + module) to retrieve time zone information, ``tzwinlocal`` retrieves the + rules directly from the Windows registry and creates an object like + :class:`dateutil.tz.tzwin`. + + Because Windows does not have an equivalent of :func:`time.tzset`, on + Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the + time zone settings *at the time that the process was started*, meaning + changes to the machine's time zone settings during the run of a program + on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. + Because ``tzwinlocal`` reads the registry directly, it is unaffected by + this issue. 
+ """ + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. 
+ return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/utill/dateutil/tzwin.py b/utill/dateutil/tzwin.py new file mode 100644 index 0000000..cebc673 --- /dev/null +++ b/utill/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/utill/dateutil/utils.py b/utill/dateutil/utils.py new file mode 100644 index 0000000..ebcce6a --- /dev/null +++ b/utill/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers general convenience and utility functions for dealing with +datetimes. + +.. 
versionadded:: 2.7.0 +""" +from __future__ import unicode_literals + +from datetime import datetime, time + + +def today(tzinfo=None): + """ + Returns a :py:class:`datetime` representing the current day at midnight + + :param tzinfo: + The time zone to attach (also used to determine the current day). + + :return: + A :py:class:`datetime.datetime` object representing the current day + at midnight. + """ + + dt = datetime.now(tzinfo) + return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) + + +def default_tzinfo(dt, tzinfo): + """ + Sets the ``tzinfo`` parameter on naive datetimes only + + This is useful for example when you are provided a datetime that may have + either an implicit or explicit time zone, such as when parsing a time zone + string. + + .. doctest:: + + >>> from dateutil.tz import tzoffset + >>> from dateutil.parser import parse + >>> from dateutil.utils import default_tzinfo + >>> dflt_tz = tzoffset("EST", -18000) + >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) + 2014-01-01 12:30:00+00:00 + >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) + 2014-01-01 12:30:00-05:00 + + :param dt: + The datetime on which to replace the time zone + + :param tzinfo: + The :py:class:`datetime.tzinfo` subclass instance to assign to + ``dt`` if (and only if) it is naive. + + :return: + Returns an aware :py:class:`datetime.datetime`. + """ + if dt.tzinfo is not None: + return dt + else: + return dt.replace(tzinfo=tzinfo) + + +def within_delta(dt1, dt2, delta): + """ + Useful for comparing two datetimes that may have a negligible difference + to be considered equal. 
+ """ + delta = abs(delta) + difference = dt1 - dt2 + return -delta <= difference <= delta diff --git a/utill/dateutil/zoneinfo/__init__.py b/utill/dateutil/zoneinfo/__init__.py new file mode 100644 index 0000000..34f11ad --- /dev/null +++ b/utill/dateutil/zoneinfo/__init__.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +import warnings +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO + +from dateutil.tz import tzfile as _tzfile + +__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 'METADATA' + + +class tzfile(_tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? + warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with TarFile.open(fileobj=zonefile_stream) as tf: + self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN} + # deal with links: They'll point to their parent object. Less + # waste of memory + links = {zl.name: self.zones[zl.linkname] + for zl in tf.getmembers() if + zl.islnk() or zl.issym()} + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = {} + self.metadata = None + + def get(self, name, default=None): + """ + Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method + for retrieving zones from the zone dictionary. + + :param name: + The name of the zone to retrieve. 
(Generally IANA zone names) + + :param default: + The value to return in the event of a missing key. + + .. versionadded:: 2.6.0 + + """ + return self.zones.get(name, default) + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: Remove after deprecation period. +_CLASS_ZONE_INSTANCE = [] + + +def get_zonefile_instance(new_instance=False): + """ + This is a convenience function which provides a :class:`ZoneInfoFile` + instance using the data provided by the ``dateutil`` package. By default, it + caches a single instance of the ZoneInfoFile object and returns that. + + :param new_instance: + If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and + used as the cached instance for the next call. Otherwise, new instances + are created only as necessary. + + :return: + Returns a :class:`ZoneInfoFile` object. + + .. versionadded:: 2.6 + """ + if new_instance: + zif = None + else: + zif = getattr(get_zonefile_instance, '_cached_instance', None) + + if zif is None: + zif = ZoneInfoFile(getzoneinfofile_stream()) + + get_zonefile_instance._cached_instance = zif + + return zif + + +def gettz(name): + """ + This retrieves a time zone from the local zoneinfo tarball that is packaged + with dateutil. + + :param name: + An IANA-style time zone name, as found in the zoneinfo file. + + :return: + Returns a :class:`dateutil.tz.tzfile` time zone object. + + .. warning:: + It is generally inadvisable to use this function, and it is only + provided for API compatibility with earlier versions. This is *not* + equivalent to ``dateutil.tz.gettz()``, which selects an appropriate + time zone based on the inputs, favoring system zoneinfo. 
This is ONLY + for accessing the dateutil-specific zoneinfo (which may be out of + date compared to the system zoneinfo). + + .. deprecated:: 2.6 + If you need to use a specific zoneinfofile over the system zoneinfo, + instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call + :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. + + Use :func:`get_zonefile_instance` to retrieve an instance of the + dateutil-provided zoneinfo. + """ + warnings.warn("zoneinfo.gettz() will be removed in future versions, " + "to use the dateutil-provided zoneinfo files, instantiate a " + "ZoneInfoFile object and use ZoneInfoFile.zones.get() " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: + A dictionary with the database metadata + + .. deprecated:: 2.6 + See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, + query the attribute ``zoneinfo.ZoneInfoFile.metadata``. + """ + warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " + "versions, to use the dateutil-provided zoneinfo files, " + "ZoneInfoFile object and query the 'metadata' attribute " + "instead. 
See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/utill/dateutil/zoneinfo/rebuild.py b/utill/dateutil/zoneinfo/rebuild.py new file mode 100644 index 0000000..78f0d1a --- /dev/null +++ b/utill/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,53 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call +from tarfile import TarFile + +from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ``ftp.iana.org/tz``. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with TarFile.open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with TarFile.open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. 
Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/utill/db/model.py b/utill/db/model.py new file mode 100644 index 0000000..4bfdc47 --- /dev/null +++ b/utill/db/model.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +from .mysql import mysql +from .sqlite import sqlite +from kcweb import config +from kcweb import common +class model: + table=None + fields={} + __db=None + config=config.database + def __init__(self): + if not self.table: + self.table=self.__class__.__name__ + self.__db=common.M(self.table,self.config) + def create_table(self): + "创建表" + sqlist=[] + for k in self.fields.keys(): + sqlist.append(k+" "+self.fields[k]) + # print(self.table) + sqls="create table "+self.table+" (" + for k in sqlist: + sqls=sqls+k+", " + sqls=sqls[:-2]+")" + # print(sqls) + self.__db.execute(sqls) + def find(self): + return self.__db.find() + def select(self): + lists=self.__db.select() + # print(lists) + return lists + def insert(self,data): + return self.__db.insert(data) + def update(self,data): + return self.__db.update(data) + def startTrans(self): + "开启事务,仅对 update方法、delete方法、install方法有效" + self.__db.startTrans() + def commit(self): + """事务提交 + + 增删改后的任务进行提交 + """ + self.__db.commit() + def rollback(self): + """事务回滚 + + 增删改后的任务进行撤销 + """ + self.__db.rollback() + def where(self,where = None,*wheres): + """设置过滤条件 + + 传入方式: + "id",2 表示id='2' + + "id","in",2,3,4,5,6,...表示 id in (2,3,4,5,6,...) + + "id","or",2,3,4,5,6,...表示 id=2 or id=3 or id=4... 
+ + [("id","gt",6000),"and",("name","like","%超")] 表示 ( id > "6000" and name LIKE "%超" ) + + "id","eq",1 表示 id = '1' + + eq 等于 + neq 不等于 + gt 大于 + egt 大于等于 + lt 小于 + elt 小于等于 + like LIKE + """ + self.__db.where(where,*wheres) + return self + def field(self,field = "*"): + """设置过滤显示条件 + + 参数 field:str 字符串 + """ + self.__db.field(field) + return self + __limit=[] + def limit(self,offset, length = None): + """设置查询数量 + + 参数 offset:int 起始位置 + + 参数 length:int 查询数量 + """ + self.__db.limit(offset, length) + return self + def order(self,strs=None,*strs1): + """设置排序查询 + + 传入方式: + + "id desc" + + "id",'name','appkey','asc' + + "id",'name','appkey' 不包含asc或desc的情况下 默认是desc + + ['id','taskid',{"task_id":"desc"}] + """ + self.__db.order(strs=None,*strs1) + return self + __distinct=None + def distinct(self,bools=None): + "用于返回唯一不同的值,配合field方法使用生效,消除所有重复的记录,并只获取唯一一次记录。" + self.__db.distinct(bools) + return self + def deltableall(self): + "删除当前数据库所有表格 mysql有效" + if self.conf['type']=='mysql': + a=self.__db.execute("SELECT concat('DROP TABLE IF EXISTS ', table_name, ';') FROM information_schema.tables WHERE table_schema = 'core1';") + for k in a: + self.__db.execute(k["concat('DROP TABLE IF EXISTS ', table_name, ';')"]) + + + + +class dbtype: + conf=model.config + def int(LEN=16,DEFAULT=False,NULL=False,UNIQUE=False,PRI=False,A_L=False): + # print(dbtype.conf['type']) + if dbtype.conf['type']=='mysql': + strs="INT("+str(LEN)+")" + if DEFAULT: + strs=strs+" DEFAULT "+str(DEFAULT) + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + if UNIQUE: + strs=strs+" UNIQUE" + if PRI: + strs=strs+" PRIMARY KEY" + if A_L: + strs=strs+" AUTO_INCREMENT" + else: + strs="INTEGER" + if DEFAULT: + strs=strs+" DEFAULT "+str(DEFAULT) + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + if UNIQUE: + strs=strs+" UNIQUE" + if PRI: + strs=strs+" PRIMARY KEY" + if A_L: + strs=strs+" AUTOINCREMENT" + return strs + def 
varchar(LEN=32,DEFAULT=False,NULL=False,UNIQUE=False,INDEX=False,FULLTEXT=False): + strs="VARCHAR("+str(LEN)+")" + if DEFAULT: + strs=strs+" DEFAULT "+str(DEFAULT) + elif DEFAULT=='': + strs=strs+" DEFAULT ''" + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + if UNIQUE: + strs=strs+" UNIQUE" + if INDEX: + strs=strs+" INDEX" + if FULLTEXT: + strs=strs+" FULLTEXT" + return strs + def text(NULL=False): + if dbtype.conf['type']=='mysql': + strs="TEXT CHARACTER SET utf8 COLLATE utf8_general_ci" + else: + strs="TEXT" + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + return strs + def char(LEN=16,DEFAULT=False,NULL=False,UNIQUE=False,INDEX=False): + strs=" CHAR("+str(LEN)+")" + if DEFAULT: + strs=strs+" DEFAULT "+str(DEFAULT) + elif DEFAULT=='': + strs=strs+" DEFAULT ''" + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + if UNIQUE: + strs=strs+" UNIQUE" + if INDEX: + strs=strs+" INDEX" + return strs + def decimat(LEN="10,2",DEFAULT=False,NULL=False,UNIQUE=False,INDEX=False): + "小数类型" + strs="DECIMAL("+str(LEN)+")" + if DEFAULT: + strs=strs+" DEFAULT "+str(DEFAULT) + elif DEFAULT=='': + strs=strs+" DEFAULT ''" + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + if UNIQUE: + strs=strs+" UNIQUE" + if INDEX: + strs=strs+" INDEX" + return strs + def date(NULL=False): + strs=" DATE" + if NULL: + strs=strs+" NULL" + else: + strs=strs+" NOT NULL" + return strs \ No newline at end of file diff --git a/utill/db/mongodb.py b/utill/db/mongodb.py new file mode 100644 index 0000000..960eaa4 --- /dev/null +++ b/utill/db/mongodb.py @@ -0,0 +1,332 @@ +# -*- coding: utf-8 -*- +import pymongo,re +from bson.objectid import ObjectId +from kcweb.config import mongo as mongodb +class mongo: + __config=mongodb + __clientobj=None + __dbobj=None + __tabobj=None + def __setconn(self): + if not self.__clientobj: + if self.__config['retryWrites']: + strs='mongodb://'+self.__config['host']+':'+self.__config['port']+'/' + else: + 
strs='mongodb://'+self.__config['host']+':'+self.__config['port']+'/?retryWrites=false' + self.__clientobj = pymongo.MongoClient(strs) + self.__dbobj = self.__clientobj[self.__config['db']] + if self.__config['user'] and self.__config['password']: + self.__dbobj.authenticate(self.__config['user'],self.__config['password']) #账号密码认证 + self.__tabobj=self.__dbobj[self.__table] + def connect(self,config): + """设置mongo链接信息 + + 参数 config 参考配置信息格式 可以设置数据库名(以字符串形式) + + 返回 mongodb对象 + """ + if config: + if isinstance(config,dict): + if "host" in config: + self.__config['host']=config['host'] + if "port" in config: + self.__config['port']=config['port'] + if "user" in config: + self.__config['user']=config['user'] + if "password" in config: + self.__config['password']=config['password'] + if "db" in config: + self.__config['db']=config['db'] + elif isinstance(config,str): + self.__config['db']=config + else: + print("config类型错误,设置连接不生效") + return self + def getobj(self): + "获取mongodb链接实例" + self.__setconn() + return self.__tabobj + def select(self,id=None): + """查询所有文档 + + 返回 文档列表 + """ + self.__setconn() + if id: + self.where('_id',id) + lists=[] + if self.__field: + arr=self.__tabobj.find(self.__where,self.__field) + else: + arr=self.__tabobj.find(self.__where) + if self.__limit: + if self.__limit[1]: + arr.limit(self.__limit[1]) + arr.skip(self.__limit[0]) + else: + arr.limit(self.__limit[0]) + for k in arr: + try: k['_id'] + except: pass + else: k['_id']=str(k['_id']) + lists.append(k) + return lists + def find(self,id=None): + """查询一条文档 + + 返回 文档内容 + """ + self.__setconn() + if id: + self.where('_id',id) + if self.__field: + arr = self.__tabobj.find_one(self.__where,self.__field) + else: + arr = self.__tabobj.find_one(self.__where) + try: arr['_id'] + except: pass + else: arr['_id']=str(arr['_id']) + return arr + def countlist(self): + """查询文档数量和所有文档 + + 返回 文档数量,文档列表 + """ + self.__setconn() + lists=[] + if self.__field: + 
arr=self.__tabobj.find(self.__where,self.__field) + else: + arr=self.__tabobj.find(self.__where) + if self.__limit: + if self.__limit[1]: + arr.limit(self.__limit[1]) + arr.skip(self.__limit[0]) + else: + arr.limit(self.__limit[0]) + for k in arr: + try: k['_id'] + except: pass + else: k['_id']=str(k['_id']) + lists.append(k) + return arr.count(),lists + def count(self): + """查询文档数量 + + 返回 文档数量 + """ + self.__setconn() + count=self.__tabobj.find(self.__where,{}).count() + return count + def update(self,data,multi=True): + """文档更新 + + 参数 data 要更新的内容 格式:{"name":"测试","age":20} + + multi 默认True 是否全部更新 + """ + #{ "count" : { $gt : 3 } } , { $set : { "test2" : "OK"} } + self.__setconn() + # print(self.__where) + # print({"$set":data}) + ar=self.__tabobj.update(self.__where,{"$set":data},multi=multi) + return ar + if ar: + return ar['nModified'] + else: + return 0 + def delete(self,id=None): + """文档删除 删除条件是where函数 + """ + self.__setconn() + if id: + self.where('_id',id) + if self.__where: + # print(self.__where) + # exit() + bo=self.__tabobj.remove(self.__where) + if bo: + return bo['n'] + else: + return 0 + else: + return 0 + def deleteAll(self,id=None): + """删所有文档除 + """ + self.__setconn() + bo=self.__tabobj.remove({}) + if bo: + return bo['n'] + else: + return 0 + def insert(self,dicts): + """插入文档 单条插入或多条插入 + + 参数 dicts 要插入的内容 单条格式:{"name":"测试","age":20} 。 多条格式:[{"name":"测试","age":20},{"name":"测试","age":20}] + + 返回插入的数量 + + """ + self.__setconn() + co=0 + if isinstance(dicts,dict): + if self.__tabobj.insert_one(dicts): + co=1 + elif isinstance(dicts,list): + lens=len(dicts) + if lens>100: + raise RuntimeError('列表数量超过最大限制100') + if self.__tabobj.insert_many(dicts): + co=lens + return co + __table="" + def table(self,table): + """设置集合名 + + 参数 table:str 表名 + """ + self.__table=table + return self + __where={} + def where(self,where = None,*wheres): + """设置过滤条件 + + 参数 where:str 字符串 或 列表 + + 传入方式: + + "id",2 表示id='2' + + "id","in",2,3,4 ...表示 id=2 or id=3 or id=4 ... 
+ + "id","or",2,3,4 ...表示 id=2 or id=3 or id=4 ... + + "id","neq",1 表示 id 不等于 '1' + + eq 等于 + neq 不等于 + gt 大于 + egt 大于等于 + lt 小于 + elt 小于等于 + like LIKE + """ + # print("wheres",wheres) {'comments':re.compile('abc')} + if isinstance(where,dict): + self.__where=where + elif isinstance(where,list): + # import re [("name","eq",'冯坤'),"and",("aa","like",'%wfweaf')] + # print(re.compile('abc')) + #{"likes": {$gt:50}, "name": "冯坤","title": "MongoDB 教程"} + #{"likes":'dav', $or: [{"by": "菜鸟教程"},{"title": "MongoDB 教程"}]} + zd={} + t='' + for k in where: + if isinstance(k,tuple): + if k[1]=='eq': + if t=='or': + zd['$or'].append({k[0]:k[2]}) + else: + zd[k[0]]=k[2] + elif k[1]=='like': + if t=='or': + zd['$or'].append({k[0]:re.compile(re.sub('%','',k[2]))}) + else: + zd[k[0]]=re.compile(re.sub('%','',k[2])) + else: + if t=='or': + zd['$or'].append({k[0]:{'$'+k[1]:k[2]}}) + else: + n=self.__operator(k[1]) + zd[k[0]]={n:k[2]} + elif isinstance(k,str): + if k=='or': + t=k + zd['$or']=[] + self.__where=zd + # print(zd) + # exit() + elif isinstance(where,str) and len(wheres)==1: + wheres=list(wheres) + if where=='_id': + wheres[0]=ObjectId(wheres[0]) + self.__where[where]=wheres[0] + elif isinstance(where,str) and len(wheres)==2: + wheres=list(wheres) + if where=='_id': + wheres[1]=ObjectId(wheres[1]) + if wheres[0] == 'eq': + self.__where[where]=wheres[1] + elif wheres[0]=='like': + self.__where[where]=re.compile(re.sub('%','',wheres[1])) + else: + n=self.__operator(wheres[0]) + self.__where[where]={n:wheres[1]} + elif isinstance(where,str) and isinstance(wheres,tuple): + #{$or: [{key1: value1}, {key2:value2}]} + # self.__where={'$or': [{where: wheres[0]}, {where:wheres[1]}]} + # print(wheres) + lists=[] + for k in wheres: + lists.append({where:k}) + self.__where={'$or': lists} + # print(self.__where) + return self + __field={} + def field(self,field = "*"): + """设置过滤显示条件 + + 参数 field:str 字符串 + """ + if field and field!='*': + field=field.split(",") + zd={} + for f in field: + 
zd[f]=1 + self.__field=zd + return self + __limit=[] + def limit(self,offset, length = None): + """设置查询数量 + + 参数 offset:int 起始位置 + + 参数 length:int 查询数量 + """ + if length==None: + length=offset + offset=0 + elif offset > 0: + offset=offset*length-length + self.__limit=[offset,length] + return self + # def order(self,k): + # pass + def __operator(self,strs): + """运算符转换 + 参数 strs 待转的字符串 + 返回 已转换的运算符 + + 符号定义 + eq 等于 + neq 不等于 + gt 大于 + egt 大于等于 + lt 小于 + elt 小于等于 + """ + strss=strs.upper() + if strss == 'NEQ': + k='$ne' + elif strss == 'GT': + k='$gt' + elif strss == 'EGT': + k='$gte' + elif strss == 'LT': + k='$lt' + elif strss == 'ELT': + k='$lte' + else: + k=strss + return k \ No newline at end of file diff --git a/utill/db/mysql.py b/utill/db/mysql.py new file mode 100644 index 0000000..8bfaae3 --- /dev/null +++ b/utill/db/mysql.py @@ -0,0 +1,1020 @@ +# -*- coding: utf-8 -*- +from .pymysql import connect,escape_string +# import config.conf as config +import kcweb.config as config +import time,traceback,decimal,random +dbconfig=config.database +class mysql: + """数据库实例""" + __config=dbconfig + __conn={} #数据库链接对象 + __cursor=None #游标对象 + __errorcount=dbconfig['break'] #允许最大链接错误次数 + __errorcounts=0 #默认链接错误次数 + __dbObjcount=dbconfig['dbObjcount'] #数据库链接实例数量 + __sql='' + __sqls='' + __masteridentifier='' # 主服务器标识 + __slaveidentifier='' # 从服务器标识 + def __del__(self): + if not self.__config['pattern'] and self.__conn: + try: + self.__conn.close() + except Exception as e: + print("关闭失败",e) + __dbcount=1 + def __setdbcount(self): + "设置数据库配置总数量" + if isinstance(self.__config['host'],str): + self.__config['host']=[self.__config['host']] + if isinstance(self.__config['port'],str): + self.__config['port']=[self.__config['port']] + if isinstance(self.__config['user'],str): + self.__config['user']=[self.__config['user']] + if isinstance(self.__config['password'],str): + self.__config['password']=[self.__config['password']] + if isinstance(self.__config['db'],str): + 
self.__config['db']=[self.__config['db']] + host=len(self.__config['host']) + port=len(self.__config['port']) + user=len(self.__config['user']) + password=len(self.__config['password']) + db=len(self.__config['db']) + lists=[host,port,user,password,db] + lists.sort() + self.__dbcount=lists[0] + def __closeconn(self,identifier): + "长链接模式下,关闭链接池的链接" + if self.__config['pattern']: + if identifier in mysql.__conn: + for k in mysql.__conn[identifier]: + # print(identifier) + try: + k['obj'].close() + print(k,"关闭成功") + except: + print(k,"关闭错误") + mysql.__conn[identifier]=[] + __dbobjident=None #集中式(单一服务器)并且长连接模式下随机服务器链接标识 和 分布式(主从服务器)模式下随机服务器链接标识 + def __connects(self,typess="DQL"): + """设置数据库链接 + + 参数 typess :数据查询语言DQL,数据操纵语言DML,数据定义语言DDL,数据控制语言DCL + """ + + try: + if self.__config['deploy']==0: # 集中式(单一服务器) + if self.__config['pattern']: # 长连接情况下 + self.__masteridentifier=self.__config['host'][0]+str(self.__config['port'][0])+self.__config['db'][0] # 服务器标识 + if self.__masteridentifier not in mysql.__conn or len(mysql.__conn[self.__masteridentifier])<1: + i=0 + masterlistsdb=[] + while iself.__config['dbObjcount'] * self.__dbcount: #长连接情况下如果错误次数超过数据实例数量 关闭使用连接进行重连接 + self.__patternerrorcount=0 + if self.__config['deploy'] == 1: #分布式(主从服务器) 情况下 + print("数据库连接失效,关闭主从连接池后重新连接") + self.__closeconn(self.__masteridentifier) + self.__closeconn(self.__slaveidentifier) + time.sleep(10) + # mysql.__conn=[] #父类数据库实例 + self.__connects(typess) + self.__execute(typess) + else: + print("数据库连接失效,关闭主连接池后重新连接") + self.__closeconn(self.__masteridentifier) + time.sleep(10) + # mysql.__conn=[] #父类数据库实例 + self.__connects(typess) + self.__execute(typess) + else: + self.__patternerrorcount=self.__patternerrorcount+1 + self.__execute(typess) + else: + self.__conn[bs][self.__dbobjident]['error']=self.__conn[bs][self.__dbobjident]['error']+1 #当前数据库连接实例异常错误数量 + if self.__conn[bs][self.__dbobjident]['error'] > 2: + try: + mysql.__conn[bs][self.__dbobjident]['obj'].close() #关闭当前实例 + except Exception 
as e: + print("关闭异常",e) + # self.__conn[bs].pos(self.__dbobjident) #从列表中删除 + # if errorcodes == 2013: + #创建一个新的数据库实例 + if types=='master': + s=random.randint(0,self.__config['master_num']-1) + else: + s=random.randint(self.__dbcount-self.__config['master_num']-1,self.__dbcount-1) + obj=connect(host=self.__config['host'][s], port=self.__config['port'][s], user=self.__config['user'][s], password=self.__config['password'][s], db=self.__config['db'][s], charset=self.__config['charset']) + mysql.__conn[bs][self.__dbobjident]['obj']=obj + mysql.__conn[bs][self.__dbobjident]['error']=0 + print("已重新创建一个新的数据库实例",mysql.__conn) + self.__execute(typess) + else: # 短连接情况下 + print("服务器正在被关闭,关闭当前连接后重试") + try: + mysql.__conn.close() #关闭当前实例 + except Exception as e: + print("关闭异常",e) + # mysql.__conn=[] #父类数据库实例 + self.__connects(typess) + self.__execute(typess) + else: + raise Exception(e) + else: + self.__patternerrorcount=0 + return res + + def query(self,sql): + """执行sql语句 注:只支持单一服务器模式 + + 参数 sql 字符串 + + 返回 列表 或 数字 + """ + self.__sql=sql + res=self.__execute('DQL') + description=self.__cursor.description #获取字段 + result = self.__cursor.fetchall() #获取查询结果 + # print(result) + self.__cursor.close() + if description is None: + return res + else: + lists=[] + data_dict=[] + for field in description:#获取字段 + data_dict.append(field[0]) + for k in result: + i=0 + dicts={} + for j in k: + dicts[data_dict[i]]=j + i=i+1 + lists.append(dicts) + return lists + def execute(self,sql): + """执行sql语句 注:只支持单一服务器模式 + + 参数 sql 字符串 + + 返回 列表 或 数字 + """ + self.__sql=sql + res=self.__execute('DML') + description=self.__cursor.description #获取字段 + result = self.__cursor.fetchall() #获取查询结果 + # print(result) + self.__cursor.close() + if description is None: + return res + else: + lists=[] + data_dict=[] + for field in description:#获取字段 + data_dict.append(field[0]) + for k in result: + i=0 + dicts={} + for j in k: + dicts[data_dict[i]]=j + i=i+1 + lists.append(dicts) + return lists + + + def 
select(self,id=None): + """select查询 + + 返回 list(列表) + """ + if id : + self.__where="id=%d" % id + self.__setsql() + if self.__buildSql: + self.__sqls="("+self.__sql+")" + self.__None() + return self.__sqls + + self.__execute() + description=self.__cursor.description #获取字段 + result = self.__cursor.fetchall() #获取查询结果 + # print(result) + self.__cursor.close() + lists=[] + keys =[] + for field in description:#获取字段 + keys.append(field[0]) + key_number = len(keys) + for row in result: + item = dict() + for q in range(key_number): + k=row[q] + if type(row[q])==decimal.Decimal: + k=float(row[q]) + item[keys[q]] = k + lists.append(item) + return lists + def find(self,id=None): + """查询一条记录 + + 返回 字典 + """ + if id : + self.__where="id=%s" % id + self.limit(1) + self.__setsql() + if self.__buildSql: + self.__sqls="("+self.__sql+")" + self.__None() + return self.__sqls + self.__execute() + description=self.__cursor.description #获取字段 + result = self.__cursor.fetchall() #获取查询结果 + # print(result) + self.__cursor.close() + + item = dict() + keys =[] + for field in description:#获取字段 + keys.append(field[0]) + key_number = len(keys) + for row in result: + for q in range(key_number): + k=row[q] + if type(row[q])==decimal.Decimal: + k=float(row[q]) + item[keys[q]] = k + return item + + def count(self,field="*"): + """查询数量 + + 返回 int 数字 + """ + self.__field=field + self.__setsql('count') + if self.__buildSql: + self.__sqls="("+self.__sql+")" + return self.__sql + self.__execute() + result = self.__cursor.fetchall() #获取查询结果 + self.__cursor.close() + cou=int(result[0][0]) + return cou + def max(self,field): + """查询某字段的最大值 + + 返回 int 数字 + """ + self.__field=field + self.__setsql('max') + if self.__buildSql: + self.__sqls="("+self.__sql+")" + return self.__sql + self.__execute() + result = self.__cursor.fetchall() #获取查询结果 + self.__cursor.close() + cou=int(result[0][0]) + return cou + def min(self,field): + """查询某字段的最小值 + + 返回 int 数字 + """ + self.__field=field + self.__setsql('min') + if 
self.__buildSql: + self.__sqls="("+self.__sql+")" + return self.__sql + self.__execute() + result = self.__cursor.fetchall() #获取查询结果 + self.__cursor.close() + cou=int(result[0][0]) + return cou + def avg(self,field): + """查询某字段的平均值 + + 返回 int 数字 + """ + self.__field=field + self.__setsql('avg') + if self.__buildSql: + self.__sqls="("+self.__sql+")" + return self.__sql + self.__execute() + result = self.__cursor.fetchall() #获取查询结果 + self.__cursor.close() + cou=int(result[0][0]) + return cou + def sum(self,field): + """查询某字段之和 + + 返回 int 数字 + """ + self.__field=field + self.__setsql('sum') + if self.__buildSql: + self.__sqls="("+self.__sql+")" + return self.__sql + self.__execute() + result = self.__cursor.fetchall() #获取查询结果 + self.__cursor.close() + cou=int(result[0][0]) + return cou + + + def update(self,data,affair=False): + """数据表更新 + + 参数 data 要更新的内容 格式:{"name":"测试","age":20} + + 参数 affair 是否开启事务 True表示手动提交事务 False表示自动提交事务 + """ + self.__setsql('update',data) + res=self.__execute('DML') + if affair==False and self.__startTrans==False: + if not self.__config['pattern']: + self.__conn.commit() + else: + self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() + self.__cursor.close() + return res + def delete(self,affair=False): + """数据表删除 + + 参数 affair 是否开启事务 True表示手动提交事务 False表示自动提交事务 + """ + self.__setsql('delete') + if self.__where: + res=self.__execute('DML') + else: + return 0 + if affair==False and self.__startTrans==False: + if not self.__config['pattern']: + self.__conn.commit() + else: + self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() + self.__cursor.close() + return res + def insert(self,dicts,affair=False): + """插入数据库 单条插入或多条插入 + + 参数 dicts 要插入的内容 单条格式:{"name":"测试","age":20} 。 多条格式:[{"name":"测试","age":20},{"name":"测试","age":20}] + + 参数 affair 是否开启事务 True表示手动提交事务 False表示自动提交事务 + + 返回插入的数量 + """ + self.__setsql('insert',dicts) + res=self.__execute('DML') + if affair==False and self.__startTrans==False: + if not 
self.__config['pattern']: + self.__conn.commit() + else: + self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() + self.__cursor.close() + return res + + __startTrans=False + def startTrans(self): + "开启事务,仅对 update方法、delete方法、install方法有效" + self.__startTrans=True + def commit(self): + """事务提交 + + 增删改后的任务进行提交 + """ + if not self.__config['pattern']: + self.__conn.commit() + else: + self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].commit() + + def rollback(self): + """事务回滚 + + 增删改后的任务进行撤销 + """ + if not self.__config['pattern']: + self.__conn.rollback() + else: + self.__conn[self.__masteridentifier][self.__dbobjident]['obj'].rollback() + def getsql(self): + """得到生成的sql语句""" + return self.__sql + __buildSql=None + def buildSql(self): + """构造子查询""" + self.__buildSql=True + return self + def __None(self): + "清除所有赋值条件" + self.__lock=None + self.__distinct=None + self.__join=None + self.__joinstr='' + self.__alias=None + self.__having=None + self.__group=None + self.__group1=None + self.__order=None + self.__order1=None + self.__limit=None + self.__field="*" + self.__where=None + self.__wheres=() + self.__table=None + self.__buildSql=None + self.__table=None + + __where=None + __wheres=() + def where(self,where = None,*wheres): + """设置过滤条件 + + 传入方式: + "id",2 表示id='2' + + "id","in",2,3,4,5,6,...表示 id in (2,3,4,5,6,...) + + "id","in",[2,3,4,5,6,...]表示 id in (2,3,4,5,6,...) 
+ + + [("id","gt",6000),"and",("name","like","%超")] 表示 ( id > "6000" and name LIKE "%超" ) + + "id","eq",1 表示 id = '1' + + eq 等于 + neq 不等于 + gt 大于 + egt 大于等于 + lt 小于 + elt 小于等于 + like LIKE + """ + self.__where=where + self.__wheres=wheres + return self + __field='*' + def field(self,field = "*"): + """设置过滤显示条件 + + 参数 field:str 字符串 + """ + self.__field=field + return self + __limit=[] + def limit(self,offset, length = None): + """设置查询数量 + + 参数 offset:int 起始位置 + + 参数 length:int 查询数量 + """ + self.__limit=[offset,length] + return self + def page(self,pagenow=1, length = 20): + """设置分页查询 + + 参数 pagenow:int 页码 + + 参数 length:int 查询数量 + """ + offset=(pagenow-1)*length + self.__limit=[offset,length] + return self + __order=None + __order1=None + def order(self,strs=None,*strs1): + """设置排序查询 + + 传入方式: + + "id desc" + + "id",'name','appkey','asc' + + "id",'name','appkey' 不包含asc或desc的情况下 默认是desc + + ['id','taskid',{"task_id":"desc"}] + """ + self.__order=strs + self.__order1=strs1 + return self + __group=None + __group1=None + def group(self,strs=None,*strs1): + """设置分组查询 + + 传入方式: + + "id,name" + + "id","name" + """ + self.__group=strs + self.__group1=strs1 + return self + __having=None + def having(self,strs=None): + """用于配合group方法完成从分组的结果中筛选(通常是聚合条件)数据 + + 参数 strs:string 如:"count(time)>3" + """ + self.__having=strs + return self + __alias=None + def alias(self,strs=None): + """用于设置当前数据表的别名,便于使用其他的连贯操作例如join方法等。 + + 参数 strs:string 默认当前表作为别名 + """ + if strs: + self.__alias=strs + else: + self.__alias=self.__table + return self + __join=None + __joinstr='' + def join(self,strs,on=None,types='INNER'): + """用于根据两个或多个表中的列之间的关系,从这些表中查询数据 + + 参数 strs string 如:"test t1" test表设置别名t1 + + 参数 on string 如:"t1.id=t2.pid" 设置连接条件 + + 参数 types 支持INNER、LEFT、RIGHT、FULL 默认INNER + + """ + joinstr='' + if strs and on: + joinstr=joinstr+types+" JOIN "+strs+" ON "+on+" " + if joinstr: + self.__joinstr=self.__joinstr+joinstr + return self + __distinct=None + def distinct(self,bools=None): + 
"用于返回唯一不同的值,配合field方法使用生效,来消除所有重复的记录,并只获取唯一一次记录。" + self.__distinct=bools + return self + __lock=None + def lock(self,strs=None): + """用于数据库的锁机制,在查询或者执行操作的时候使用 + + 排他锁 (FOR UPDATE) + + 共享锁 (lock in share mode) + + 参数 strs 如:True表示自动在生成的SQL语句最后加上FOR UPDATE, + + + """ + self.__lock=strs + return self + + # __cache=[] + # def cache(self,endtime,tag=None): + # """设置查询缓存 + + # 参数 endtime:int 缓存数据 0永久 + + # 参数 tag:int 缓存标签 + # """ + # self.__cache=[endtime,tag] + # return self + def __setsql(self,types=None,data = {}): + """生成sql语句""" + if types==None: + self.__sql="SELECT" + if self.__distinct and self.__field: + self.__sql=self.__sql+" DISTINCT" + if self.__alias: + self.__sql=self.__sql+" %s FROM %s %s" % (self.__field,self.__table,self.__alias) + else: + self.__sql=self.__sql+" %s FROM %s" % (self.__field,self.__table) + elif types=='count': + self.__sql="SELECT COUNT(%s) FROM %s" % (self.__field,self.__table) + elif types=='max': + self.__sql="SELECT MAX(%s) FROM %s" % (self.__field,self.__table) + elif types=='min': + self.__sql="SELECT MIN(%s) FROM %s" % (self.__field,self.__table) + elif types=='avg': + self.__sql="SELECT AVG(%s) FROM %s" % (self.__field,self.__table) + elif types=='sum': + self.__sql="SELECT SUM(%s) FROM %s" % (self.__field,self.__table) + elif types=='update': + strs='' + for k in data: + if isinstance(data[k],str): + strs=strs+" %s = '%s' ," % (k,escape_string(data[k])) + else: + strs=strs+" %s = %s ," % (k,data[k]) + strs=strs[:-1] + self.__sql="UPDATE %s SET %s" % (self.__table,strs) + elif types=='delete': + self.__sql="DELETE FROM %s" % self.__table + elif types=='insert': + if isinstance(data,dict): + strs='' + val='' + for k in data: + strs=strs+"%s," % k + if isinstance(data[k],str): + val=val+"'%s'," % escape_string(data[k]) + else: + val=val+"%s," % data[k] + strs=strs[:-1] + val=val[:-1] + self.__sql="INSERT INTO %s (%s) VALUES (%s)" % (self.__table,strs,val) + elif isinstance(data,list): + strs='' + val='(' + for k in data[0]: + 
strs=strs+" , "+k + for k in data: + for j in k: + if isinstance(k[j],str): + val=val+"'"+str(k[j])+"'," + else: + val=val+str(k[j])+"," + val=val[:-1] + val=val+"),(" + val=val[:-2] + self.__sql="INSERT INTO "+self.__table+" ("+strs[3:]+") VALUES "+val + if self.__joinstr: + self.__sql=self.__sql+" "+self.__joinstr + if self.__where: + if isinstance(self.__where,str): + if self.__wheres: + if len(self.__wheres) == 2: + if isinstance(self.__wheres[1],list): + self.__sql=self.__sql + " WHERE %s %s (" % (self.__where,self.__operator(self.__wheres[0])) + for k in self.__wheres[1]: + self.__sql=self.__sql+str(k)+"," + self.__sql=self.__sql[:-1]+")" + else: + self.__sql=self.__sql + " WHERE %s %s '%s'" % (self.__where,self.__operator(self.__wheres[0]),self.__wheres[1]) + elif len(self.__wheres) > 2: + if self.__wheres[0]=='in': + strs=str(self.__wheres[1]) + i=0 + for k in self.__wheres: + if i > 1: + strs=strs+","+str(k) + i=i+1 + self.__sql=self.__sql + " WHERE %s in (%s)" % (self.__where,strs) + else: + self.__sql=self.__sql + " WHERE %s = '%s'" % (self.__where,self.__wheres[0]) + else: + self.__sql=self.__sql + " WHERE %s" % self.__where + elif isinstance(self.__where,list): + self.__sql=self.__sql + " WHERE %s" % self.__listTrans() + else: + print("参数where类型错误") + if self.__order: + s='' + if isinstance(self.__order,list): + for strs in self.__order: + if isinstance(strs,str): + s=s+strs+"," + else: + pass + for key in strs: + s=s+key+" "+strs[key] + s=s+"," + s=s[:-1] + if isinstance(self.__order,str): + if self.__order1: + if len(self.__order1) > 1: + if self.__order1[len(self.__order1)-1] == 'desc' or self.__order1[len(self.__order1)-1] == 'asc': + i=0 + while iLL", hash_pass) + hash_message_n = struct.unpack(">LL", hash_message) + + rand_st = RandStruct_323( + hash_pass_n[0] ^ hash_message_n[0], hash_pass_n[1] ^ hash_message_n[1] + ) + outbuf = io.BytesIO() + for _ in range(min(SCRAMBLE_LENGTH_323, len(message))): + outbuf.write(int2byte(int(rand_st.my_rnd() * 
31) + 64)) + extra = int2byte(int(rand_st.my_rnd() * 31)) + out = outbuf.getvalue() + outbuf = io.BytesIO() + for c in out: + outbuf.write(int2byte(byte2int(c) ^ byte2int(extra))) + return outbuf.getvalue() + + +def _hash_password_323(password): + nr = 1345345333 + add = 7 + nr2 = 0x12345671 + + # x in py3 is numbers, p27 is chars + for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]: + nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF + nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF + add = (add + c) & 0xFFFFFFFF + + r1 = nr & ((1 << 31) - 1) # kill sign bits + r2 = nr2 & ((1 << 31) - 1) + return struct.pack(">LL", r1, r2) + + +# sha256_password + + +def _roundtrip(conn, send_data): + conn.write_packet(send_data) + pkt = conn._read_packet() + pkt.check_error() + return pkt + + +def _xor_password(password, salt): + password_bytes = bytearray(password) + salt = bytearray(salt) # for PY2 compat. + salt_len = len(salt) + for i in range(len(password_bytes)): + password_bytes[i] ^= salt[i % salt_len] + return bytes(password_bytes) + + +def sha2_rsa_encrypt(password, salt, public_key): + """Encrypt password with salt and public_key. + + Used for sha256_password and caching_sha2_password. 
+ """ + if not _have_cryptography: + raise RuntimeError("cryptography is required for sha256_password or caching_sha2_password") + message = _xor_password(password + b'\0', salt) + rsa_key = serialization.load_pem_public_key(public_key, default_backend()) + return rsa_key.encrypt( + message, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA1()), + algorithm=hashes.SHA1(), + label=None, + ), + ) + + +def sha256_password_auth(conn, pkt): + if conn._secure: + if DEBUG: + print("sha256: Sending plain password") + data = conn.password + b'\0' + return _roundtrip(conn, data) + + if pkt.is_auth_switch_request(): + conn.salt = pkt.read_all() + if not conn.server_public_key and conn.password: + # Request server public key + if DEBUG: + print("sha256: Requesting server public key") + pkt = _roundtrip(conn, b'\1') + + if pkt.is_extra_auth_data(): + conn.server_public_key = pkt._data[1:] + if DEBUG: + print("Received public key:\n", conn.server_public_key.decode('ascii')) + + if conn.password: + if not conn.server_public_key: + raise OperationalError("Couldn't receive server's public key") + + data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key) + else: + data = b'' + + return _roundtrip(conn, data) + + +def scramble_caching_sha2(password, nonce): + # (bytes, bytes) -> bytes + """Scramble algorithm used in cached_sha2_password fast path. 
+ + XOR(SHA256(password), SHA256(SHA256(SHA256(password)), nonce)) + """ + if not password: + return b'' + + p1 = hashlib.sha256(password).digest() + p2 = hashlib.sha256(p1).digest() + p3 = hashlib.sha256(p2 + nonce).digest() + + res = bytearray(p1) + if PY2: + p3 = bytearray(p3) + for i in range(len(p3)): + res[i] ^= p3[i] + + return bytes(res) + + +def caching_sha2_password_auth(conn, pkt): + # No password fast path + if not conn.password: + return _roundtrip(conn, b'') + + if pkt.is_auth_switch_request(): + # Try from fast auth + if DEBUG: + print("caching sha2: Trying fast path") + conn.salt = pkt.read_all() + scrambled = scramble_caching_sha2(conn.password, conn.salt) + pkt = _roundtrip(conn, scrambled) + # else: fast auth is tried in initial handshake + + if not pkt.is_extra_auth_data(): + raise OperationalError( + "caching sha2: Unknown packet for fast auth: %s" % pkt._data[:1] + ) + + # magic numbers: + # 2 - request public key + # 3 - fast auth succeeded + # 4 - need full auth + + pkt.advance(1) + n = pkt.read_uint8() + + if n == 3: + if DEBUG: + print("caching sha2: succeeded by fast path.") + pkt = conn._read_packet() + pkt.check_error() # pkt must be OK packet + return pkt + + if n != 4: + raise OperationalError("caching sha2: Unknwon result for fast auth: %s" % n) + + if DEBUG: + print("caching sha2: Trying full auth...") + + if conn._secure: + if DEBUG: + print("caching sha2: Sending plain password via secure connection") + return _roundtrip(conn, conn.password + b'\0') + + if not conn.server_public_key: + pkt = _roundtrip(conn, b'\x02') # Request public key + if not pkt.is_extra_auth_data(): + raise OperationalError( + "caching sha2: Unknown packet for public key: %s" % pkt._data[:1] + ) + + conn.server_public_key = pkt._data[1:] + if DEBUG: + print(conn.server_public_key.decode('ascii')) + + data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key) + pkt = _roundtrip(conn, data) diff --git a/utill/db/pymysql/_compat.py 
b/utill/db/pymysql/_compat.py new file mode 100644 index 0000000..252789e --- /dev/null +++ b/utill/db/pymysql/_compat.py @@ -0,0 +1,21 @@ +import sys + +PY2 = sys.version_info[0] == 2 +PYPY = hasattr(sys, 'pypy_translation_info') +JYTHON = sys.platform.startswith('java') +IRONPYTHON = sys.platform == 'cli' +CPYTHON = not PYPY and not JYTHON and not IRONPYTHON + +if PY2: + import __builtin__ + range_type = xrange + text_type = unicode + long_type = long + str_type = basestring + unichr = __builtin__.unichr +else: + range_type = range + text_type = str + long_type = int + str_type = str + unichr = chr diff --git a/utill/db/pymysql/_socketio.py b/utill/db/pymysql/_socketio.py new file mode 100644 index 0000000..6a11d42 --- /dev/null +++ b/utill/db/pymysql/_socketio.py @@ -0,0 +1,134 @@ +""" +SocketIO imported from socket module in Python 3. + +Copyright (c) 2001-2013 Python Software Foundation; All Rights Reserved. +""" + +from socket import * +import io +import errno + +__all__ = ['SocketIO'] + +EINTR = errno.EINTR +_blocking_errnos = (errno.EAGAIN, errno.EWOULDBLOCK) + +class SocketIO(io.RawIOBase): + + """Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + """ + + # One might wonder why not let FileIO do the job instead. 
There are two + # main reasons why FileIO is not adapted: + # - it wouldn't work under Windows (where you can't used read() and + # write() on a socket handle) + # - it wouldn't work with socket timeouts (FileIO would ignore the + # timeout and consider the socket non-blocking) + + # XXX More docs + + def __init__(self, sock, mode): + if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): + raise ValueError("invalid mode: %r" % mode) + io.RawIOBase.__init__(self) + self._sock = sock + if "b" not in mode: + mode += "b" + self._mode = mode + self._reading = "r" in mode + self._writing = "w" in mode + self._timeout_occurred = False + + def readinto(self, b): + """Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + """ + self._checkClosed() + self._checkReadable() + if self._timeout_occurred: + raise IOError("cannot read from timed out object") + while True: + try: + return self._sock.recv_into(b) + except timeout: + self._timeout_occurred = True + raise + except error as e: + n = e.args[0] + if n == EINTR: + continue + if n in _blocking_errnos: + return None + raise + + def write(self, b): + """Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.args[0] in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. 
+ """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. + """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + diff --git a/utill/db/pymysql/charset.py b/utill/db/pymysql/charset.py new file mode 100644 index 0000000..07d8063 --- /dev/null +++ b/utill/db/pymysql/charset.py @@ -0,0 +1,212 @@ +MBLENGTH = { + 8:1, + 33:3, + 88:2, + 91:2 + } + + +class Charset(object): + def __init__(self, id, name, collation, is_default): + self.id, self.name, self.collation = id, name, collation + self.is_default = is_default == 'Yes' + + def __repr__(self): + return "Charset(id=%s, name=%r, collation=%r)" % ( + self.id, self.name, self.collation) + + @property + def encoding(self): + name = self.name + if name in ('utf8mb4', 'utf8mb3'): + return 'utf8' + return name + + @property + def is_binary(self): + return self.id == 63 + + +class Charsets: + def __init__(self): + self._by_id = {} + self._by_name = {} + + def add(self, c): + self._by_id[c.id] = c + if c.is_default: + self._by_name[c.name] = c + + def by_id(self, id): + return self._by_id[id] + + def by_name(self, name): + return 
self._by_name.get(name.lower()) + +_charsets = Charsets() +""" +Generated with: + +mysql -N -s -e "select id, character_set_name, collation_name, is_default +from information_schema.collations order by id;" | python -c "import sys +for l in sys.stdin.readlines(): + id, name, collation, is_default = l.split(chr(9)) + print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \ + % (id, name, collation, is_default.strip()) +" + +""" +_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes')) +_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', '')) +_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes')) +_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes')) +_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', '')) +_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes')) +_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes')) +_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes')) +_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes')) +_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes')) +_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes')) +_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes')) +_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes')) +_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', '')) +_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', '')) +_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes')) +_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes')) +_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes')) +_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', '')) +_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', '')) +_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes')) +_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', '')) +_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes')) +_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes')) +_charsets.add(Charset(26, 'cp1250', 
'cp1250_general_ci', 'Yes')) +_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', '')) +_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes')) +_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', '')) +_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes')) +_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', '')) +_charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes')) +_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes')) +_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', '')) +_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes')) +_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes')) +_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes')) +_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes')) +_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes')) +_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes')) +_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', '')) +_charsets.add(Charset(43, 'macce', 'macce_bin', '')) +_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', '')) +_charsets.add(Charset(45, 'utf8mb4', 'utf8mb4_general_ci', 'Yes')) +_charsets.add(Charset(46, 'utf8mb4', 'utf8mb4_bin', '')) +_charsets.add(Charset(47, 'latin1', 'latin1_bin', '')) +_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', '')) +_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', '')) +_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', '')) +_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes')) +_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', '')) +_charsets.add(Charset(53, 'macroman', 'macroman_bin', '')) +_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes')) +_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', '')) +_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes')) +_charsets.add(Charset(63, 'binary', 'binary', 'Yes')) +_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', '')) 
+_charsets.add(Charset(65, 'ascii', 'ascii_bin', '')) +_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', '')) +_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', '')) +_charsets.add(Charset(68, 'cp866', 'cp866_bin', '')) +_charsets.add(Charset(69, 'dec8', 'dec8_bin', '')) +_charsets.add(Charset(70, 'greek', 'greek_bin', '')) +_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', '')) +_charsets.add(Charset(72, 'hp8', 'hp8_bin', '')) +_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', '')) +_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', '')) +_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', '')) +_charsets.add(Charset(76, 'utf8', 'utf8_tolower_ci', '')) +_charsets.add(Charset(77, 'latin2', 'latin2_bin', '')) +_charsets.add(Charset(78, 'latin5', 'latin5_bin', '')) +_charsets.add(Charset(79, 'latin7', 'latin7_bin', '')) +_charsets.add(Charset(80, 'cp850', 'cp850_bin', '')) +_charsets.add(Charset(81, 'cp852', 'cp852_bin', '')) +_charsets.add(Charset(82, 'swe7', 'swe7_bin', '')) +_charsets.add(Charset(83, 'utf8', 'utf8_bin', '')) +_charsets.add(Charset(84, 'big5', 'big5_bin', '')) +_charsets.add(Charset(85, 'euckr', 'euckr_bin', '')) +_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', '')) +_charsets.add(Charset(87, 'gbk', 'gbk_bin', '')) +_charsets.add(Charset(88, 'sjis', 'sjis_bin', '')) +_charsets.add(Charset(89, 'tis620', 'tis620_bin', '')) +_charsets.add(Charset(91, 'ujis', 'ujis_bin', '')) +_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes')) +_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', '')) +_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', '')) +_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes')) +_charsets.add(Charset(96, 'cp932', 'cp932_bin', '')) +_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes')) +_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', '')) +_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', '')) +_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', '')) +_charsets.add(Charset(193, 
'utf8', 'utf8_icelandic_ci', '')) +_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', '')) +_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', '')) +_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', '')) +_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', '')) +_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', '')) +_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', '')) +_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', '')) +_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', '')) +_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', '')) +_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', '')) +_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', '')) +_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', '')) +_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', '')) +_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', '')) +_charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', '')) +_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', '')) +_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', '')) +_charsets.add(Charset(211, 'utf8', 'utf8_sinhala_ci', '')) +_charsets.add(Charset(212, 'utf8', 'utf8_german2_ci', '')) +_charsets.add(Charset(213, 'utf8', 'utf8_croatian_ci', '')) +_charsets.add(Charset(214, 'utf8', 'utf8_unicode_520_ci', '')) +_charsets.add(Charset(215, 'utf8', 'utf8_vietnamese_ci', '')) +_charsets.add(Charset(223, 'utf8', 'utf8_general_mysql500_ci', '')) +_charsets.add(Charset(224, 'utf8mb4', 'utf8mb4_unicode_ci', '')) +_charsets.add(Charset(225, 'utf8mb4', 'utf8mb4_icelandic_ci', '')) +_charsets.add(Charset(226, 'utf8mb4', 'utf8mb4_latvian_ci', '')) +_charsets.add(Charset(227, 'utf8mb4', 'utf8mb4_romanian_ci', '')) +_charsets.add(Charset(228, 'utf8mb4', 'utf8mb4_slovenian_ci', '')) +_charsets.add(Charset(229, 'utf8mb4', 'utf8mb4_polish_ci', '')) +_charsets.add(Charset(230, 'utf8mb4', 'utf8mb4_estonian_ci', '')) +_charsets.add(Charset(231, 'utf8mb4', 'utf8mb4_spanish_ci', '')) +_charsets.add(Charset(232, 
'utf8mb4', 'utf8mb4_swedish_ci', '')) +_charsets.add(Charset(233, 'utf8mb4', 'utf8mb4_turkish_ci', '')) +_charsets.add(Charset(234, 'utf8mb4', 'utf8mb4_czech_ci', '')) +_charsets.add(Charset(235, 'utf8mb4', 'utf8mb4_danish_ci', '')) +_charsets.add(Charset(236, 'utf8mb4', 'utf8mb4_lithuanian_ci', '')) +_charsets.add(Charset(237, 'utf8mb4', 'utf8mb4_slovak_ci', '')) +_charsets.add(Charset(238, 'utf8mb4', 'utf8mb4_spanish2_ci', '')) +_charsets.add(Charset(239, 'utf8mb4', 'utf8mb4_roman_ci', '')) +_charsets.add(Charset(240, 'utf8mb4', 'utf8mb4_persian_ci', '')) +_charsets.add(Charset(241, 'utf8mb4', 'utf8mb4_esperanto_ci', '')) +_charsets.add(Charset(242, 'utf8mb4', 'utf8mb4_hungarian_ci', '')) +_charsets.add(Charset(243, 'utf8mb4', 'utf8mb4_sinhala_ci', '')) +_charsets.add(Charset(244, 'utf8mb4', 'utf8mb4_german2_ci', '')) +_charsets.add(Charset(245, 'utf8mb4', 'utf8mb4_croatian_ci', '')) +_charsets.add(Charset(246, 'utf8mb4', 'utf8mb4_unicode_520_ci', '')) +_charsets.add(Charset(247, 'utf8mb4', 'utf8mb4_vietnamese_ci', '')) +_charsets.add(Charset(248, 'gb18030', 'gb18030_chinese_ci', 'Yes')) +_charsets.add(Charset(249, 'gb18030', 'gb18030_bin', '')) +_charsets.add(Charset(250, 'gb18030', 'gb18030_unicode_520_ci', '')) +_charsets.add(Charset(255, 'utf8mb4', 'utf8mb4_0900_ai_ci', '')) + +charset_by_name = _charsets.by_name +charset_by_id = _charsets.by_id + + +#TODO: remove this +def charset_to_encoding(name): + """Convert MySQL's charset name to Python's codec name""" + if name in ('utf8mb4', 'utf8mb3'): + return 'utf8' + return name diff --git a/utill/db/pymysql/connections.py b/utill/db/pymysql/connections.py new file mode 100644 index 0000000..2e4122b --- /dev/null +++ b/utill/db/pymysql/connections.py @@ -0,0 +1,1279 @@ +# Python implementation of the MySQL client-server protocol +# http://dev.mysql.com/doc/internals/en/client-server-protocol.html +# Error codes: +# http://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html +from __future__ import 
print_function +from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON + +import errno +import io +import os +import socket +import struct +import sys +import traceback +import warnings + +from . import _auth + +from .charset import charset_by_name, charset_by_id +from .constants import CLIENT, COMMAND, CR, FIELD_TYPE, SERVER_STATUS +from . import converters +from .cursors import Cursor +from .optionfile import Parser +from .protocol import ( + dump_packet, MysqlPacket, FieldDescriptorPacket, OKPacketWrapper, + EOFPacketWrapper, LoadLocalPacketWrapper +) +from .util import byte2int, int2byte +from . import err, VERSION_STRING + +try: + import ssl + SSL_ENABLED = True +except ImportError: + ssl = None + SSL_ENABLED = False + +try: + import getpass + DEFAULT_USER = getpass.getuser() + del getpass +except (ImportError, KeyError): + # KeyError occurs when there's no entry in OS database for a current user. + DEFAULT_USER = None + +DEBUG = False + +_py_version = sys.version_info[:2] + +if PY2: + pass +elif _py_version < (3, 6): + # See http://bugs.python.org/issue24870 + _surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)] + + def _fast_surrogateescape(s): + return s.decode('latin1').translate(_surrogateescape_table) +else: + def _fast_surrogateescape(s): + return s.decode('ascii', 'surrogateescape') + +# socket.makefile() in Python 2 is not usable because very inefficient and +# bad behavior about timeout. +# XXX: ._socketio doesn't work under IronPython. +if PY2 and not IRONPYTHON: + # read method of file-like returned by sock.makefile() is very slow. + # So we copy io-based one from Python 3. + from ._socketio import SocketIO + + def _makefile(sock, mode): + return io.BufferedReader(SocketIO(sock, mode)) +else: + # socket.makefile in Python 3 is nice. 
+ def _makefile(sock, mode): + return sock.makefile(mode) + + +TEXT_TYPES = { + FIELD_TYPE.BIT, + FIELD_TYPE.BLOB, + FIELD_TYPE.LONG_BLOB, + FIELD_TYPE.MEDIUM_BLOB, + FIELD_TYPE.STRING, + FIELD_TYPE.TINY_BLOB, + FIELD_TYPE.VAR_STRING, + FIELD_TYPE.VARCHAR, + FIELD_TYPE.GEOMETRY, +} + + +DEFAULT_CHARSET = 'utf8mb4' + +MAX_PACKET_LEN = 2**24-1 + + +def pack_int24(n): + return struct.pack('`_ in the + specification. + """ + + _sock = None + _auth_plugin_name = '' + _closed = False + _secure = False + + def __init__(self, host=None, user=None, password="", + database=None, port=0, unix_socket=None, + charset='', sql_mode=None, + read_default_file=None, conv=None, use_unicode=None, + client_flag=0, cursorclass=Cursor, init_command=None, + connect_timeout=10, ssl=None, read_default_group=None, + compress=None, named_pipe=None, + autocommit=False, db=None, passwd=None, local_infile=False, + max_allowed_packet=16*1024*1024, defer_connect=False, + auth_plugin_map=None, read_timeout=None, write_timeout=None, + bind_address=None, binary_prefix=False, program_name=None, + server_public_key=None): + if use_unicode is None and sys.version_info[0] > 2: + use_unicode = True + + if db is not None and database is None: + database = db + if passwd is not None and not password: + password = passwd + + if compress or named_pipe: + raise NotImplementedError("compress and named_pipe arguments are not supported") + + self._local_infile = bool(local_infile) + if self._local_infile: + client_flag |= CLIENT.LOCAL_FILES + + if read_default_group and not read_default_file: + if sys.platform.startswith("win"): + read_default_file = "c:\\my.ini" + else: + read_default_file = "/etc/my.cnf" + + if read_default_file: + if not read_default_group: + read_default_group = "client" + + cfg = Parser() + cfg.read(os.path.expanduser(read_default_file)) + + def _config(key, arg): + if arg: + return arg + try: + return cfg.get(read_default_group, key) + except Exception: + return arg + + user = 
_config("user", user) + password = _config("password", password) + host = _config("host", host) + database = _config("database", database) + unix_socket = _config("socket", unix_socket) + port = int(_config("port", port)) + bind_address = _config("bind-address", bind_address) + charset = _config("default-character-set", charset) + if not ssl: + ssl = {} + if isinstance(ssl, dict): + for key in ["ca", "capath", "cert", "key", "cipher"]: + value = _config("ssl-" + key, ssl.get(key)) + if value: + ssl[key] = value + + self.ssl = False + if ssl: + if not SSL_ENABLED: + raise NotImplementedError("ssl module not found") + self.ssl = True + client_flag |= CLIENT.SSL + self.ctx = self._create_ssl_ctx(ssl) + + self.host = host or "localhost" + self.port = port or 3306 + self.user = user or DEFAULT_USER + self.password = password or b"" + if isinstance(self.password, text_type): + self.password = self.password.encode('latin1') + self.db = database + self.unix_socket = unix_socket + self.bind_address = bind_address + if not (0 < connect_timeout <= 31536000): + raise ValueError("connect_timeout should be >0 and <=31536000") + self.connect_timeout = connect_timeout or None + if read_timeout is not None and read_timeout <= 0: + raise ValueError("read_timeout should be >= 0") + self._read_timeout = read_timeout + if write_timeout is not None and write_timeout <= 0: + raise ValueError("write_timeout should be >= 0") + self._write_timeout = write_timeout + if charset: + self.charset = charset + self.use_unicode = True + else: + self.charset = DEFAULT_CHARSET + self.use_unicode = False + + if use_unicode is not None: + self.use_unicode = use_unicode + + self.encoding = charset_by_name(self.charset).encoding + + client_flag |= CLIENT.CAPABILITIES + if self.db: + client_flag |= CLIENT.CONNECT_WITH_DB + + self.client_flag = client_flag + + self.cursorclass = cursorclass + + self._result = None + self._affected_rows = 0 + self.host_info = "Not connected" + + # specified autocommit mode. 
None means use server default. + self.autocommit_mode = autocommit + + if conv is None: + conv = converters.conversions + + # Need for MySQLdb compatibility. + self.encoders = {k: v for (k, v) in conv.items() if type(k) is not int} + self.decoders = {k: v for (k, v) in conv.items() if type(k) is int} + self.sql_mode = sql_mode + self.init_command = init_command + self.max_allowed_packet = max_allowed_packet + self._auth_plugin_map = auth_plugin_map or {} + self._binary_prefix = binary_prefix + self.server_public_key = server_public_key + + self._connect_attrs = { + '_client_name': 'pymysql', + '_pid': str(os.getpid()), + '_client_version': VERSION_STRING, + } + + if program_name: + self._connect_attrs["program_name"] = program_name + + if defer_connect: + self._sock = None + else: + self.connect() + + def _create_ssl_ctx(self, sslp): + if isinstance(sslp, ssl.SSLContext): + return sslp + ca = sslp.get('ca') + capath = sslp.get('capath') + hasnoca = ca is None and capath is None + ctx = ssl.create_default_context(cafile=ca, capath=capath) + ctx.check_hostname = not hasnoca and sslp.get('check_hostname', True) + ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED + if 'cert' in sslp: + ctx.load_cert_chain(sslp['cert'], keyfile=sslp.get('key')) + if 'cipher' in sslp: + ctx.set_ciphers(sslp['cipher']) + ctx.options |= ssl.OP_NO_SSLv2 + ctx.options |= ssl.OP_NO_SSLv3 + return ctx + + def close(self): + """ + Send the quit message and close the socket. + + See `Connection.close() `_ + in the specification. + + :raise Error: If the connection is already closed. + """ + if self._closed: + raise err.Error("Already closed") + self._closed = True + if self._sock is None: + return + send_data = struct.pack('`_ + in the specification. + """ + self._execute_command(COMMAND.COM_QUERY, "COMMIT") + self._read_ok_packet() + + def rollback(self): + """ + Roll back the current transaction. + + See `Connection.rollback() `_ + in the specification. 
+ """ + self._execute_command(COMMAND.COM_QUERY, "ROLLBACK") + self._read_ok_packet() + + def show_warnings(self): + """Send the "SHOW WARNINGS" SQL command.""" + self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS") + result = MySQLResult(self) + result.read() + return result.rows + + def select_db(self, db): + """ + Set current db. + + :param db: The name of the db. + """ + self._execute_command(COMMAND.COM_INIT_DB, db) + self._read_ok_packet() + + def escape(self, obj, mapping=None): + """Escape whatever value you pass to it. + + Non-standard, for internal use; do not use this in your applications. + """ + if isinstance(obj, str_type): + return "'" + self.escape_string(obj) + "'" + if isinstance(obj, (bytes, bytearray)): + ret = self._quote_bytes(obj) + if self._binary_prefix: + ret = "_binary" + ret + return ret + return converters.escape_item(obj, self.charset, mapping=mapping) + + def literal(self, obj): + """Alias for escape() + + Non-standard, for internal use; do not use this in your applications. + """ + return self.escape(obj, self.encoders) + + def escape_string(self, s): + if (self.server_status & + SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): + return s.replace("'", "''") + return converters.escape_string(s) + + def _quote_bytes(self, s): + if (self.server_status & + SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): + return "'%s'" % (_fast_surrogateescape(s.replace(b"'", b"''")),) + return converters.escape_bytes(s) + + def cursor(self, cursor=None): + """ + Create a new cursor to execute queries with. + + :param cursor: The type of cursor to create; one of :py:class:`Cursor`, + :py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`. + None means use Cursor. 
+ """ + if cursor: + return cursor(self) + return self.cursorclass(self) + + def __enter__(self): + """Context manager that returns a Cursor""" + warnings.warn( + "Context manager API of Connection object is deprecated; Use conn.begin()", + DeprecationWarning) + return self.cursor() + + def __exit__(self, exc, value, traceback): + """On successful exit, commit. On exception, rollback""" + if exc: + self.rollback() + else: + self.commit() + + # The following methods are INTERNAL USE ONLY (called from Cursor) + def query(self, sql, unbuffered=False): + # if DEBUG: + # print("DEBUG: sending query:", sql) + if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON): + if PY2: + sql = sql.encode(self.encoding) + else: + sql = sql.encode(self.encoding, 'surrogateescape') + self._execute_command(COMMAND.COM_QUERY, sql) + self._affected_rows = self._read_query_result(unbuffered=unbuffered) + return self._affected_rows + + def next_result(self, unbuffered=False): + self._affected_rows = self._read_query_result(unbuffered=unbuffered) + return self._affected_rows + + def affected_rows(self): + return self._affected_rows + + def kill(self, thread_id): + arg = struct.pack('= 5: + self.client_flag |= CLIENT.MULTI_RESULTS + + if self.user is None: + raise ValueError("Did not specify a username") + + charset_id = charset_by_name(self.charset).id + if isinstance(self.user, text_type): + self.user = self.user.encode(self.encoding) + + data_init = struct.pack('=5.0) + data += authresp + b'\0' + + if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB: + if isinstance(self.db, text_type): + self.db = self.db.encode(self.encoding) + data += self.db + b'\0' + + if self.server_capabilities & CLIENT.PLUGIN_AUTH: + data += (plugin_name or b'') + b'\0' + + if self.server_capabilities & CLIENT.CONNECT_ATTRS: + connect_attrs = b'' + for k, v in self._connect_attrs.items(): + k = k.encode('utf-8') + connect_attrs += struct.pack('B', len(k)) + k + v = v.encode('utf-8') + 
connect_attrs += struct.pack('B', len(v)) + v + data += struct.pack('B', len(connect_attrs)) + connect_attrs + + self.write_packet(data) + auth_packet = self._read_packet() + + # if authentication method isn't accepted the first byte + # will have the octet 254 + if auth_packet.is_auth_switch_request(): + if DEBUG: print("received auth switch") + # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest + auth_packet.read_uint8() # 0xfe packet identifier + plugin_name = auth_packet.read_string() + if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None: + auth_packet = self._process_auth(plugin_name, auth_packet) + else: + # send legacy handshake + data = _auth.scramble_old_password(self.password, self.salt) + b'\0' + self.write_packet(data) + auth_packet = self._read_packet() + elif auth_packet.is_extra_auth_data(): + if DEBUG: + print("received extra data") + # https://dev.mysql.com/doc/internals/en/successful-authentication.html + if self._auth_plugin_name == "caching_sha2_password": + auth_packet = _auth.caching_sha2_password_auth(self, auth_packet) + elif self._auth_plugin_name == "sha256_password": + auth_packet = _auth.sha256_password_auth(self, auth_packet) + else: + raise err.OperationalError("Received extra packet for auth method %r", self._auth_plugin_name) + + if DEBUG: print("Succeed to auth") + + def _process_auth(self, plugin_name, auth_packet): + handler = self._get_auth_plugin_handler(plugin_name) + if handler: + try: + return handler.authenticate(auth_packet) + except AttributeError: + if plugin_name != b'dialog': + raise err.OperationalError(2059, "Authentication plugin '%s'" + " not loaded: - %r missing authenticate method" % (plugin_name, type(handler))) + if plugin_name == b"caching_sha2_password": + return _auth.caching_sha2_password_auth(self, auth_packet) + elif plugin_name == b"sha256_password": + return _auth.sha256_password_auth(self, auth_packet) + elif plugin_name == 
b"mysql_native_password": + data = _auth.scramble_native_password(self.password, auth_packet.read_all()) + elif plugin_name == b"mysql_old_password": + data = _auth.scramble_old_password(self.password, auth_packet.read_all()) + b'\0' + elif plugin_name == b"mysql_clear_password": + # https://dev.mysql.com/doc/internals/en/clear-text-authentication.html + data = self.password + b'\0' + elif plugin_name == b"dialog": + pkt = auth_packet + while True: + flag = pkt.read_uint8() + echo = (flag & 0x06) == 0x02 + last = (flag & 0x01) == 0x01 + prompt = pkt.read_all() + + if prompt == b"Password: ": + self.write_packet(self.password + b'\0') + elif handler: + resp = 'no response - TypeError within plugin.prompt method' + try: + resp = handler.prompt(echo, prompt) + self.write_packet(resp + b'\0') + except AttributeError: + raise err.OperationalError(2059, "Authentication plugin '%s'" \ + " not loaded: - %r missing prompt method" % (plugin_name, handler)) + except TypeError: + raise err.OperationalError(2061, "Authentication plugin '%s'" \ + " %r didn't respond with string. 
Returned '%r' to prompt %r" % (plugin_name, handler, resp, prompt)) + else: + raise err.OperationalError(2059, "Authentication plugin '%s' (%r) not configured" % (plugin_name, handler)) + pkt = self._read_packet() + pkt.check_error() + if pkt.is_ok_packet() or last: + break + return pkt + else: + raise err.OperationalError(2059, "Authentication plugin '%s' not configured" % plugin_name) + + self.write_packet(data) + pkt = self._read_packet() + pkt.check_error() + return pkt + + def _get_auth_plugin_handler(self, plugin_name): + plugin_class = self._auth_plugin_map.get(plugin_name) + if not plugin_class and isinstance(plugin_name, bytes): + plugin_class = self._auth_plugin_map.get(plugin_name.decode('ascii')) + if plugin_class: + try: + handler = plugin_class(self) + except TypeError: + raise err.OperationalError(2059, "Authentication plugin '%s'" + " not loaded: - %r cannot be constructed with connection object" % (plugin_name, plugin_class)) + else: + handler = None + return handler + + # _mysql support + def thread_id(self): + return self.server_thread_id[0] + + def character_set_name(self): + return self.charset + + def get_host_info(self): + return self.host_info + + def get_proto_info(self): + return self.protocol_version + + def _get_server_information(self): + i = 0 + packet = self._read_packet() + data = packet.get_all_data() + + self.protocol_version = byte2int(data[i:i+1]) + i += 1 + + server_end = data.find(b'\0', i) + self.server_version = data[i:server_end].decode('latin1') + i = server_end + 1 + + self.server_thread_id = struct.unpack('= i + 6: + lang, stat, cap_h, salt_len = struct.unpack('= i + salt_len: + # salt_len includes auth_plugin_data_part_1 and filler + self.salt += data[i:i+salt_len] + i += salt_len + + i+=1 + # AUTH PLUGIN NAME may appear here. + if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i: + # Due to Bug#59453 the auth-plugin-name is missing the terminating + # NUL-char in versions prior to 5.5.10 and 5.6.2. 
+ # ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake + # didn't use version checks as mariadb is corrected and reports + # earlier than those two. + server_end = data.find(b'\0', i) + if server_end < 0: # pragma: no cover - very specific upstream bug + # not found \0 and last field so take it all + self._auth_plugin_name = data[i:].decode('utf-8') + else: + self._auth_plugin_name = data[i:server_end].decode('utf-8') + + def get_server_info(self): + return self.server_version + + Warning = err.Warning + Error = err.Error + InterfaceError = err.InterfaceError + DatabaseError = err.DatabaseError + DataError = err.DataError + OperationalError = err.OperationalError + IntegrityError = err.IntegrityError + InternalError = err.InternalError + ProgrammingError = err.ProgrammingError + NotSupportedError = err.NotSupportedError + + +class MySQLResult(object): + + def __init__(self, connection): + """ + :type connection: Connection + """ + self.connection = connection + self.affected_rows = None + self.insert_id = None + self.server_status = None + self.warning_count = 0 + self.message = None + self.field_count = 0 + self.description = None + self.rows = None + self.has_next = None + self.unbuffered_active = False + + def __del__(self): + if self.unbuffered_active: + self._finish_unbuffered_query() + + def read(self): + try: + first_packet = self.connection._read_packet() + + if first_packet.is_ok_packet(): + self._read_ok_packet(first_packet) + elif first_packet.is_load_local_packet(): + self._read_load_local_packet(first_packet) + else: + self._read_result_packet(first_packet) + finally: + self.connection = None + + def init_unbuffered_query(self): + """ + :raise OperationalError: If the connection to the MySQL server is lost. 
+ :raise InternalError: + """ + self.unbuffered_active = True + first_packet = self.connection._read_packet() + + if first_packet.is_ok_packet(): + self._read_ok_packet(first_packet) + self.unbuffered_active = False + self.connection = None + elif first_packet.is_load_local_packet(): + self._read_load_local_packet(first_packet) + self.unbuffered_active = False + self.connection = None + else: + self.field_count = first_packet.read_length_encoded_integer() + self._get_descriptions() + + # Apparently, MySQLdb picks this number because it's the maximum + # value of a 64bit unsigned integer. Since we're emulating MySQLdb, + # we set it to this instead of None, which would be preferred. + self.affected_rows = 18446744073709551615 + + def _read_ok_packet(self, first_packet): + ok_packet = OKPacketWrapper(first_packet) + self.affected_rows = ok_packet.affected_rows + self.insert_id = ok_packet.insert_id + self.server_status = ok_packet.server_status + self.warning_count = ok_packet.warning_count + self.message = ok_packet.message + self.has_next = ok_packet.has_next + + def _read_load_local_packet(self, first_packet): + if not self.connection._local_infile: + raise RuntimeError( + "**WARN**: Received LOAD_LOCAL packet but local_infile option is false.") + load_packet = LoadLocalPacketWrapper(first_packet) + sender = LoadLocalFile(load_packet.filename, self.connection) + try: + sender.send_data() + except: + self.connection._read_packet() # skip ok packet + raise + + ok_packet = self.connection._read_packet() + if not ok_packet.is_ok_packet(): # pragma: no cover - upstream induced protocol error + raise err.OperationalError(2014, "Commands Out of Sync") + self._read_ok_packet(ok_packet) + + def _check_packet_is_eof(self, packet): + if not packet.is_eof_packet(): + return False + #TODO: Support CLIENT.DEPRECATE_EOF + # 1) Add DEPRECATE_EOF to CAPABILITIES + # 2) Mask CAPABILITIES with server_capabilities + # 3) if server_capabilities & CLIENT.DEPRECATE_EOF: use 
OKPacketWrapper instead of EOFPacketWrapper + wp = EOFPacketWrapper(packet) + self.warning_count = wp.warning_count + self.has_next = wp.has_next + return True + + def _read_result_packet(self, first_packet): + self.field_count = first_packet.read_length_encoded_integer() + self._get_descriptions() + self._read_rowdata_packet() + + def _read_rowdata_packet_unbuffered(self): + # Check if in an active query + if not self.unbuffered_active: + return + + # EOF + packet = self.connection._read_packet() + if self._check_packet_is_eof(packet): + self.unbuffered_active = False + self.connection = None + self.rows = None + return + + row = self._read_row_from_packet(packet) + self.affected_rows = 1 + self.rows = (row,) # rows should tuple of row for MySQL-python compatibility. + return row + + def _finish_unbuffered_query(self): + # After much reading on the MySQL protocol, it appears that there is, + # in fact, no way to stop MySQL from sending all the data after + # executing a query, so we just spin, and wait for an EOF packet. + while self.unbuffered_active: + packet = self.connection._read_packet() + if self._check_packet_is_eof(packet): + self.unbuffered_active = False + self.connection = None # release reference to kill cyclic reference. + + def _read_rowdata_packet(self): + """Read a rowdata packet for each data row in the result set.""" + rows = [] + while True: + packet = self.connection._read_packet() + if self._check_packet_is_eof(packet): + self.connection = None # release reference to kill cyclic reference. 
+ break + rows.append(self._read_row_from_packet(packet)) + + self.affected_rows = len(rows) + self.rows = tuple(rows) + + def _read_row_from_packet(self, packet): + row = [] + for encoding, converter in self.converters: + try: + data = packet.read_length_coded_string() + except IndexError: + # No more columns in this row + # See https://github.com/PyMySQL/PyMySQL/pull/434 + break + if data is not None: + if encoding is not None: + data = data.decode(encoding) + if DEBUG: print("DEBUG: DATA = ", data) + if converter is not None: + data = converter(data) + row.append(data) + return tuple(row) + + def _get_descriptions(self): + """Read a column descriptor packet for each column in the result.""" + self.fields = [] + self.converters = [] + use_unicode = self.connection.use_unicode + conn_encoding = self.connection.encoding + description = [] + + for i in range_type(self.field_count): + field = self.connection._read_packet(FieldDescriptorPacket) + self.fields.append(field) + description.append(field.description()) + field_type = field.type_code + if use_unicode: + if field_type == FIELD_TYPE.JSON: + # When SELECT from JSON column: charset = binary + # When SELECT CAST(... AS JSON): charset = connection encoding + # This behavior is different from TEXT / BLOB. + # We should decode result by connection encoding regardless charsetnr. + # See https://github.com/PyMySQL/PyMySQL/issues/488 + encoding = conn_encoding # SELECT CAST(... AS JSON) + elif field_type in TEXT_TYPES: + if field.charsetnr == 63: # binary + # TEXTs with charset=binary means BINARY types. 
+ encoding = None + else: + encoding = conn_encoding + else: + # Integers, Dates and Times, and other basic data is encoded in ascii + encoding = 'ascii' + else: + encoding = None + converter = self.connection.decoders.get(field_type) + if converter is converters.through: + converter = None + if DEBUG: print("DEBUG: field={}, converter={}".format(field, converter)) + self.converters.append((encoding, converter)) + + eof_packet = self.connection._read_packet() + assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' + self.description = tuple(description) + + +class LoadLocalFile(object): + def __init__(self, filename, connection): + self.filename = filename + self.connection = connection + + def send_data(self): + """Send data packets from the local file to the server""" + if not self.connection._sock: + raise err.InterfaceError("(0, '')") + conn = self.connection + + try: + with open(self.filename, 'rb') as open_file: + packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough + while True: + chunk = open_file.read(packet_size) + if not chunk: + break + conn.write_packet(chunk) + except IOError: + raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename)) + finally: + # send the empty packet to signify we are done sending data + conn.write_packet(b'') diff --git a/utill/db/pymysql/constants/CLIENT.py b/utill/db/pymysql/constants/CLIENT.py new file mode 100644 index 0000000..b42f152 --- /dev/null +++ b/utill/db/pymysql/constants/CLIENT.py @@ -0,0 +1,31 @@ +# https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +LONG_PASSWORD = 1 +FOUND_ROWS = 1 << 1 +LONG_FLAG = 1 << 2 +CONNECT_WITH_DB = 1 << 3 +NO_SCHEMA = 1 << 4 +COMPRESS = 1 << 5 +ODBC = 1 << 6 +LOCAL_FILES = 1 << 7 +IGNORE_SPACE = 1 << 8 +PROTOCOL_41 = 1 << 9 +INTERACTIVE = 1 << 10 +SSL = 1 << 11 +IGNORE_SIGPIPE = 1 << 12 +TRANSACTIONS = 1 << 13 +SECURE_CONNECTION = 1 << 15 +MULTI_STATEMENTS = 1 << 16 +MULTI_RESULTS = 
1 << 17 +PS_MULTI_RESULTS = 1 << 18 +PLUGIN_AUTH = 1 << 19 +CONNECT_ATTRS = 1 << 20 +PLUGIN_AUTH_LENENC_CLIENT_DATA = 1 << 21 +CAPABILITIES = ( + LONG_PASSWORD | LONG_FLAG | PROTOCOL_41 | TRANSACTIONS + | SECURE_CONNECTION | MULTI_RESULTS + | PLUGIN_AUTH | PLUGIN_AUTH_LENENC_CLIENT_DATA | CONNECT_ATTRS) + +# Not done yet +HANDLE_EXPIRED_PASSWORDS = 1 << 22 +SESSION_TRACK = 1 << 23 +DEPRECATE_EOF = 1 << 24 diff --git a/utill/db/pymysql/constants/COMMAND.py b/utill/db/pymysql/constants/COMMAND.py new file mode 100644 index 0000000..1da2755 --- /dev/null +++ b/utill/db/pymysql/constants/COMMAND.py @@ -0,0 +1,33 @@ + +COM_SLEEP = 0x00 +COM_QUIT = 0x01 +COM_INIT_DB = 0x02 +COM_QUERY = 0x03 +COM_FIELD_LIST = 0x04 +COM_CREATE_DB = 0x05 +COM_DROP_DB = 0x06 +COM_REFRESH = 0x07 +COM_SHUTDOWN = 0x08 +COM_STATISTICS = 0x09 +COM_PROCESS_INFO = 0x0a +COM_CONNECT = 0x0b +COM_PROCESS_KILL = 0x0c +COM_DEBUG = 0x0d +COM_PING = 0x0e +COM_TIME = 0x0f +COM_DELAYED_INSERT = 0x10 +COM_CHANGE_USER = 0x11 +COM_BINLOG_DUMP = 0x12 +COM_TABLE_DUMP = 0x13 +COM_CONNECT_OUT = 0x14 +COM_REGISTER_SLAVE = 0x15 +COM_STMT_PREPARE = 0x16 +COM_STMT_EXECUTE = 0x17 +COM_STMT_SEND_LONG_DATA = 0x18 +COM_STMT_CLOSE = 0x19 +COM_STMT_RESET = 0x1a +COM_SET_OPTION = 0x1b +COM_STMT_FETCH = 0x1c +COM_DAEMON = 0x1d +COM_BINLOG_DUMP_GTID = 0x1e +COM_END = 0x1f diff --git a/utill/db/pymysql/constants/CR.py b/utill/db/pymysql/constants/CR.py new file mode 100644 index 0000000..48ca956 --- /dev/null +++ b/utill/db/pymysql/constants/CR.py @@ -0,0 +1,68 @@ +# flake8: noqa +# errmsg.h +CR_ERROR_FIRST = 2000 +CR_UNKNOWN_ERROR = 2000 +CR_SOCKET_CREATE_ERROR = 2001 +CR_CONNECTION_ERROR = 2002 +CR_CONN_HOST_ERROR = 2003 +CR_IPSOCK_ERROR = 2004 +CR_UNKNOWN_HOST = 2005 +CR_SERVER_GONE_ERROR = 2006 +CR_VERSION_ERROR = 2007 +CR_OUT_OF_MEMORY = 2008 +CR_WRONG_HOST_INFO = 2009 +CR_LOCALHOST_CONNECTION = 2010 +CR_TCP_CONNECTION = 2011 +CR_SERVER_HANDSHAKE_ERR = 2012 +CR_SERVER_LOST = 2013 +CR_COMMANDS_OUT_OF_SYNC = 2014 
+CR_NAMEDPIPE_CONNECTION = 2015 +CR_NAMEDPIPEWAIT_ERROR = 2016 +CR_NAMEDPIPEOPEN_ERROR = 2017 +CR_NAMEDPIPESETSTATE_ERROR = 2018 +CR_CANT_READ_CHARSET = 2019 +CR_NET_PACKET_TOO_LARGE = 2020 +CR_EMBEDDED_CONNECTION = 2021 +CR_PROBE_SLAVE_STATUS = 2022 +CR_PROBE_SLAVE_HOSTS = 2023 +CR_PROBE_SLAVE_CONNECT = 2024 +CR_PROBE_MASTER_CONNECT = 2025 +CR_SSL_CONNECTION_ERROR = 2026 +CR_MALFORMED_PACKET = 2027 +CR_WRONG_LICENSE = 2028 + +CR_NULL_POINTER = 2029 +CR_NO_PREPARE_STMT = 2030 +CR_PARAMS_NOT_BOUND = 2031 +CR_DATA_TRUNCATED = 2032 +CR_NO_PARAMETERS_EXISTS = 2033 +CR_INVALID_PARAMETER_NO = 2034 +CR_INVALID_BUFFER_USE = 2035 +CR_UNSUPPORTED_PARAM_TYPE = 2036 + +CR_SHARED_MEMORY_CONNECTION = 2037 +CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038 +CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039 +CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040 +CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041 +CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042 +CR_SHARED_MEMORY_MAP_ERROR = 2043 +CR_SHARED_MEMORY_EVENT_ERROR = 2044 +CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045 +CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046 +CR_CONN_UNKNOW_PROTOCOL = 2047 +CR_INVALID_CONN_HANDLE = 2048 +CR_SECURE_AUTH = 2049 +CR_FETCH_CANCELED = 2050 +CR_NO_DATA = 2051 +CR_NO_STMT_METADATA = 2052 +CR_NO_RESULT_SET = 2053 +CR_NOT_IMPLEMENTED = 2054 +CR_SERVER_LOST_EXTENDED = 2055 +CR_STMT_CLOSED = 2056 +CR_NEW_STMT_METADATA = 2057 +CR_ALREADY_CONNECTED = 2058 +CR_AUTH_PLUGIN_CANNOT_LOAD = 2059 +CR_DUPLICATE_CONNECTION_ATTR = 2060 +CR_AUTH_PLUGIN_ERR = 2061 +CR_ERROR_LAST = 2061 diff --git a/utill/db/pymysql/constants/ER.py b/utill/db/pymysql/constants/ER.py new file mode 100644 index 0000000..79b88af --- /dev/null +++ b/utill/db/pymysql/constants/ER.py @@ -0,0 +1,475 @@ + +ERROR_FIRST = 1000 +HASHCHK = 1000 +NISAMCHK = 1001 +NO = 1002 +YES = 1003 +CANT_CREATE_FILE = 1004 +CANT_CREATE_TABLE = 1005 +CANT_CREATE_DB = 1006 +DB_CREATE_EXISTS = 1007 +DB_DROP_EXISTS = 1008 +DB_DROP_DELETE = 1009 +DB_DROP_RMDIR = 1010 +CANT_DELETE_FILE = 1011 
+CANT_FIND_SYSTEM_REC = 1012 +CANT_GET_STAT = 1013 +CANT_GET_WD = 1014 +CANT_LOCK = 1015 +CANT_OPEN_FILE = 1016 +FILE_NOT_FOUND = 1017 +CANT_READ_DIR = 1018 +CANT_SET_WD = 1019 +CHECKREAD = 1020 +DISK_FULL = 1021 +DUP_KEY = 1022 +ERROR_ON_CLOSE = 1023 +ERROR_ON_READ = 1024 +ERROR_ON_RENAME = 1025 +ERROR_ON_WRITE = 1026 +FILE_USED = 1027 +FILSORT_ABORT = 1028 +FORM_NOT_FOUND = 1029 +GET_ERRNO = 1030 +ILLEGAL_HA = 1031 +KEY_NOT_FOUND = 1032 +NOT_FORM_FILE = 1033 +NOT_KEYFILE = 1034 +OLD_KEYFILE = 1035 +OPEN_AS_READONLY = 1036 +OUTOFMEMORY = 1037 +OUT_OF_SORTMEMORY = 1038 +UNEXPECTED_EOF = 1039 +CON_COUNT_ERROR = 1040 +OUT_OF_RESOURCES = 1041 +BAD_HOST_ERROR = 1042 +HANDSHAKE_ERROR = 1043 +DBACCESS_DENIED_ERROR = 1044 +ACCESS_DENIED_ERROR = 1045 +NO_DB_ERROR = 1046 +UNKNOWN_COM_ERROR = 1047 +BAD_NULL_ERROR = 1048 +BAD_DB_ERROR = 1049 +TABLE_EXISTS_ERROR = 1050 +BAD_TABLE_ERROR = 1051 +NON_UNIQ_ERROR = 1052 +SERVER_SHUTDOWN = 1053 +BAD_FIELD_ERROR = 1054 +WRONG_FIELD_WITH_GROUP = 1055 +WRONG_GROUP_FIELD = 1056 +WRONG_SUM_SELECT = 1057 +WRONG_VALUE_COUNT = 1058 +TOO_LONG_IDENT = 1059 +DUP_FIELDNAME = 1060 +DUP_KEYNAME = 1061 +DUP_ENTRY = 1062 +WRONG_FIELD_SPEC = 1063 +PARSE_ERROR = 1064 +EMPTY_QUERY = 1065 +NONUNIQ_TABLE = 1066 +INVALID_DEFAULT = 1067 +MULTIPLE_PRI_KEY = 1068 +TOO_MANY_KEYS = 1069 +TOO_MANY_KEY_PARTS = 1070 +TOO_LONG_KEY = 1071 +KEY_COLUMN_DOES_NOT_EXITS = 1072 +BLOB_USED_AS_KEY = 1073 +TOO_BIG_FIELDLENGTH = 1074 +WRONG_AUTO_KEY = 1075 +READY = 1076 +NORMAL_SHUTDOWN = 1077 +GOT_SIGNAL = 1078 +SHUTDOWN_COMPLETE = 1079 +FORCING_CLOSE = 1080 +IPSOCK_ERROR = 1081 +NO_SUCH_INDEX = 1082 +WRONG_FIELD_TERMINATORS = 1083 +BLOBS_AND_NO_TERMINATED = 1084 +TEXTFILE_NOT_READABLE = 1085 +FILE_EXISTS_ERROR = 1086 +LOAD_INFO = 1087 +ALTER_INFO = 1088 +WRONG_SUB_KEY = 1089 +CANT_REMOVE_ALL_FIELDS = 1090 +CANT_DROP_FIELD_OR_KEY = 1091 +INSERT_INFO = 1092 +UPDATE_TABLE_USED = 1093 +NO_SUCH_THREAD = 1094 +KILL_DENIED_ERROR = 1095 +NO_TABLES_USED = 1096 +TOO_BIG_SET = 1097 
+NO_UNIQUE_LOGFILE = 1098 +TABLE_NOT_LOCKED_FOR_WRITE = 1099 +TABLE_NOT_LOCKED = 1100 +BLOB_CANT_HAVE_DEFAULT = 1101 +WRONG_DB_NAME = 1102 +WRONG_TABLE_NAME = 1103 +TOO_BIG_SELECT = 1104 +UNKNOWN_ERROR = 1105 +UNKNOWN_PROCEDURE = 1106 +WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 +WRONG_PARAMETERS_TO_PROCEDURE = 1108 +UNKNOWN_TABLE = 1109 +FIELD_SPECIFIED_TWICE = 1110 +INVALID_GROUP_FUNC_USE = 1111 +UNSUPPORTED_EXTENSION = 1112 +TABLE_MUST_HAVE_COLUMNS = 1113 +RECORD_FILE_FULL = 1114 +UNKNOWN_CHARACTER_SET = 1115 +TOO_MANY_TABLES = 1116 +TOO_MANY_FIELDS = 1117 +TOO_BIG_ROWSIZE = 1118 +STACK_OVERRUN = 1119 +WRONG_OUTER_JOIN = 1120 +NULL_COLUMN_IN_INDEX = 1121 +CANT_FIND_UDF = 1122 +CANT_INITIALIZE_UDF = 1123 +UDF_NO_PATHS = 1124 +UDF_EXISTS = 1125 +CANT_OPEN_LIBRARY = 1126 +CANT_FIND_DL_ENTRY = 1127 +FUNCTION_NOT_DEFINED = 1128 +HOST_IS_BLOCKED = 1129 +HOST_NOT_PRIVILEGED = 1130 +PASSWORD_ANONYMOUS_USER = 1131 +PASSWORD_NOT_ALLOWED = 1132 +PASSWORD_NO_MATCH = 1133 +UPDATE_INFO = 1134 +CANT_CREATE_THREAD = 1135 +WRONG_VALUE_COUNT_ON_ROW = 1136 +CANT_REOPEN_TABLE = 1137 +INVALID_USE_OF_NULL = 1138 +REGEXP_ERROR = 1139 +MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 +NONEXISTING_GRANT = 1141 +TABLEACCESS_DENIED_ERROR = 1142 +COLUMNACCESS_DENIED_ERROR = 1143 +ILLEGAL_GRANT_FOR_TABLE = 1144 +GRANT_WRONG_HOST_OR_USER = 1145 +NO_SUCH_TABLE = 1146 +NONEXISTING_TABLE_GRANT = 1147 +NOT_ALLOWED_COMMAND = 1148 +SYNTAX_ERROR = 1149 +DELAYED_CANT_CHANGE_LOCK = 1150 +TOO_MANY_DELAYED_THREADS = 1151 +ABORTING_CONNECTION = 1152 +NET_PACKET_TOO_LARGE = 1153 +NET_READ_ERROR_FROM_PIPE = 1154 +NET_FCNTL_ERROR = 1155 +NET_PACKETS_OUT_OF_ORDER = 1156 +NET_UNCOMPRESS_ERROR = 1157 +NET_READ_ERROR = 1158 +NET_READ_INTERRUPTED = 1159 +NET_ERROR_ON_WRITE = 1160 +NET_WRITE_INTERRUPTED = 1161 +TOO_LONG_STRING = 1162 +TABLE_CANT_HANDLE_BLOB = 1163 +TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 +DELAYED_INSERT_TABLE_LOCKED = 1165 +WRONG_COLUMN_NAME = 1166 +WRONG_KEY_COLUMN = 1167 +WRONG_MRG_TABLE = 1168 +DUP_UNIQUE = 1169 
+BLOB_KEY_WITHOUT_LENGTH = 1170 +PRIMARY_CANT_HAVE_NULL = 1171 +TOO_MANY_ROWS = 1172 +REQUIRES_PRIMARY_KEY = 1173 +NO_RAID_COMPILED = 1174 +UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 +KEY_DOES_NOT_EXITS = 1176 +CHECK_NO_SUCH_TABLE = 1177 +CHECK_NOT_IMPLEMENTED = 1178 +CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 +ERROR_DURING_COMMIT = 1180 +ERROR_DURING_ROLLBACK = 1181 +ERROR_DURING_FLUSH_LOGS = 1182 +ERROR_DURING_CHECKPOINT = 1183 +NEW_ABORTING_CONNECTION = 1184 +DUMP_NOT_IMPLEMENTED = 1185 +FLUSH_MASTER_BINLOG_CLOSED = 1186 +INDEX_REBUILD = 1187 +MASTER = 1188 +MASTER_NET_READ = 1189 +MASTER_NET_WRITE = 1190 +FT_MATCHING_KEY_NOT_FOUND = 1191 +LOCK_OR_ACTIVE_TRANSACTION = 1192 +UNKNOWN_SYSTEM_VARIABLE = 1193 +CRASHED_ON_USAGE = 1194 +CRASHED_ON_REPAIR = 1195 +WARNING_NOT_COMPLETE_ROLLBACK = 1196 +TRANS_CACHE_FULL = 1197 +SLAVE_MUST_STOP = 1198 +SLAVE_NOT_RUNNING = 1199 +BAD_SLAVE = 1200 +MASTER_INFO = 1201 +SLAVE_THREAD = 1202 +TOO_MANY_USER_CONNECTIONS = 1203 +SET_CONSTANTS_ONLY = 1204 +LOCK_WAIT_TIMEOUT = 1205 +LOCK_TABLE_FULL = 1206 +READ_ONLY_TRANSACTION = 1207 +DROP_DB_WITH_READ_LOCK = 1208 +CREATE_DB_WITH_READ_LOCK = 1209 +WRONG_ARGUMENTS = 1210 +NO_PERMISSION_TO_CREATE_USER = 1211 +UNION_TABLES_IN_DIFFERENT_DIR = 1212 +LOCK_DEADLOCK = 1213 +TABLE_CANT_HANDLE_FT = 1214 +CANNOT_ADD_FOREIGN = 1215 +NO_REFERENCED_ROW = 1216 +ROW_IS_REFERENCED = 1217 +CONNECT_TO_MASTER = 1218 +QUERY_ON_MASTER = 1219 +ERROR_WHEN_EXECUTING_COMMAND = 1220 +WRONG_USAGE = 1221 +WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 +CANT_UPDATE_WITH_READLOCK = 1223 +MIXING_NOT_ALLOWED = 1224 +DUP_ARGUMENT = 1225 +USER_LIMIT_REACHED = 1226 +SPECIFIC_ACCESS_DENIED_ERROR = 1227 +LOCAL_VARIABLE = 1228 +GLOBAL_VARIABLE = 1229 +NO_DEFAULT = 1230 +WRONG_VALUE_FOR_VAR = 1231 +WRONG_TYPE_FOR_VAR = 1232 +VAR_CANT_BE_READ = 1233 +CANT_USE_OPTION_HERE = 1234 +NOT_SUPPORTED_YET = 1235 +MASTER_FATAL_ERROR_READING_BINLOG = 1236 +SLAVE_IGNORED_TABLE = 1237 +INCORRECT_GLOBAL_LOCAL_VAR = 1238 +WRONG_FK_DEF = 1239 
+KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 +OPERAND_COLUMNS = 1241 +SUBQUERY_NO_1_ROW = 1242 +UNKNOWN_STMT_HANDLER = 1243 +CORRUPT_HELP_DB = 1244 +CYCLIC_REFERENCE = 1245 +AUTO_CONVERT = 1246 +ILLEGAL_REFERENCE = 1247 +DERIVED_MUST_HAVE_ALIAS = 1248 +SELECT_REDUCED = 1249 +TABLENAME_NOT_ALLOWED_HERE = 1250 +NOT_SUPPORTED_AUTH_MODE = 1251 +SPATIAL_CANT_HAVE_NULL = 1252 +COLLATION_CHARSET_MISMATCH = 1253 +SLAVE_WAS_RUNNING = 1254 +SLAVE_WAS_NOT_RUNNING = 1255 +TOO_BIG_FOR_UNCOMPRESS = 1256 +ZLIB_Z_MEM_ERROR = 1257 +ZLIB_Z_BUF_ERROR = 1258 +ZLIB_Z_DATA_ERROR = 1259 +CUT_VALUE_GROUP_CONCAT = 1260 +WARN_TOO_FEW_RECORDS = 1261 +WARN_TOO_MANY_RECORDS = 1262 +WARN_NULL_TO_NOTNULL = 1263 +WARN_DATA_OUT_OF_RANGE = 1264 +WARN_DATA_TRUNCATED = 1265 +WARN_USING_OTHER_HANDLER = 1266 +CANT_AGGREGATE_2COLLATIONS = 1267 +DROP_USER = 1268 +REVOKE_GRANTS = 1269 +CANT_AGGREGATE_3COLLATIONS = 1270 +CANT_AGGREGATE_NCOLLATIONS = 1271 +VARIABLE_IS_NOT_STRUCT = 1272 +UNKNOWN_COLLATION = 1273 +SLAVE_IGNORED_SSL_PARAMS = 1274 +SERVER_IS_IN_SECURE_AUTH_MODE = 1275 +WARN_FIELD_RESOLVED = 1276 +BAD_SLAVE_UNTIL_COND = 1277 +MISSING_SKIP_SLAVE = 1278 +UNTIL_COND_IGNORED = 1279 +WRONG_NAME_FOR_INDEX = 1280 +WRONG_NAME_FOR_CATALOG = 1281 +WARN_QC_RESIZE = 1282 +BAD_FT_COLUMN = 1283 +UNKNOWN_KEY_CACHE = 1284 +WARN_HOSTNAME_WONT_WORK = 1285 +UNKNOWN_STORAGE_ENGINE = 1286 +WARN_DEPRECATED_SYNTAX = 1287 +NON_UPDATABLE_TABLE = 1288 +FEATURE_DISABLED = 1289 +OPTION_PREVENTS_STATEMENT = 1290 +DUPLICATED_VALUE_IN_TYPE = 1291 +TRUNCATED_WRONG_VALUE = 1292 +TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 +INVALID_ON_UPDATE = 1294 +UNSUPPORTED_PS = 1295 +GET_ERRMSG = 1296 +GET_TEMPORARY_ERRMSG = 1297 +UNKNOWN_TIME_ZONE = 1298 +WARN_INVALID_TIMESTAMP = 1299 +INVALID_CHARACTER_STRING = 1300 +WARN_ALLOWED_PACKET_OVERFLOWED = 1301 +CONFLICTING_DECLARATIONS = 1302 +SP_NO_RECURSIVE_CREATE = 1303 +SP_ALREADY_EXISTS = 1304 +SP_DOES_NOT_EXIST = 1305 +SP_DROP_FAILED = 1306 +SP_STORE_FAILED = 1307 +SP_LILABEL_MISMATCH = 1308 
+SP_LABEL_REDEFINE = 1309 +SP_LABEL_MISMATCH = 1310 +SP_UNINIT_VAR = 1311 +SP_BADSELECT = 1312 +SP_BADRETURN = 1313 +SP_BADSTATEMENT = 1314 +UPDATE_LOG_DEPRECATED_IGNORED = 1315 +UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 +QUERY_INTERRUPTED = 1317 +SP_WRONG_NO_OF_ARGS = 1318 +SP_COND_MISMATCH = 1319 +SP_NORETURN = 1320 +SP_NORETURNEND = 1321 +SP_BAD_CURSOR_QUERY = 1322 +SP_BAD_CURSOR_SELECT = 1323 +SP_CURSOR_MISMATCH = 1324 +SP_CURSOR_ALREADY_OPEN = 1325 +SP_CURSOR_NOT_OPEN = 1326 +SP_UNDECLARED_VAR = 1327 +SP_WRONG_NO_OF_FETCH_ARGS = 1328 +SP_FETCH_NO_DATA = 1329 +SP_DUP_PARAM = 1330 +SP_DUP_VAR = 1331 +SP_DUP_COND = 1332 +SP_DUP_CURS = 1333 +SP_CANT_ALTER = 1334 +SP_SUBSELECT_NYI = 1335 +STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 +SP_VARCOND_AFTER_CURSHNDLR = 1337 +SP_CURSOR_AFTER_HANDLER = 1338 +SP_CASE_NOT_FOUND = 1339 +FPARSER_TOO_BIG_FILE = 1340 +FPARSER_BAD_HEADER = 1341 +FPARSER_EOF_IN_COMMENT = 1342 +FPARSER_ERROR_IN_PARAMETER = 1343 +FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 +VIEW_NO_EXPLAIN = 1345 +FRM_UNKNOWN_TYPE = 1346 +WRONG_OBJECT = 1347 +NONUPDATEABLE_COLUMN = 1348 +VIEW_SELECT_DERIVED = 1349 +VIEW_SELECT_CLAUSE = 1350 +VIEW_SELECT_VARIABLE = 1351 +VIEW_SELECT_TMPTABLE = 1352 +VIEW_WRONG_LIST = 1353 +WARN_VIEW_MERGE = 1354 +WARN_VIEW_WITHOUT_KEY = 1355 +VIEW_INVALID = 1356 +SP_NO_DROP_SP = 1357 +SP_GOTO_IN_HNDLR = 1358 +TRG_ALREADY_EXISTS = 1359 +TRG_DOES_NOT_EXIST = 1360 +TRG_ON_VIEW_OR_TEMP_TABLE = 1361 +TRG_CANT_CHANGE_ROW = 1362 +TRG_NO_SUCH_ROW_IN_TRG = 1363 +NO_DEFAULT_FOR_FIELD = 1364 +DIVISION_BY_ZERO = 1365 +TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 +ILLEGAL_VALUE_FOR_TYPE = 1367 +VIEW_NONUPD_CHECK = 1368 +VIEW_CHECK_FAILED = 1369 +PROCACCESS_DENIED_ERROR = 1370 +RELAY_LOG_FAIL = 1371 +PASSWD_LENGTH = 1372 +UNKNOWN_TARGET_BINLOG = 1373 +IO_ERR_LOG_INDEX_READ = 1374 +BINLOG_PURGE_PROHIBITED = 1375 +FSEEK_FAIL = 1376 +BINLOG_PURGE_FATAL_ERR = 1377 +LOG_IN_USE = 1378 +LOG_PURGE_UNKNOWN_ERR = 1379 +RELAY_LOG_INIT = 1380 +NO_BINARY_LOGGING = 1381 
+RESERVED_SYNTAX = 1382 +WSAS_FAILED = 1383 +DIFF_GROUPS_PROC = 1384 +NO_GROUP_FOR_PROC = 1385 +ORDER_WITH_PROC = 1386 +LOGGING_PROHIBIT_CHANGING_OF = 1387 +NO_FILE_MAPPING = 1388 +WRONG_MAGIC = 1389 +PS_MANY_PARAM = 1390 +KEY_PART_0 = 1391 +VIEW_CHECKSUM = 1392 +VIEW_MULTIUPDATE = 1393 +VIEW_NO_INSERT_FIELD_LIST = 1394 +VIEW_DELETE_MERGE_VIEW = 1395 +CANNOT_USER = 1396 +XAER_NOTA = 1397 +XAER_INVAL = 1398 +XAER_RMFAIL = 1399 +XAER_OUTSIDE = 1400 +XAER_RMERR = 1401 +XA_RBROLLBACK = 1402 +NONEXISTING_PROC_GRANT = 1403 +PROC_AUTO_GRANT_FAIL = 1404 +PROC_AUTO_REVOKE_FAIL = 1405 +DATA_TOO_LONG = 1406 +SP_BAD_SQLSTATE = 1407 +STARTUP = 1408 +LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 +CANT_CREATE_USER_WITH_GRANT = 1410 +WRONG_VALUE_FOR_TYPE = 1411 +TABLE_DEF_CHANGED = 1412 +SP_DUP_HANDLER = 1413 +SP_NOT_VAR_ARG = 1414 +SP_NO_RETSET = 1415 +CANT_CREATE_GEOMETRY_OBJECT = 1416 +FAILED_ROUTINE_BREAK_BINLOG = 1417 +BINLOG_UNSAFE_ROUTINE = 1418 +BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 +EXEC_STMT_WITH_OPEN_CURSOR = 1420 +STMT_HAS_NO_OPEN_CURSOR = 1421 +COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 +NO_DEFAULT_FOR_VIEW_FIELD = 1423 +SP_NO_RECURSION = 1424 +TOO_BIG_SCALE = 1425 +TOO_BIG_PRECISION = 1426 +M_BIGGER_THAN_D = 1427 +WRONG_LOCK_OF_SYSTEM_TABLE = 1428 +CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 +QUERY_ON_FOREIGN_DATA_SOURCE = 1430 +FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 +FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 +FOREIGN_DATA_STRING_INVALID = 1433 +CANT_CREATE_FEDERATED_TABLE = 1434 +TRG_IN_WRONG_SCHEMA = 1435 +STACK_OVERRUN_NEED_MORE = 1436 +TOO_LONG_BODY = 1437 +WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 +TOO_BIG_DISPLAYWIDTH = 1439 +XAER_DUPID = 1440 +DATETIME_FUNCTION_OVERFLOW = 1441 +CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 +VIEW_PREVENT_UPDATE = 1443 +PS_NO_RECURSION = 1444 +SP_CANT_SET_AUTOCOMMIT = 1445 +MALFORMED_DEFINER = 1446 +VIEW_FRM_NO_USER = 1447 +VIEW_OTHER_USER = 1448 +NO_SUCH_USER = 1449 +FORBID_SCHEMA_CHANGE = 1450 +ROW_IS_REFERENCED_2 = 1451 
+NO_REFERENCED_ROW_2 = 1452 +SP_BAD_VAR_SHADOW = 1453 +TRG_NO_DEFINER = 1454 +OLD_FILE_FORMAT = 1455 +SP_RECURSION_LIMIT = 1456 +SP_PROC_TABLE_CORRUPT = 1457 +SP_WRONG_NAME = 1458 +TABLE_NEEDS_UPGRADE = 1459 +SP_NO_AGGREGATE = 1460 +MAX_PREPARED_STMT_COUNT_REACHED = 1461 +VIEW_RECURSIVE = 1462 +NON_GROUPING_FIELD_USED = 1463 +TABLE_CANT_HANDLE_SPKEYS = 1464 +NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 +USERNAME = 1466 +HOSTNAME = 1467 +WRONG_STRING_LENGTH = 1468 +ERROR_LAST = 1468 + +# https://github.com/PyMySQL/PyMySQL/issues/607 +CONSTRAINT_FAILED = 4025 diff --git a/utill/db/pymysql/constants/FIELD_TYPE.py b/utill/db/pymysql/constants/FIELD_TYPE.py new file mode 100644 index 0000000..51bd514 --- /dev/null +++ b/utill/db/pymysql/constants/FIELD_TYPE.py @@ -0,0 +1,33 @@ + + +DECIMAL = 0 +TINY = 1 +SHORT = 2 +LONG = 3 +FLOAT = 4 +DOUBLE = 5 +NULL = 6 +TIMESTAMP = 7 +LONGLONG = 8 +INT24 = 9 +DATE = 10 +TIME = 11 +DATETIME = 12 +YEAR = 13 +NEWDATE = 14 +VARCHAR = 15 +BIT = 16 +JSON = 245 +NEWDECIMAL = 246 +ENUM = 247 +SET = 248 +TINY_BLOB = 249 +MEDIUM_BLOB = 250 +LONG_BLOB = 251 +BLOB = 252 +VAR_STRING = 253 +STRING = 254 +GEOMETRY = 255 + +CHAR = TINY +INTERVAL = ENUM diff --git a/utill/db/pymysql/constants/FLAG.py b/utill/db/pymysql/constants/FLAG.py new file mode 100644 index 0000000..f9ebfad --- /dev/null +++ b/utill/db/pymysql/constants/FLAG.py @@ -0,0 +1,15 @@ +NOT_NULL = 1 +PRI_KEY = 2 +UNIQUE_KEY = 4 +MULTIPLE_KEY = 8 +BLOB = 16 +UNSIGNED = 32 +ZEROFILL = 64 +BINARY = 128 +ENUM = 256 +AUTO_INCREMENT = 512 +TIMESTAMP = 1024 +SET = 2048 +PART_KEY = 16384 +GROUP = 32767 +UNIQUE = 65536 diff --git a/utill/db/pymysql/constants/SERVER_STATUS.py b/utill/db/pymysql/constants/SERVER_STATUS.py new file mode 100644 index 0000000..6f5d566 --- /dev/null +++ b/utill/db/pymysql/constants/SERVER_STATUS.py @@ -0,0 +1,11 @@ + +SERVER_STATUS_IN_TRANS = 1 +SERVER_STATUS_AUTOCOMMIT = 2 +SERVER_MORE_RESULTS_EXISTS = 8 +SERVER_QUERY_NO_GOOD_INDEX_USED = 16 +SERVER_QUERY_NO_INDEX_USED = 32 
+SERVER_STATUS_CURSOR_EXISTS = 64 +SERVER_STATUS_LAST_ROW_SENT = 128 +SERVER_STATUS_DB_DROPPED = 256 +SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512 +SERVER_STATUS_METADATA_CHANGED = 1024 diff --git a/utill/db/pymysql/constants/__init__.py b/utill/db/pymysql/constants/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utill/db/pymysql/converters.py b/utill/db/pymysql/converters.py new file mode 100644 index 0000000..ce2be06 --- /dev/null +++ b/utill/db/pymysql/converters.py @@ -0,0 +1,411 @@ +from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON, unichr + +import datetime +from decimal import Decimal +import re +import time + +from .constants import FIELD_TYPE, FLAG +from .charset import charset_by_id, charset_to_encoding + + +def escape_item(val, charset, mapping=None): + if mapping is None: + mapping = encoders + encoder = mapping.get(type(val)) + + # Fallback to default when no encoder found + if not encoder: + try: + encoder = mapping[text_type] + except KeyError: + raise TypeError("no default type converter defined") + + if encoder in (escape_dict, escape_sequence): + val = encoder(val, charset, mapping) + else: + val = encoder(val, mapping) + return val + +def escape_dict(val, charset, mapping=None): + n = {} + for k, v in val.items(): + quoted = escape_item(v, charset, mapping) + n[k] = quoted + return n + +def escape_sequence(val, charset, mapping=None): + n = [] + for item in val: + quoted = escape_item(item, charset, mapping) + n.append(quoted) + return "(" + ",".join(n) + ")" + +def escape_set(val, charset, mapping=None): + return ','.join([escape_item(x, charset, mapping) for x in val]) + +def escape_bool(value, mapping=None): + return str(int(value)) + +def escape_object(value, mapping=None): + return str(value) + +def escape_int(value, mapping=None): + return str(value) + +def escape_float(value, mapping=None): + return ('%.15g' % value) + +_escape_table = [unichr(x) for x in range(128)] +_escape_table[0] = u'\\0' 
+_escape_table[ord('\\')] = u'\\\\' +_escape_table[ord('\n')] = u'\\n' +_escape_table[ord('\r')] = u'\\r' +_escape_table[ord('\032')] = u'\\Z' +_escape_table[ord('"')] = u'\\"' +_escape_table[ord("'")] = u"\\'" + +def _escape_unicode(value, mapping=None): + """escapes *value* without adding quote. + + Value should be unicode + """ + return value.translate(_escape_table) + +if PY2: + def escape_string(value, mapping=None): + """escape_string escapes *value* but not surround it with quotes. + + Value should be bytes or unicode. + """ + if isinstance(value, unicode): + return _escape_unicode(value) + assert isinstance(value, (bytes, bytearray)) + value = value.replace('\\', '\\\\') + value = value.replace('\0', '\\0') + value = value.replace('\n', '\\n') + value = value.replace('\r', '\\r') + value = value.replace('\032', '\\Z') + value = value.replace("'", "\\'") + value = value.replace('"', '\\"') + return value + + def escape_bytes_prefixed(value, mapping=None): + assert isinstance(value, (bytes, bytearray)) + return b"_binary'%s'" % escape_string(value) + + def escape_bytes(value, mapping=None): + assert isinstance(value, (bytes, bytearray)) + return b"'%s'" % escape_string(value) + +else: + escape_string = _escape_unicode + + # On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow. + # (fixed in Python 3.6, http://bugs.python.org/issue24870) + # Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff. + # We can escape special chars and surrogateescape at once. 
+ _escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)] + + def escape_bytes_prefixed(value, mapping=None): + return "_binary'%s'" % value.decode('latin1').translate(_escape_bytes_table) + + def escape_bytes(value, mapping=None): + return "'%s'" % value.decode('latin1').translate(_escape_bytes_table) + + +def escape_unicode(value, mapping=None): + return u"'%s'" % _escape_unicode(value) + +def escape_str(value, mapping=None): + return "'%s'" % escape_string(str(value), mapping) + +def escape_None(value, mapping=None): + return 'NULL' + +def escape_timedelta(obj, mapping=None): + seconds = int(obj.seconds) % 60 + minutes = int(obj.seconds // 60) % 60 + hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24 + if obj.microseconds: + fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'" + else: + fmt = "'{0:02d}:{1:02d}:{2:02d}'" + return fmt.format(hours, minutes, seconds, obj.microseconds) + +def escape_time(obj, mapping=None): + if obj.microsecond: + fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'" + else: + fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'" + return fmt.format(obj) + +def escape_datetime(obj, mapping=None): + if obj.microsecond: + fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'" + else: + fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'" + return fmt.format(obj) + +def escape_date(obj, mapping=None): + fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'" + return fmt.format(obj) + +def escape_struct_time(obj, mapping=None): + return escape_datetime(datetime.datetime(*obj[:6])) + +def _convert_second_fraction(s): + if not s: + return 0 + # Pad zeros to ensure the fraction length in microseconds + s = s.ljust(6, '0') + return int(s[:6]) + +DATETIME_RE = re.compile(r"(\d{1,4})-(\d{1,2})-(\d{1,2})[T ](\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") + + +def convert_datetime(obj): + """Returns a DATETIME or TIMESTAMP column value as a 
datetime object: + + >>> datetime_or_None('2007-02-25 23:06:20') + datetime.datetime(2007, 2, 25, 23, 6, 20) + >>> datetime_or_None('2007-02-25T23:06:20') + datetime.datetime(2007, 2, 25, 23, 6, 20) + + Illegal values are returned as None: + + >>> datetime_or_None('2007-02-31T23:06:20') is None + True + >>> datetime_or_None('0000-00-00 00:00:00') is None + True + + """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + + m = DATETIME_RE.match(obj) + if not m: + return convert_date(obj) + + try: + groups = list(m.groups()) + groups[-1] = _convert_second_fraction(groups[-1]) + return datetime.datetime(*[ int(x) for x in groups ]) + except ValueError: + return convert_date(obj) + +TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") + + +def convert_timedelta(obj): + """Returns a TIME column as a timedelta object: + + >>> timedelta_or_None('25:06:17') + datetime.timedelta(1, 3977) + >>> timedelta_or_None('-25:06:17') + datetime.timedelta(-2, 83177) + + Illegal values are returned as None: + + >>> timedelta_or_None('random crap') is None + True + + Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but + can accept values as (+|-)DD HH:MM:SS. The latter format will not + be parsed correctly by this function. 
+ """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + + m = TIMEDELTA_RE.match(obj) + if not m: + return obj + + try: + groups = list(m.groups()) + groups[-1] = _convert_second_fraction(groups[-1]) + negate = -1 if groups[0] else 1 + hours, minutes, seconds, microseconds = groups[1:] + + tdelta = datetime.timedelta( + hours = int(hours), + minutes = int(minutes), + seconds = int(seconds), + microseconds = int(microseconds) + ) * negate + return tdelta + except ValueError: + return obj + +TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") + + +def convert_time(obj): + """Returns a TIME column as a time object: + + >>> time_or_None('15:06:17') + datetime.time(15, 6, 17) + + Illegal values are returned as None: + + >>> time_or_None('-25:06:17') is None + True + >>> time_or_None('random crap') is None + True + + Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but + can accept values as (+|-)DD HH:MM:SS. The latter format will not + be parsed correctly by this function. + + Also note that MySQL's TIME column corresponds more closely to + Python's timedelta and not time. However if you want TIME columns + to be treated as time-of-day and not a time offset, then you can + use set this function as the converter for FIELD_TYPE.TIME. 
+ """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + + m = TIME_RE.match(obj) + if not m: + return obj + + try: + groups = list(m.groups()) + groups[-1] = _convert_second_fraction(groups[-1]) + hours, minutes, seconds, microseconds = groups + return datetime.time(hour=int(hours), minute=int(minutes), + second=int(seconds), microsecond=int(microseconds)) + except ValueError: + return obj + + +def convert_date(obj): + """Returns a DATE column as a date object: + + >>> date_or_None('2007-02-26') + datetime.date(2007, 2, 26) + + Illegal values are returned as None: + + >>> date_or_None('2007-02-31') is None + True + >>> date_or_None('0000-00-00') is None + True + + """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + try: + return datetime.date(*[ int(x) for x in obj.split('-', 2) ]) + except ValueError: + return obj + + +def convert_mysql_timestamp(timestamp): + """Convert a MySQL TIMESTAMP to a Timestamp object. + + MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME: + + >>> mysql_timestamp_converter('2007-02-25 22:32:17') + datetime.datetime(2007, 2, 25, 22, 32, 17) + + MySQL < 4.1 uses a big string of numbers: + + >>> mysql_timestamp_converter('20070225223217') + datetime.datetime(2007, 2, 25, 22, 32, 17) + + Illegal values are returned as None: + + >>> mysql_timestamp_converter('2007-02-31 22:32:17') is None + True + >>> mysql_timestamp_converter('00000000000000') is None + True + + """ + if not PY2 and isinstance(timestamp, (bytes, bytearray)): + timestamp = timestamp.decode('ascii') + if timestamp[4] == '-': + return convert_datetime(timestamp) + timestamp += "0"*(14-len(timestamp)) # padding + year, month, day, hour, minute, second = \ + int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \ + int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14]) + try: + return datetime.datetime(year, month, day, hour, minute, second) + except ValueError: + return timestamp 
+ +def convert_set(s): + if isinstance(s, (bytes, bytearray)): + return set(s.split(b",")) + return set(s.split(",")) + + +def through(x): + return x + + +#def convert_bit(b): +# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes +# return struct.unpack(">Q", b)[0] +# +# the snippet above is right, but MySQLdb doesn't process bits, +# so we shouldn't either +convert_bit = through + + +encoders = { + bool: escape_bool, + int: escape_int, + long_type: escape_int, + float: escape_float, + str: escape_str, + text_type: escape_unicode, + tuple: escape_sequence, + list: escape_sequence, + set: escape_sequence, + frozenset: escape_sequence, + dict: escape_dict, + type(None): escape_None, + datetime.date: escape_date, + datetime.datetime: escape_datetime, + datetime.timedelta: escape_timedelta, + datetime.time: escape_time, + time.struct_time: escape_struct_time, + Decimal: escape_object, +} + +if not PY2 or JYTHON or IRONPYTHON: + encoders[bytes] = escape_bytes + +decoders = { + FIELD_TYPE.BIT: convert_bit, + FIELD_TYPE.TINY: int, + FIELD_TYPE.SHORT: int, + FIELD_TYPE.LONG: int, + FIELD_TYPE.FLOAT: float, + FIELD_TYPE.DOUBLE: float, + FIELD_TYPE.LONGLONG: int, + FIELD_TYPE.INT24: int, + FIELD_TYPE.YEAR: int, + FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp, + FIELD_TYPE.DATETIME: convert_datetime, + FIELD_TYPE.TIME: convert_timedelta, + FIELD_TYPE.DATE: convert_date, + FIELD_TYPE.SET: convert_set, + FIELD_TYPE.BLOB: through, + FIELD_TYPE.TINY_BLOB: through, + FIELD_TYPE.MEDIUM_BLOB: through, + FIELD_TYPE.LONG_BLOB: through, + FIELD_TYPE.STRING: through, + FIELD_TYPE.VAR_STRING: through, + FIELD_TYPE.VARCHAR: through, + FIELD_TYPE.DECIMAL: Decimal, + FIELD_TYPE.NEWDECIMAL: Decimal, +} + + +# for MySQLdb compatibility +conversions = encoders.copy() +conversions.update(decoders) +Thing2Literal = escape_str diff --git a/utill/db/pymysql/cursors.py b/utill/db/pymysql/cursors.py new file mode 100644 index 0000000..a6d645d --- /dev/null +++ b/utill/db/pymysql/cursors.py @@ -0,0 +1,536 
@@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import +from functools import partial +import re +import warnings + +from ._compat import range_type, text_type, PY2 +from . import err + + +#: Regular expression for :meth:`Cursor.executemany`. +#: executemany only suports simple bulk insert. +#: You can use it to load large dataset. +RE_INSERT_VALUES = re.compile( + r"\s*((?:INSERT|REPLACE)\b.+\bVALUES?\s*)" + + r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))" + + r"(\s*(?:ON DUPLICATE.*)?);?\s*\Z", + re.IGNORECASE | re.DOTALL) + + +class Cursor(object): + """ + This is the object you use to interact with the database. + + Do not create an instance of a Cursor yourself. Call + connections.Connection.cursor(). + + See `Cursor `_ in + the specification. + """ + + #: Max statement size which :meth:`executemany` generates. + #: + #: Max size of allowed statement is max_allowed_packet - packet_header_size. + #: Default value of max_allowed_packet is 1048576. + max_stmt_length = 1024000 + + _defer_warnings = False + + def __init__(self, connection): + self.connection = connection + self.description = None + self.rownumber = 0 + self.rowcount = -1 + self.arraysize = 1 + self._executed = None + self._result = None + self._rows = None + self._warnings_handled = False + + def close(self): + """ + Closing a cursor just exhausts all remaining data. 
+ """ + conn = self.connection + if conn is None: + return + try: + while self.nextset(): + pass + finally: + self.connection = None + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + del exc_info + self.close() + + def _get_db(self): + if not self.connection: + raise err.ProgrammingError("Cursor closed") + return self.connection + + def _check_executed(self): + if not self._executed: + raise err.ProgrammingError("execute() first") + + def _conv_row(self, row): + return row + + def setinputsizes(self, *args): + """Does nothing, required by DB API.""" + + def setoutputsizes(self, *args): + """Does nothing, required by DB API.""" + + def _nextset(self, unbuffered=False): + """Get the next query set""" + conn = self._get_db() + current_result = self._result + # for unbuffered queries warnings are only available once whole result has been read + if unbuffered: + self._show_warnings() + if current_result is None or current_result is not conn._result: + return None + if not current_result.has_next: + return None + self._result = None + self._clear_result() + conn.next_result(unbuffered=unbuffered) + self._do_get_result() + return True + + def nextset(self): + return self._nextset(False) + + def _ensure_bytes(self, x, encoding=None): + if isinstance(x, text_type): + x = x.encode(encoding) + elif isinstance(x, (tuple, list)): + x = type(x)(self._ensure_bytes(v, encoding=encoding) for v in x) + return x + + def _escape_args(self, args, conn): + ensure_bytes = partial(self._ensure_bytes, encoding=conn.encoding) + + if isinstance(args, (tuple, list)): + if PY2: + args = tuple(map(ensure_bytes, args)) + return tuple(conn.literal(arg) for arg in args) + elif isinstance(args, dict): + if PY2: + args = {ensure_bytes(key): ensure_bytes(val) for + (key, val) in args.items()} + return {key: conn.literal(val) for (key, val) in args.items()} + else: + # If it's not a dictionary let's try escaping it anyways. 
+ # Worst case it will throw a Value error + if PY2: + args = ensure_bytes(args) + return conn.escape(args) + + def mogrify(self, query, args=None): + """ + Returns the exact string that is sent to the database by calling the + execute() method. + + This method follows the extension to the DB API 2.0 followed by Psycopg. + """ + conn = self._get_db() + if PY2: # Use bytes on Python 2 always + query = self._ensure_bytes(query, encoding=conn.encoding) + + if args is not None: + query = query % self._escape_args(args, conn) + + return query + + def execute(self, query, args=None): + """Execute a query + + :param str query: Query to execute. + + :param args: parameters used with query. (optional) + :type args: tuple, list or dict + + :return: Number of affected rows + :rtype: int + + If args is a list or tuple, %s can be used as a placeholder in the query. + If args is a dict, %(name)s can be used as a placeholder in the query. + """ + while self.nextset(): + pass + + query = self.mogrify(query, args) + + result = self._query(query) + self._executed = query + return result + + def executemany(self, query, args): + # type: (str, list) -> int + """Run several data against one query + + :param query: query to execute on server + :param args: Sequence of sequences or mappings. It is used as parameter. + :return: Number of rows affected, if any. + + This method improves performance on multiple-row INSERT and + REPLACE. Otherwise it is equivalent to looping over args with + execute(). 
+ """ + if not args: + return + + m = RE_INSERT_VALUES.match(query) + if m: + q_prefix = m.group(1) % () + q_values = m.group(2).rstrip() + q_postfix = m.group(3) or '' + assert q_values[0] == '(' and q_values[-1] == ')' + return self._do_execute_many(q_prefix, q_values, q_postfix, args, + self.max_stmt_length, + self._get_db().encoding) + + self.rowcount = sum(self.execute(query, arg) for arg in args) + return self.rowcount + + def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding): + conn = self._get_db() + escape = self._escape_args + if isinstance(prefix, text_type): + prefix = prefix.encode(encoding) + if PY2 and isinstance(values, text_type): + values = values.encode(encoding) + if isinstance(postfix, text_type): + postfix = postfix.encode(encoding) + sql = bytearray(prefix) + args = iter(args) + v = values % escape(next(args), conn) + if isinstance(v, text_type): + if PY2: + v = v.encode(encoding) + else: + v = v.encode(encoding, 'surrogateescape') + sql += v + rows = 0 + for arg in args: + v = values % escape(arg, conn) + if isinstance(v, text_type): + if PY2: + v = v.encode(encoding) + else: + v = v.encode(encoding, 'surrogateescape') + if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length: + rows += self.execute(sql + postfix) + sql = bytearray(prefix) + else: + sql += b',' + sql += v + rows += self.execute(sql + postfix) + self.rowcount = rows + return rows + + def callproc(self, procname, args=()): + """Execute stored procedure procname with args + + procname -- string, name of procedure to execute on server + + args -- Sequence of parameters to use with procedure + + Returns the original args. + + Compatibility warning: PEP-249 specifies that any modified + parameters must be returned. This is currently impossible + as they are only available by storing them in a server + variable and then retrieved by a query. 
Since stored + procedures return zero or more result sets, there is no + reliable way to get at OUT or INOUT parameters via callproc. + The server variables are named @_procname_n, where procname + is the parameter above and n is the position of the parameter + (from zero). Once all result sets generated by the procedure + have been fetched, you can issue a SELECT @_procname_0, ... + query using .execute() to get any OUT or INOUT values. + + Compatibility warning: The act of calling a stored procedure + itself creates an empty result set. This appears after any + result sets generated by the procedure. This is non-standard + behavior with respect to the DB-API. Be sure to use nextset() + to advance through all result sets; otherwise you may get + disconnected. + """ + conn = self._get_db() + if args: + fmt = '@_{0}_%d=%s'.format(procname) + self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg)) + for index, arg in enumerate(args))) + self.nextset() + + q = "CALL %s(%s)" % (procname, + ','.join(['@_%s_%d' % (procname, i) + for i in range_type(len(args))])) + self._query(q) + self._executed = q + return args + + def fetchone(self): + """Fetch the next row""" + self._check_executed() + if self._rows is None or self.rownumber >= len(self._rows): + return None + result = self._rows[self.rownumber] + self.rownumber += 1 + return result + + def fetchmany(self, size=None): + """Fetch several rows""" + self._check_executed() + if self._rows is None: + return () + end = self.rownumber + (size or self.arraysize) + result = self._rows[self.rownumber:end] + self.rownumber = min(end, len(self._rows)) + return result + + def fetchall(self): + """Fetch all the rows""" + self._check_executed() + if self._rows is None: + return () + if self.rownumber: + result = self._rows[self.rownumber:] + else: + result = self._rows + self.rownumber = len(self._rows) + return result + + def scroll(self, value, mode='relative'): + self._check_executed() + if mode == 'relative': + r = 
self.rownumber + value + elif mode == 'absolute': + r = value + else: + raise err.ProgrammingError("unknown scroll mode %s" % mode) + + if not (0 <= r < len(self._rows)): + raise IndexError("out of range") + self.rownumber = r + + def _query(self, q): + conn = self._get_db() + self._last_executed = q + self._clear_result() + conn.query(q) + self._do_get_result() + return self.rowcount + + def _clear_result(self): + self.rownumber = 0 + self._result = None + + self.rowcount = 0 + self.description = None + self.lastrowid = None + self._rows = None + + def _do_get_result(self): + conn = self._get_db() + + self._result = result = conn._result + + self.rowcount = result.affected_rows + self.description = result.description + self.lastrowid = result.insert_id + self._rows = result.rows + self._warnings_handled = False + + if not self._defer_warnings: + self._show_warnings() + + def _show_warnings(self): + if self._warnings_handled: + return + self._warnings_handled = True + if self._result and (self._result.has_next or not self._result.warning_count): + return + ws = self._get_db().show_warnings() + if ws is None: + return + for w in ws: + msg = w[-1] + if PY2: + if isinstance(msg, unicode): + msg = msg.encode('utf-8', 'replace') + warnings.warn(err.Warning(*w[1:3]), stacklevel=4) + + def __iter__(self): + return iter(self.fetchone, None) + + Warning = err.Warning + Error = err.Error + InterfaceError = err.InterfaceError + DatabaseError = err.DatabaseError + DataError = err.DataError + OperationalError = err.OperationalError + IntegrityError = err.IntegrityError + InternalError = err.InternalError + ProgrammingError = err.ProgrammingError + NotSupportedError = err.NotSupportedError + + +class DictCursorMixin(object): + # You can override this to use OrderedDict or other dict-like types. 
+ dict_type = dict + + def _do_get_result(self): + super(DictCursorMixin, self)._do_get_result() + fields = [] + if self.description: + for f in self._result.fields: + name = f.name + if name in fields: + name = f.table_name + '.' + name + fields.append(name) + self._fields = fields + + if fields and self._rows: + self._rows = [self._conv_row(r) for r in self._rows] + + def _conv_row(self, row): + if row is None: + return None + return self.dict_type(zip(self._fields, row)) + + +class DictCursor(DictCursorMixin, Cursor): + """A cursor which returns results as a dictionary""" + + +class SSCursor(Cursor): + """ + Unbuffered Cursor, mainly useful for queries that return a lot of data, + or for connections to remote servers over a slow network. + + Instead of copying every row of data into a buffer, this will fetch + rows as needed. The upside of this is the client uses much less memory, + and rows are returned much faster when traveling over a slow network + or if the result set is very big. + + There are limitations, though. The MySQL protocol doesn't support + returning the total number of rows, so the only way to tell how many rows + there are is to iterate over every row returned. Also, it currently isn't + possible to scroll backwards, as only the current row is held in memory. 
+ """ + + _defer_warnings = True + + def _conv_row(self, row): + return row + + def close(self): + conn = self.connection + if conn is None: + return + + if self._result is not None and self._result is conn._result: + self._result._finish_unbuffered_query() + + try: + while self.nextset(): + pass + finally: + self.connection = None + + __del__ = close + + def _query(self, q): + conn = self._get_db() + self._last_executed = q + self._clear_result() + conn.query(q, unbuffered=True) + self._do_get_result() + return self.rowcount + + def nextset(self): + return self._nextset(unbuffered=True) + + def read_next(self): + """Read next row""" + return self._conv_row(self._result._read_rowdata_packet_unbuffered()) + + def fetchone(self): + """Fetch next row""" + self._check_executed() + row = self.read_next() + if row is None: + self._show_warnings() + return None + self.rownumber += 1 + return row + + def fetchall(self): + """ + Fetch all, as per MySQLdb. Pretty useless for large queries, as + it is buffered. See fetchall_unbuffered(), if you want an unbuffered + generator version of this method. + """ + return list(self.fetchall_unbuffered()) + + def fetchall_unbuffered(self): + """ + Fetch all, implemented as a generator, which isn't to standard, + however, it doesn't make sense to return everything in a list, as that + would use ridiculous memory for large result sets. 
+ """ + return iter(self.fetchone, None) + + def __iter__(self): + return self.fetchall_unbuffered() + + def fetchmany(self, size=None): + """Fetch many""" + self._check_executed() + if size is None: + size = self.arraysize + + rows = [] + for i in range_type(size): + row = self.read_next() + if row is None: + self._show_warnings() + break + rows.append(row) + self.rownumber += 1 + return rows + + def scroll(self, value, mode='relative'): + self._check_executed() + + if mode == 'relative': + if value < 0: + raise err.NotSupportedError( + "Backwards scrolling not supported by this cursor") + + for _ in range_type(value): + self.read_next() + self.rownumber += value + elif mode == 'absolute': + if value < self.rownumber: + raise err.NotSupportedError( + "Backwards scrolling not supported by this cursor") + + end = value - self.rownumber + for _ in range_type(end): + self.read_next() + self.rownumber = value + else: + raise err.ProgrammingError("unknown scroll mode %s" % mode) + + +class SSDictCursor(DictCursorMixin, SSCursor): + """An unbuffered cursor, which returns results as a dictionary""" diff --git a/utill/db/pymysql/err.py b/utill/db/pymysql/err.py new file mode 100644 index 0000000..fbc6055 --- /dev/null +++ b/utill/db/pymysql/err.py @@ -0,0 +1,109 @@ +import struct + +from .constants import ER + + +class MySQLError(Exception): + """Exception related to operation with MySQL.""" + + +class Warning(Warning, MySQLError): + """Exception raised for important warnings like data truncations + while inserting, etc.""" + + +class Error(MySQLError): + """Exception that is the base class of all other error exceptions + (not Warning).""" + + +class InterfaceError(Error): + """Exception raised for errors that are related to the database + interface rather than the database itself.""" + + +class DatabaseError(Error): + """Exception raised for errors that are related to the + database.""" + + +class DataError(DatabaseError): + """Exception raised for errors that are due to 
problems with the + processed data like division by zero, numeric value out of range, + etc.""" + + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's + operation and not necessarily under the control of the programmer, + e.g. an unexpected disconnect occurs, the data source name is not + found, a transaction could not be processed, a memory allocation + error occurred during processing, etc.""" + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database + is affected, e.g. a foreign key check fails, duplicate key, + etc.""" + + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal + error, e.g. the cursor is not valid anymore, the transaction is + out of sync, etc.""" + + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors, e.g. table not found + or already exists, syntax error in the SQL statement, wrong number + of parameters specified, etc.""" + + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used + which is not supported by the database, e.g. 
requesting a + .rollback() on a connection that does not support transaction or + has transactions turned off.""" + + +error_map = {} + + +def _map_error(exc, *errors): + for error in errors: + error_map[error] = exc + + +_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR, + ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME, + ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE, + ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION, + ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION, + ER.WRONG_DB_NAME, ER.WRONG_COLUMN_NAME, + ) +_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL, + ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL, + ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW) +_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW, + ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2, + ER.CANNOT_ADD_FOREIGN, ER.BAD_NULL_ERROR) +_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK, + ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE) +_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR, + ER.CON_COUNT_ERROR, ER.TABLEACCESS_DENIED_ERROR, + ER.COLUMNACCESS_DENIED_ERROR, ER.CONSTRAINT_FAILED, ER.LOCK_DEADLOCK) + + +del _map_error, ER + + +def raise_mysql_exception(data): + errno = struct.unpack('= 2 and value[0] == value[-1] == quote: + return value[1:-1] + return value + + def get(self, section, option): + value = configparser.RawConfigParser.get(self, section, option) + return self.__remove_quotes(value) diff --git a/utill/db/pymysql/protocol.py b/utill/db/pymysql/protocol.py new file mode 100644 index 0000000..8ccf7c4 --- /dev/null +++ b/utill/db/pymysql/protocol.py @@ -0,0 +1,341 @@ +# Python implementation of low level MySQL client-server protocol +# http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +from __future__ import print_function +from .charset import MBLENGTH +from ._compat import PY2, 
range_type +from .constants import FIELD_TYPE, SERVER_STATUS +from . import err +from .util import byte2int + +import struct +import sys + + +DEBUG = False + +NULL_COLUMN = 251 +UNSIGNED_CHAR_COLUMN = 251 +UNSIGNED_SHORT_COLUMN = 252 +UNSIGNED_INT24_COLUMN = 253 +UNSIGNED_INT64_COLUMN = 254 + + +def dump_packet(data): # pragma: no cover + def printable(data): + if 32 <= byte2int(data) < 127: + if isinstance(data, int): + return chr(data) + return data + return '.' + + try: + print("packet length:", len(data)) + for i in range(1, 7): + f = sys._getframe(i) + print("call[%d]: %s (line %d)" % (i, f.f_code.co_name, f.f_lineno)) + print("-" * 66) + except ValueError: + pass + dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)] + for d in dump_data: + print(' '.join("{:02X}".format(byte2int(x)) for x in d) + + ' ' * (16 - len(d)) + ' ' * 2 + + ''.join(printable(x) for x in d)) + print("-" * 66) + print() + + +class MysqlPacket(object): + """Representation of a MySQL response packet. + + Provides an interface for reading/parsing the packet results. + """ + __slots__ = ('_position', '_data') + + def __init__(self, data, encoding): + self._position = 0 + self._data = data + + def get_all_data(self): + return self._data + + def read(self, size): + """Read the first 'size' bytes in packet and advance cursor past them.""" + result = self._data[self._position:(self._position+size)] + if len(result) != size: + error = ('Result length not requested length:\n' + 'Expected=%s. Actual=%s. Position: %s. Data Length: %s' + % (size, len(result), self._position, len(self._data))) + if DEBUG: + print(error) + self.dump() + raise AssertionError(error) + self._position += size + return result + + def read_all(self): + """Read all remaining data in the packet. + + (Subsequent read() will return errors.) 
+ """ + result = self._data[self._position:] + self._position = None # ensure no subsequent read() + return result + + def advance(self, length): + """Advance the cursor in data buffer 'length' bytes.""" + new_position = self._position + length + if new_position < 0 or new_position > len(self._data): + raise Exception('Invalid advance amount (%s) for cursor. ' + 'Position=%s' % (length, new_position)) + self._position = new_position + + def rewind(self, position=0): + """Set the position of the data buffer cursor to 'position'.""" + if position < 0 or position > len(self._data): + raise Exception("Invalid position to rewind cursor to: %s." % position) + self._position = position + + def get_bytes(self, position, length=1): + """Get 'length' bytes starting at 'position'. + + Position is start of payload (first four packet header bytes are not + included) starting at index '0'. + + No error checking is done. If requesting outside end of buffer + an empty string (or string shorter than 'length') may be returned! + """ + return self._data[position:(position+length)] + + if PY2: + def read_uint8(self): + result = ord(self._data[self._position]) + self._position += 1 + return result + else: + def read_uint8(self): + result = self._data[self._position] + self._position += 1 + return result + + def read_uint16(self): + result = struct.unpack_from('= 7 + + def is_eof_packet(self): + # http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet + # Caution: \xFE may be LengthEncodedInteger. + # If \xFE is LengthEncodedInteger header, 8bytes followed. 
+ return self._data[0:1] == b'\xfe' and len(self._data) < 9 + + def is_auth_switch_request(self): + # http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest + return self._data[0:1] == b'\xfe' + + def is_extra_auth_data(self): + # https://dev.mysql.com/doc/internals/en/successful-authentication.html + return self._data[0:1] == b'\x01' + + def is_resultset_packet(self): + field_count = ord(self._data[0:1]) + return 1 <= field_count <= 250 + + def is_load_local_packet(self): + return self._data[0:1] == b'\xfb' + + def is_error_packet(self): + return self._data[0:1] == b'\xff' + + def check_error(self): + if self.is_error_packet(): + self.rewind() + self.advance(1) # field_count == error (we already know that) + errno = self.read_uint16() + if DEBUG: print("errno =", errno) + err.raise_mysql_exception(self._data) + + def dump(self): + dump_packet(self._data) + + +class FieldDescriptorPacket(MysqlPacket): + """A MysqlPacket that represents a specific column's metadata in the result. + + Parsing is automatically done and the results are exported via public + attributes on the class such as: db, table_name, name, length, type_code. + """ + + def __init__(self, data, encoding): + MysqlPacket.__init__(self, data, encoding) + self._parse_field_descriptor(encoding) + + def _parse_field_descriptor(self, encoding): + """Parse the 'Field Descriptor' (Metadata) packet. + + This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). 
+ """ + self.catalog = self.read_length_coded_string() + self.db = self.read_length_coded_string() + self.table_name = self.read_length_coded_string().decode(encoding) + self.org_table = self.read_length_coded_string().decode(encoding) + self.name = self.read_length_coded_string().decode(encoding) + self.org_name = self.read_length_coded_string().decode(encoding) + self.charsetnr, self.length, self.type_code, self.flags, self.scale = ( + self.read_struct(' "6000" and name LIKE "%超" ) + + "id","eq",1 表示 id = '1' + + eq 等于 + neq 不等于 + gt 大于 + egt 大于等于 + lt 小于 + elt 小于等于 + like LIKE + """ + self.__where=where + self.__wheres=wheres + # print(len(self.__wheres)) + return self + __field='*' + def field(self,field = "*"): + """设置过滤显示条件 + + 参数 field:str 字符串 + """ + self.__field=field + return self + __limit=[] + def limit(self,offset, length = None): + """设置查询数量 + + 参数 offset:int 起始位置 + + 参数 length:int 查询数量 + """ + self.__limit=[offset,length] + return self + def page(self,pagenow=1, length = 20): + """设置分页查询 + + 参数 pagenow:int 页码 + + 参数 length:int 查询数量 + """ + offset=(pagenow-1)*length + self.__limit=[offset,length] + return self + __order=None + __order1=None + def order(self,strs=None,*strs1): + """设置排序查询 + + 传入方式: + + "id desc" + + "id",'name','appkey','asc' + + "id",'name','appkey' 不包含asc或desc的情况下 默认是desc + + ['id','taskid',{"task_id":"desc"}] + """ + self.__order=strs + self.__order1=strs1 + return self + __group=None + __group1=None + def group(self,strs=None,*strs1): + """设置分组查询 + + 传入方式: + + "id,name" + + "id","name" + """ + self.__group=strs + self.__group1=strs1 + return self + __having=None + def having(self,strs=None): + """用于配合group方法完成从分组的结果中筛选(通常是聚合条件)数据 + + 参数 strs:string 如:"count(time)>3" + """ + self.__having=strs + return self + + __alias=None + def alias(self,strs=None): + """用于设置当前数据表的别名,便于使用其他的连贯操作例如join方法等。 + + 参数 strs:string 默认当前表作为别名 + """ + if strs: + self.__alias=strs + else: + self.__alias=self.__table + return self + __join=None + 
__joinstr='' + def join(self,strs,on=None,types='INNER'): + """用于根据两个或多个表中的列之间的关系,从这些表中查询数据 + + 参数 strs string 如:"test t1" test表设置别名t1 + + 参数 on string 如:"t1.id=t2.pid" 设置连接条件 + + 参数 types 支持INNER、LEFT、RIGHT、FULL 默认INNER + + """ + joinstr='' + if strs and on: + joinstr=joinstr+types+" JOIN "+strs+" ON "+on+" " + if joinstr: + self.__joinstr=self.__joinstr+joinstr + return self + __distinct=None + def distinct(self,bools=None): + "用于返回唯一不同的值,配合field方法使用生效,消除所有重复的记录,并只获取唯一一次记录。" + self.__distinct=bools + return self + __lock=None + def lock(self,strs=None): + """用于数据库的锁机制,在查询或者执行操作的时候使用 (暂未实现) + + 排他锁 (Exclusive lock) + + 共享锁 (Shared lock) + + 参数 strs 如:True表示自动在生成的SQL语句最后加上FOR UPDATE, + + + """ + # self.__lock=strs + return self + def __setsql(self,types=None,data = {}): + """生成sql语句""" + if types==None: + self.__sql="SELECT" + if self.__distinct and self.__field: + self.__sql=self.__sql+" DISTINCT" + if self.__alias: + self.__sql=self.__sql+" %s FROM %s %s" % (self.__field,self.__table,self.__alias) + else: + self.__sql=self.__sql+" %s FROM %s" % (self.__field,self.__table) + elif types=='count': + self.__sql="SELECT COUNT(%s) FROM %s" % (self.__field,self.__table) + elif types=='max': + self.__sql="SELECT MAX(%s) FROM %s" % (self.__field,self.__table) + elif types=='min': + self.__sql="SELECT MIN(%s) FROM %s" % (self.__field,self.__table) + elif types=='avg': + self.__sql="SELECT AVG(%s) FROM %s" % (self.__field,self.__table) + elif types=='sum': + self.__sql="SELECT SUM(%s) FROM %s" % (self.__field,self.__table) + elif types=='update': + strs='' + for k in data: + if isinstance(data[k],str): + strs=strs+" %s = '%s' ," % (k,self.escape_string(data[k])) + else: + strs=strs+" %s = %s ," % (k,data[k]) + strs=strs[:-1] + self.__sql="UPDATE %s SET %s" % (self.__table,strs) + # print(self.__sql) + elif types=='delete': + self.__sql="DELETE FROM %s" % (self.__table) + elif types=='insert': + if isinstance(data,dict): + strs='' + val='' + for k in data: + strs=strs+"%s," 
% k + if isinstance(data[k],str): + val=val+"'%s'," % self.escape_string(data[k]) + else: + val=val+"%s," % data[k] + strs=strs[:-1] + val=val[:-1] + self.__sql="INSERT INTO "+str(self.__table)+" ("+strs+") VALUES ("+val+")" + # print(self.__sql) + elif isinstance(data,list): + strs='' + val='(' + for k in data[0]: + strs=strs+" , "+k + for k in data: + for j in k: + if isinstance(k[j],str): + val=val+"'"+str(k[j])+"'," + else: + val=val+str(k[j])+"," + val=val[:-1] + val=val+"),(" + val=val[:-2] + self.__sql="INSERT INTO "+str(self.__table)+" ("+strs[3:]+") VALUES "+val + + if self.__joinstr: + # print(self.__sql) + self.__sql=self.__sql+" "+self.__joinstr + if self.__where: + if isinstance(self.__where,str): + if self.__wheres: + if len(self.__wheres) == 2: + if isinstance(self.__wheres[1],list): + self.__sql=self.__sql + " WHERE %s %s (" % (self.__where,self.__operator(self.__wheres[0])) + for k in self.__wheres[1]: + self.__sql=self.__sql+str(k)+"," + self.__sql=self.__sql[:-1]+")" + else: + self.__sql=self.__sql + " WHERE %s %s '%s'" % (self.__where,self.__operator(self.__wheres[0]),self.__wheres[1]) + elif len(self.__wheres) > 2: + if self.__wheres[0]=='in': + strs=str(self.__wheres[1]) + i=0 + for k in self.__wheres: + if i > 1: + strs=strs+","+str(k) + i=i+1 + self.__sql=self.__sql + " WHERE %s in (%s)" % (self.__where,strs) + else: + self.__sql=self.__sql + " WHERE %s = '%s'" % (self.__where,self.__wheres[0]) + else: + self.__sql=self.__sql + " WHERE %s" % self.__where + elif isinstance(self.__where,list): + self.__sql=self.__sql + " WHERE %s" % self.__listTrans() + else: + print("参数where类型错误",type(self.__where),self.__where) + if self.__order: + s='' + if isinstance(self.__order,list): + for strs in self.__order: + if isinstance(strs,str): + s=s+strs+"," + else: + pass + for key in strs: + s=s+key+" "+strs[key] + s=s+"," + s=s[:-1] + if isinstance(self.__order,str): + if self.__order1: + if len(self.__order1) > 1: + if self.__order1[len(self.__order1)-1] 
== 'desc' or self.__order1[len(self.__order1)-1] == 'asc': + i=0 + while i 57 and + buf[0] == 0x50 and buf[1] == 0x4B and + buf[2] == 0x3 and buf[3] == 0x4 and + buf[30] == 0x6D and buf[31] == 0x69 and + buf[32] == 0x6D and buf[33] == 0x65 and + buf[34] == 0x74 and buf[35] == 0x79 and + buf[36] == 0x70 and buf[37] == 0x65 and + buf[38] == 0x61 and buf[39] == 0x70 and + buf[40] == 0x70 and buf[41] == 0x6C and + buf[42] == 0x69 and buf[43] == 0x63 and + buf[44] == 0x61 and buf[45] == 0x74 and + buf[46] == 0x69 and buf[47] == 0x6F and + buf[48] == 0x6E and buf[49] == 0x2F and + buf[50] == 0x65 and buf[51] == 0x70 and + buf[52] == 0x75 and buf[53] == 0x62 and + buf[54] == 0x2B and buf[55] == 0x7A and + buf[56] == 0x69 and buf[57] == 0x70) + + +class Zip(Type): + """ + Implements the Zip archive type matcher. + """ + MIME = 'application/zip' + EXTENSION = 'zip' + + def __init__(self): + super(Zip, self).__init__( + mime=Zip.MIME, + extension=Zip.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x50 and buf[1] == 0x4B and + (buf[2] == 0x3 or buf[2] == 0x5 or + buf[2] == 0x7) and + (buf[3] == 0x4 or buf[3] == 0x6 or + buf[3] == 0x8)) + + +class Tar(Type): + """ + Implements the Tar archive type matcher. + """ + MIME = 'application/x-tar' + EXTENSION = 'tar' + + def __init__(self): + super(Tar, self).__init__( + mime=Tar.MIME, + extension=Tar.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 261 and + buf[257] == 0x75 and + buf[258] == 0x73 and + buf[259] == 0x74 and + buf[260] == 0x61 and + buf[261] == 0x72) + + +class Rar(Type): + """ + Implements the RAR archive type matcher. 
+ """ + MIME = 'application/x-rar-compressed' + EXTENSION = 'rar' + + def __init__(self): + super(Rar, self).__init__( + mime=Rar.MIME, + extension=Rar.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 6 and + buf[0] == 0x52 and + buf[1] == 0x61 and + buf[2] == 0x72 and + buf[3] == 0x21 and + buf[4] == 0x1A and + buf[5] == 0x7 and + (buf[6] == 0x0 or + buf[6] == 0x1)) + + +class Gz(Type): + """ + Implements the GZ archive type matcher. + """ + MIME = 'application/gzip' + EXTENSION = 'gz' + + def __init__(self): + super(Gz, self).__init__( + mime=Gz.MIME, + extension=Gz.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 2 and + buf[0] == 0x1F and + buf[1] == 0x8B and + buf[2] == 0x8) + + +class Bz2(Type): + """ + Implements the BZ2 archive type matcher. + """ + MIME = 'application/x-bzip2' + EXTENSION = 'bz2' + + def __init__(self): + super(Bz2, self).__init__( + mime=Bz2.MIME, + extension=Bz2.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 2 and + buf[0] == 0x42 and + buf[1] == 0x5A and + buf[2] == 0x68) + + +class SevenZ(Type): + """ + Implements the SevenZ (7z) archive type matcher. + """ + MIME = 'application/x-7z-compressed' + EXTENSION = '7z' + + def __init__(self): + super(SevenZ, self).__init__( + mime=SevenZ.MIME, + extension=SevenZ.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 5 and + buf[0] == 0x37 and + buf[1] == 0x7A and + buf[2] == 0xBC and + buf[3] == 0xAF and + buf[4] == 0x27 and + buf[5] == 0x1C) + + +class Pdf(Type): + """ + Implements the PDF archive type matcher. + """ + MIME = 'application/pdf' + EXTENSION = 'pdf' + + def __init__(self): + super(Pdf, self).__init__( + mime=Pdf.MIME, + extension=Pdf.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x25 and + buf[1] == 0x50 and + buf[2] == 0x44 and + buf[3] == 0x46) + + +class Exe(Type): + """ + Implements the EXE archive type matcher. 
+ """ + MIME = 'application/x-msdownload' + EXTENSION = 'exe' + + def __init__(self): + super(Exe, self).__init__( + mime=Exe.MIME, + extension=Exe.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 1 and + buf[0] == 0x4D and + buf[1] == 0x5A) + + +class Swf(Type): + """ + Implements the SWF archive type matcher. + """ + MIME = 'application/x-shockwave-flash' + EXTENSION = 'swf' + + def __init__(self): + super(Swf, self).__init__( + mime=Swf.MIME, + extension=Swf.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 2 and + (buf[0] == 0x43 or + buf[0] == 0x46) and + buf[1] == 0x57 and + buf[2] == 0x53) + + +class Rtf(Type): + """ + Implements the RTF archive type matcher. + """ + MIME = 'application/rtf' + EXTENSION = 'rtf' + + def __init__(self): + super(Rtf, self).__init__( + mime=Rtf.MIME, + extension=Rtf.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 4 and + buf[0] == 0x7B and + buf[1] == 0x5C and + buf[2] == 0x72 and + buf[3] == 0x74 and + buf[4] == 0x66) + + +class Nes(Type): + """ + Implements the NES archive type matcher. + """ + MIME = 'application/x-nintendo-nes-rom' + EXTENSION = 'nes' + + def __init__(self): + super(Nes, self).__init__( + mime=Nes.MIME, + extension=Nes.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x4E and + buf[1] == 0x45 and + buf[2] == 0x53 and + buf[3] == 0x1A) + + +class Crx(Type): + """ + Implements the CRX archive type matcher. + """ + MIME = 'application/x-google-chrome-extension' + EXTENSION = 'crx' + + def __init__(self): + super(Crx, self).__init__( + mime=Crx.MIME, + extension=Crx.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x43 and + buf[1] == 0x72 and + buf[2] == 0x32 and + buf[3] == 0x34) + + +class Cab(Type): + """ + Implements the CAB archive type matcher. 
+ """ + MIME = 'application/vnd.ms-cab-compressed' + EXTENSION = 'cab' + + def __init__(self): + super(Cab, self).__init__( + mime=Cab.MIME, + extension=Cab.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + ((buf[0] == 0x4D and + buf[1] == 0x53 and + buf[2] == 0x43 and + buf[3] == 0x46) or + (buf[0] == 0x49 and + buf[1] == 0x53 and + buf[2] == 0x63 and + buf[3] == 0x28))) + + +class Eot(Type): + """ + Implements the EOT archive type matcher. + """ + MIME = 'application/octet-stream' + EXTENSION = 'eot' + + def __init__(self): + super(Eot, self).__init__( + mime=Eot.MIME, + extension=Eot.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 35 and + buf[34] == 0x4C and + buf[35] == 0x50 and + ((buf[8] == 0x02 and + buf[9] == 0x00 and + buf[10] == 0x01) or + (buf[8] == 0x01 and + buf[9] == 0x00 and + buf[10] == 0x00) or + (buf[8] == 0x02 and + buf[9] == 0x00 and + buf[10] == 0x02))) + + +class Ps(Type): + """ + Implements the PS archive type matcher. + """ + MIME = 'application/postscript' + EXTENSION = 'ps' + + def __init__(self): + super(Ps, self).__init__( + mime=Ps.MIME, + extension=Ps.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 1 and + buf[0] == 0x25 and + buf[1] == 0x21) + + +class Xz(Type): + """ + Implements the XS archive type matcher. + """ + MIME = 'application/x-xz' + EXTENSION = 'xz' + + def __init__(self): + super(Xz, self).__init__( + mime=Xz.MIME, + extension=Xz.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 5 and + buf[0] == 0xFD and + buf[1] == 0x37 and + buf[2] == 0x7A and + buf[3] == 0x58 and + buf[4] == 0x5A and + buf[5] == 0x00) + + +class Sqlite(Type): + """ + Implements the Sqlite DB archive type matcher. 
+ """ + MIME = 'application/x-sqlite3' + EXTENSION = 'sqlite' + + def __init__(self): + super(Sqlite, self).__init__( + mime=Sqlite.MIME, + extension=Sqlite.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x53 and + buf[1] == 0x51 and + buf[2] == 0x4C and + buf[3] == 0x69) + + +class Deb(Type): + """ + Implements the DEB archive type matcher. + """ + MIME = 'application/x-deb' + EXTENSION = 'deb' + + def __init__(self): + super(Deb, self).__init__( + mime=Deb.MIME, + extension=Deb.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 20 and + buf[0] == 0x21 and + buf[1] == 0x3C and + buf[2] == 0x61 and + buf[3] == 0x72 and + buf[4] == 0x63 and + buf[5] == 0x68 and + buf[6] == 0x3E and + buf[7] == 0x0A and + buf[8] == 0x64 and + buf[9] == 0x65 and + buf[10] == 0x62 and + buf[11] == 0x69 and + buf[12] == 0x61 and + buf[13] == 0x6E and + buf[14] == 0x2D and + buf[15] == 0x62 and + buf[16] == 0x69 and + buf[17] == 0x6E and + buf[18] == 0x61 and + buf[19] == 0x72 and + buf[20] == 0x79) + + +class Ar(Type): + """ + Implements the AR archive type matcher. + """ + MIME = 'application/x-unix-archive' + EXTENSION = 'ar' + + def __init__(self): + super(Ar, self).__init__( + mime=Ar.MIME, + extension=Ar.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 6 and + buf[0] == 0x21 and + buf[1] == 0x3C and + buf[2] == 0x61 and + buf[3] == 0x72 and + buf[4] == 0x63 and + buf[5] == 0x68 and + buf[6] == 0x3E) + + +class Z(Type): + """ + Implements the Z archive type matcher. + """ + MIME = 'application/x-compress' + EXTENSION = 'Z' + + def __init__(self): + super(Z, self).__init__( + mime=Z.MIME, + extension=Z.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 1 and + ((buf[0] == 0x1F and + buf[1] == 0xA0) or + (buf[0] == 0x1F and + buf[1] == 0x9D))) + + +class Lz(Type): + """ + Implements the Lz archive type matcher. 
+ """ + MIME = 'application/x-lzip' + EXTENSION = 'lz' + + def __init__(self): + super(Lz, self).__init__( + mime=Lz.MIME, + extension=Lz.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x4C and + buf[1] == 0x5A and + buf[2] == 0x49 and + buf[3] == 0x50) diff --git a/utill/filetype/types/audio.py b/utill/filetype/types/audio.py new file mode 100644 index 0000000..5dafba5 --- /dev/null +++ b/utill/filetype/types/audio.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .base import Type + + +class Midi(Type): + """ + Implements the Midi audio type matcher. + """ + MIME = 'audio/midi' + EXTENSION = 'midi' + + def __init__(self): + super(Midi, self).__init__( + mime=Midi.MIME, + extension=Midi.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x4D and + buf[1] == 0x54 and + buf[2] == 0x68 and + buf[3] == 0x64) + + +class Mp3(Type): + """ + Implements the MP3 audio type matcher. + """ + MIME = 'audio/mpeg' + EXTENSION = 'mp3' + + def __init__(self): + super(Mp3, self).__init__( + mime=Mp3.MIME, + extension=Mp3.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 2 and + ((buf[0] == 0x49 and + buf[1] == 0x44 and + buf[2] == 0x33) or + (buf[0] == 0xFF and + buf[1] == 0xfb))) + + +class M4a(Type): + """ + Implements the M4A audio type matcher. + """ + MIME = 'audio/m4a' + EXTENSION = 'm4a' + + def __init__(self): + super(M4a, self).__init__( + mime=M4a.MIME, + extension=M4a.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 10 and + ((buf[4] == 0x66 and + buf[5] == 0x74 and + buf[6] == 0x79 and + buf[7] == 0x70 and + buf[8] == 0x4D and + buf[9] == 0x34 and + buf[10] == 0x41) or + (buf[0] == 0x4D and + buf[1] == 0x34 and + buf[2] == 0x41 and + buf[3] == 0x20))) + + +class Ogg(Type): + """ + Implements the OGG audio type matcher. 
+ """ + MIME = 'audio/ogg' + EXTENSION = 'ogg' + + def __init__(self): + super(Ogg, self).__init__( + mime=Ogg.MIME, + extension=Ogg.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x4F and + buf[1] == 0x67 and + buf[2] == 0x67 and + buf[3] == 0x53) + + +class Flac(Type): + """ + Implements the FLAC audio type matcher. + """ + MIME = 'audio/x-flac' + EXTENSION = 'flac' + + def __init__(self): + super(Flac, self).__init__( + mime=Flac.MIME, + extension=Flac.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x66 and + buf[1] == 0x4C and + buf[2] == 0x61 and + buf[3] == 0x43) + + +class Wav(Type): + """ + Implements the WAV audio type matcher. + """ + MIME = 'audio/x-wav' + EXTENSION = 'wav' + + def __init__(self): + super(Wav, self).__init__( + mime=Wav.MIME, + extension=Wav.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 11 and + buf[0] == 0x52 and + buf[1] == 0x49 and + buf[2] == 0x46 and + buf[3] == 0x46 and + buf[8] == 0x57 and + buf[9] == 0x41 and + buf[10] == 0x56 and + buf[11] == 0x45) + + +class Amr(Type): + """ + Implements the AMR audio type matcher. + """ + MIME = 'audio/amr' + EXTENSION = 'amr' + + def __init__(self): + super(Amr, self).__init__( + mime=Amr.MIME, + extension=Amr.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 11 and + buf[0] == 0x23 and + buf[1] == 0x21 and + buf[2] == 0x41 and + buf[3] == 0x4D and + buf[4] == 0x52 and + buf[5] == 0x0A) diff --git a/utill/filetype/types/base.py b/utill/filetype/types/base.py new file mode 100644 index 0000000..8213da1 --- /dev/null +++ b/utill/filetype/types/base.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + + +class Type(object): + """ + Represents the file type object inherited by + specific file type matchers. + Provides convenient accessor and helper methods. 
+ """ + def __init__(self, mime, extension): + self.__mime = mime + self.__extension = extension + + @property + def mime(self): + return self.__mime + + @property + def extension(self): + return self.__extension + + @property + def is_extension(self, extension): + return self.__extension is extension + + @property + def is_mime(self, mime): + return self.__mime is mime + + def match(self, buf): + raise NotImplementedError diff --git a/utill/filetype/types/font.py b/utill/filetype/types/font.py new file mode 100644 index 0000000..bdecf39 --- /dev/null +++ b/utill/filetype/types/font.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .base import Type + + +class Woff(Type): + """ + Implements the WOFF font type matcher. + """ + MIME = 'application/font-woff' + EXTENSION = 'woff' + + def __init__(self): + super(Woff, self).__init__( + mime=Woff.MIME, + extension=Woff.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 7 and + buf[0] == 0x77 and + buf[1] == 0x4F and + buf[2] == 0x46 and + buf[3] == 0x46 and + buf[4] == 0x00 and + buf[5] == 0x01 and + buf[6] == 0x00 and + buf[7] == 0x00) + + +class Woff2(Type): + """ + Implements the WOFF2 font type matcher. + """ + MIME = 'application/font-woff' + EXTENSION = 'woff2' + + def __init__(self): + super(Woff2, self).__init__( + mime=Woff2.MIME, + extension=Woff2.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 7 and + buf[0] == 0x77 and + buf[1] == 0x4F and + buf[2] == 0x46 and + buf[3] == 0x32 and + buf[4] == 0x00 and + buf[5] == 0x01 and + buf[6] == 0x00 and + buf[7] == 0x00) + + +class Ttf(Type): + """ + Implements the TTF font type matcher. 
+ """ + MIME = 'application/font-sfnt' + EXTENSION = 'ttf' + + def __init__(self): + super(Ttf, self).__init__( + mime=Ttf.MIME, + extension=Ttf.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 4 and + buf[0] == 0x00 and + buf[1] == 0x01 and + buf[2] == 0x00 and + buf[3] == 0x00 and + buf[4] == 0x00) + + +class Otf(Type): + """ + Implements the OTF font type matcher. + """ + MIME = 'application/font-sfnt' + EXTENSION = 'otf' + + def __init__(self): + super(Otf, self).__init__( + mime=Otf.MIME, + extension=Otf.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 4 and + buf[0] == 0x4F and + buf[1] == 0x54 and + buf[2] == 0x54 and + buf[3] == 0x4F and + buf[4] == 0x00) diff --git a/utill/filetype/types/image.py b/utill/filetype/types/image.py new file mode 100644 index 0000000..1fd6e17 --- /dev/null +++ b/utill/filetype/types/image.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .base import Type +from .isobmff import IsoBmff + + +class Jpeg(Type): + """ + Implements the JPEG image type matcher. + """ + MIME = 'image/jpeg' + EXTENSION = 'jpg' + + def __init__(self): + super(Jpeg, self).__init__( + mime=Jpeg.MIME, + extension=Jpeg.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 2 and + buf[0] == 0xFF and + buf[1] == 0xD8 and + buf[2] == 0xFF) + + +class Jpx(Type): + """ + Implements the JPEG2000 image type matcher. + """ + + MIME = "image/jpx" + EXTENSION = "jpx" + + def __init__(self): + super(Jpx, self).__init__(mime=Jpx.MIME, extension=Jpx.EXTENSION) + + def match(self, buf): + return ( + len(buf) > 50 + and buf[0] == 0x00 + and buf[1] == 0x00 + and buf[2] == 0x00 + and buf[3] == 0x0C + and buf[16:24] == b"ftypjp2 " + ) + + +class Png(Type): + """ + Implements the PNG image type matcher. 
+ """ + MIME = 'image/png' + EXTENSION = 'png' + + def __init__(self): + super(Png, self).__init__( + mime=Png.MIME, + extension=Png.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x89 and + buf[1] == 0x50 and + buf[2] == 0x4E and + buf[3] == 0x47) + + +class Gif(Type): + """ + Implements the GIF image type matcher. + """ + MIME = 'image/gif' + EXTENSION = 'gif' + + def __init__(self): + super(Gif, self).__init__( + mime=Gif.MIME, + extension=Gif.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 2 and + buf[0] == 0x47 and + buf[1] == 0x49 and + buf[2] == 0x46) + + +class Webp(Type): + """ + Implements the WEBP image type matcher. + """ + MIME = 'image/webp' + EXTENSION = 'webp' + + def __init__(self): + super(Webp, self).__init__( + mime=Webp.MIME, + extension=Webp.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 11 and + buf[8] == 0x57 and + buf[9] == 0x45 and + buf[10] == 0x42 and + buf[11] == 0x50) + + +class Cr2(Type): + """ + Implements the CR2 image type matcher. + """ + MIME = 'image/x-canon-cr2' + EXTENSION = 'cr2' + + def __init__(self): + super(Cr2, self).__init__( + mime=Cr2.MIME, + extension=Cr2.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 9 and + ((buf[0] == 0x49 and buf[1] == 0x49 and + buf[2] == 0x2A and buf[3] == 0x0) or + (buf[0] == 0x4D and buf[1] == 0x4D and + buf[2] == 0x0 and buf[3] == 0x2A)) and + buf[8] == 0x43 and buf[9] == 0x52) + + +class Tiff(Type): + """ + Implements the TIFF image type matcher. + """ + MIME = 'image/tiff' + EXTENSION = 'tif' + + def __init__(self): + super(Tiff, self).__init__( + mime=Tiff.MIME, + extension=Tiff.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 3 and + ((buf[0] == 0x49 and buf[1] == 0x49 and + buf[2] == 0x2A and buf[3] == 0x0) or + (buf[0] == 0x4D and buf[1] == 0x4D and + buf[2] == 0x0 and buf[3] == 0x2A))) + + +class Bmp(Type): + """ + Implements the BMP image type matcher. 
+ """ + MIME = 'image/bmp' + EXTENSION = 'bmp' + + def __init__(self): + super(Bmp, self).__init__( + mime=Bmp.MIME, + extension=Bmp.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 1 and + buf[0] == 0x42 and + buf[1] == 0x4D) + + +class Jxr(Type): + """ + Implements the JXR image type matcher. + """ + MIME = 'image/vnd.ms-photo' + EXTENSION = 'jxr' + + def __init__(self): + super(Jxr, self).__init__( + mime=Jxr.MIME, + extension=Jxr.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 2 and + buf[0] == 0x49 and + buf[1] == 0x49 and + buf[2] == 0xBC) + + +class Psd(Type): + """ + Implements the PSD image type matcher. + """ + MIME = 'image/vnd.adobe.photoshop' + EXTENSION = 'psd' + + def __init__(self): + super(Psd, self).__init__( + mime=Psd.MIME, + extension=Psd.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x38 and + buf[1] == 0x42 and + buf[2] == 0x50 and + buf[3] == 0x53) + + +class Ico(Type): + """ + Implements the ICO image type matcher. + """ + MIME = 'image/x-icon' + EXTENSION = 'ico' + + def __init__(self): + super(Ico, self).__init__( + mime=Ico.MIME, + extension=Ico.EXTENSION, + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x00 and + buf[1] == 0x00 and + buf[2] == 0x01 and + buf[3] == 0x00) + + +class Heic(IsoBmff): + """ + Implements the HEIC image type matcher. 
+ """ + MIME = 'image/heic' + EXTENSION = 'heic' + + def __init__(self): + super(Heic, self).__init__( + mime=Heic.MIME, + extension=Heic.EXTENSION + ) + + def match(self, buf): + if not self._is_isobmff(buf): + return False + + major_brand, minor_version, compatible_brands = self._get_ftyp(buf) + if major_brand == 'heic': + return True + if major_brand in ['mif1', 'msf1'] and 'heic' in compatible_brands: + return True + return False + + +class Dcm(Type): + + MIME = 'application/dicom' + EXTENSION = 'dcm' + OFFSET = 128 + + def __init__(self): + super(Dcm, self).__init__( + mime=Dcm.MIME, + extension=Dcm.EXTENSION + ) + + def match(self, buf): + return (len(buf) > Dcm.OFFSET + 4 and + buf[Dcm.OFFSET + 0] == 0x44 and + buf[Dcm.OFFSET + 1] == 0x49 and + buf[Dcm.OFFSET + 2] == 0x43 and + buf[Dcm.OFFSET + 3] == 0x4D) diff --git a/utill/filetype/types/isobmff.py b/utill/filetype/types/isobmff.py new file mode 100644 index 0000000..3d5a1fc --- /dev/null +++ b/utill/filetype/types/isobmff.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +import codecs + +from .base import Type + + +class IsoBmff(Type): + """ + Implements the ISO-BMFF base type. 
+ """ + def __init__(self, mime, extension): + super(IsoBmff, self).__init__( + mime=mime, + extension=extension + ) + + def _is_isobmff(self, buf): + if len(buf) < 16 or buf[4:8] != b'ftyp': + return False + if len(buf) < int(codecs.encode(buf[0:4], 'hex'), 16): + return False + return True + + def _get_ftyp(self, buf): + ftyp_len = int(codecs.encode(buf[0:4], 'hex'), 16) + major_brand = buf[8:12].decode() + minor_version = int(codecs.encode(buf[12:16], 'hex'), 16) + compatible_brands = [] + for i in range(16, ftyp_len, 4): + compatible_brands.append(buf[i:i+4].decode()) + + return major_brand, minor_version, compatible_brands diff --git a/utill/filetype/types/video.py b/utill/filetype/types/video.py new file mode 100644 index 0000000..9955397 --- /dev/null +++ b/utill/filetype/types/video.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .base import Type +from .isobmff import IsoBmff + + +class Mp4(IsoBmff): + """ + Implements the MP4 video type matcher. + """ + MIME = 'video/mp4' + EXTENSION = 'mp4' + + def __init__(self): + super(Mp4, self).__init__( + mime=Mp4.MIME, + extension=Mp4.EXTENSION + ) + + def match(self, buf): + if not self._is_isobmff(buf): + return False + + major_brand, minor_version, compatible_brands = self._get_ftyp(buf) + return major_brand in ['mp41', 'mp42'] + + +class M4v(Type): + """ + Implements the M4V video type matcher. + """ + MIME = 'video/x-m4v' + EXTENSION = 'm4v' + + def __init__(self): + super(M4v, self).__init__( + mime=M4v.MIME, + extension=M4v.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 10 and + buf[0] == 0x0 and buf[1] == 0x0 and + buf[2] == 0x0 and buf[3] == 0x1C and + buf[4] == 0x66 and buf[5] == 0x74 and + buf[6] == 0x79 and buf[7] == 0x70 and + buf[8] == 0x4D and buf[9] == 0x34 and + buf[10] == 0x56) + + +class Mkv(Type): + """ + Implements the MKV video type matcher. 
+ """ + MIME = 'video/x-matroska' + EXTENSION = 'mkv' + + def __init__(self): + super(Mkv, self).__init__( + mime=Mkv.MIME, + extension=Mkv.EXTENSION + ) + + def match(self, buf): + return ((len(buf) > 15 and + buf[0] == 0x1A and buf[1] == 0x45 and + buf[2] == 0xDF and buf[3] == 0xA3 and + buf[4] == 0x93 and buf[5] == 0x42 and + buf[6] == 0x82 and buf[7] == 0x88 and + buf[8] == 0x6D and buf[9] == 0x61 and + buf[10] == 0x74 and buf[11] == 0x72 and + buf[12] == 0x6F and buf[13] == 0x73 and + buf[14] == 0x6B and buf[15] == 0x61) or + (len(buf) > 38 and + buf[31] == 0x6D and buf[32] == 0x61 and + buf[33] == 0x74 and buf[34] == 0x72 and + buf[35] == 0x6f and buf[36] == 0x73 and + buf[37] == 0x6B and buf[38] == 0x61)) + + +class Webm(Type): + """ + Implements the WebM video type matcher. + """ + MIME = 'video/webm' + EXTENSION = 'webm' + + def __init__(self): + super(Webm, self).__init__( + mime=Webm.MIME, + extension=Webm.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x1A and + buf[1] == 0x45 and + buf[2] == 0xDF and + buf[3] == 0xA3) + + +class Mov(IsoBmff): + """ + Implements the MOV video type matcher. + """ + MIME = 'video/quicktime' + EXTENSION = 'mov' + + def __init__(self): + super(Mov, self).__init__( + mime=Mov.MIME, + extension=Mov.EXTENSION + ) + + def match(self, buf): + if not self._is_isobmff(buf): + return False + + major_brand, minor_version, compatible_brands = self._get_ftyp(buf) + return major_brand == 'qt ' + + +class Avi(Type): + """ + Implements the AVI video type matcher. + """ + MIME = 'video/x-msvideo' + EXTENSION = 'avi' + + def __init__(self): + super(Avi, self).__init__( + mime=Avi.MIME, + extension=Avi.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 10 and + buf[0] == 0x52 and + buf[1] == 0x49 and + buf[2] == 0x46 and + buf[3] == 0x46 and + buf[8] == 0x41 and + buf[9] == 0x56 and + buf[10] == 0x49) + + +class Wmv(Type): + """ + Implements the WMV video type matcher. 
+ """ + MIME = 'video/x-ms-wmv' + EXTENSION = 'wmv' + + def __init__(self): + super(Wmv, self).__init__( + mime=Wmv.MIME, + extension=Wmv.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 9 and + buf[0] == 0x30 and + buf[1] == 0x26 and + buf[2] == 0xB2 and + buf[3] == 0x75 and + buf[4] == 0x8E and + buf[5] == 0x66 and + buf[6] == 0xCF and + buf[7] == 0x11 and + buf[8] == 0xA6 and + buf[9] == 0xD9) + + +class Flv(Type): + """ + Implements the FLV video type matcher. + """ + MIME = 'video/x-flv' + EXTENSION = 'flv' + + def __init__(self): + super(Flv, self).__init__( + mime=Flv.MIME, + extension=Flv.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x46 and + buf[1] == 0x4C and + buf[2] == 0x56 and + buf[3] == 0x01) + + +class Mpeg(Type): + """ + Implements the MPEG video type matcher. + """ + MIME = 'video/mpeg' + EXTENSION = 'mpg' + + def __init__(self): + super(Mpeg, self).__init__( + mime=Mpeg.MIME, + extension=Mpeg.EXTENSION + ) + + def match(self, buf): + return (len(buf) > 3 and + buf[0] == 0x0 and + buf[1] == 0x0 and + buf[2] == 0x1 and + buf[3] >= 0xb0 and + buf[3] <= 0xbf) diff --git a/utill/filetype/utils.py b/utill/filetype/utils.py new file mode 100644 index 0000000..e5f2a07 --- /dev/null +++ b/utill/filetype/utils.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +_NUM_SIGNATURE_BYTES = 262 + + +def get_signature_bytes(path): + """ + Reads file from disk and returns the first 262 bytes + of data representing the magic number header signature. + + Args: + path: path string to file. + + Returns: + First 262 bytes of the file content as bytearray type. + """ + with open(path, 'rb') as fp: + return bytearray(fp.read(_NUM_SIGNATURE_BYTES)) + + +def signature(array): + """ + Returns the first 262 bytes of the given bytearray + as part of the file header signature. + + Args: + array: bytearray to extract the header signature. + + Returns: + First 262 bytes of the file content as bytearray type. 
+ """ + length = len(array) + index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length + + return array[:index] + + +def get_bytes(obj): + """ + Infers the input type and reads the first 262 bytes, + returning a sliced bytearray. + + Args: + obj: path to readable, file, bytes or bytearray. + + Returns: + First 262 bytes of the file content as bytearray type. + + Raises: + TypeError: if obj is not a supported type. + """ + try: + obj = obj.read(_NUM_SIGNATURE_BYTES) + except AttributeError: + # duck-typing as readable failed - we'll try the other options + pass + + kind = type(obj) + + if kind is bytearray: + return signature(obj) + + if kind is str: + return get_signature_bytes(obj) + + if kind is bytes: + return signature(obj) + + if kind is memoryview: + return signature(obj).tolist() + + raise TypeError('Unsupported type as file input: %s' % kind) diff --git a/utill/http.py b/utill/http.py new file mode 100644 index 0000000..7973038 --- /dev/null +++ b/utill/http.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +import requests,traceback +from requests.packages.urllib3.exceptions import InsecureRequestWarning +requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +class Http: + "http请求类" + set_proxies=None #设置代理 + set_cookies={} #设置请求cookie + set_header={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'} #请求头 + set_timeout=10 #超时 20秒 + set_max_retries=2 #重试次数 (实际请求3次) + set_verify=False #SSL 证书的验证 sll证书路径 + set_encoding="utf-8" #设置text输出编码 + set_session=True #是否启用会话 + + get_header={} #获取响应头 + get_cookies={} #获取最后的响应cookie + get_cookie_str='' #获取最后的响应cookie 字符串 + get_text='' #获取body响应内容 + get_content='' #获取body响应二进制内容 + get_response='' #获取响应对象 + get_status_code=None #获取响应状态码 + + req=None + def gettext(self): + """得到响应text""" + return self.get_text + def openurl(self,url,method="GET",data=None,files=None): + """模拟浏览器请求 + + url : 目标地址 + + method :GET POST + + 
data:请求参数 + """ + if self.set_session: + if self.req is None: + self.req = requests.Session() + self.req.mount('http://', requests.adapters.HTTPAdapter(max_retries=self.set_max_retries)) + self.req.mount('https://', requests.adapters.HTTPAdapter(max_retries=self.set_max_retries)) + else: + if self.req is None: + self.req = requests + if self.set_cookies and isinstance(self.set_cookies,str): + self.cookieserTdict() + response=self.req.request(method, url,data=data,files=files,proxies=self.set_proxies,cookies=self.set_cookies,headers=self.set_header,timeout=self.set_timeout,verify=self.set_verify) + response.encoding=self.set_encoding + self.get_header=dict(response.headers) + cookie=requests.utils.dict_from_cookiejar(response.cookies) + if self.get_cookies and cookie: + self.get_cookies=self.__merge(self.get_cookies,cookie) + elif cookie: + self.get_cookies=cookie + if self.set_cookies: + self.get_cookies=self.__merge(self.set_cookies,self.get_cookies) + if self.get_cookies: + cookies='' + for key in self.get_cookies: + cookies=cookies+key+"="+self.get_cookies[key]+";" + self.get_cookie_str=cookies + self.get_text=response.text + self.get_content=response.content + self.get_response=response + self.get_status_code=int(response.status_code) + def __merge(self,dict1, dict2): + "合并两个字典" + C_dict = {} + for key,value in dict1.items(): + C_dict[key]=value + for key,value in dict2.items(): + C_dict[key]=value + return C_dict + def cookieserTdict(self): + "cookies字符串转换字典" + if isinstance(self.set_cookies,str): + cok={} + for line in self.set_cookies.split(";"): + lists=line.split("=",1) + # print(lists[]) + if lists[0]: + cok[lists[0]]=lists[1] + self.set_cookies=cok \ No newline at end of file diff --git a/utill/queues.py b/utill/queues.py new file mode 100644 index 0000000..5dd9d70 --- /dev/null +++ b/utill/queues.py @@ -0,0 +1,101 @@ +from queue import Queue +from .db import model +from .db import sqlite as kcwsqlite +import threading,time,os,hashlib,random 
+queuesdbpath=os.path.split(os.path.realpath(__file__))[0]+"/Queues" +class model_task(model.model): + "任务" + config={'type':'sqlite','db':queuesdbpath} + model.dbtype.conf=config + table="Queues" + fields={ + "id":model.dbtype.int(LEN=11,PRI=True,A_L=True), #设置id为自增主键 + "taskid":model.dbtype.varchar(LEN=32,DEFAULT=''), #设置id为自增主键 + "title":model.dbtype.varchar(LEN=1024,DEFAULT=''), #名称 + "describes":model.dbtype.varchar(LEN=2048,DEFAULT=''), #描述 + "code":model.dbtype.int(LEN=11,DEFAULT=2), #状态码 0成功 1失败 2等待中 3正在执行 4完成 + "msg":model.dbtype.text(), #状态描述 + "error":model.dbtype.text(), #异常信息 + "addtime":model.dbtype.int(LEN=11,DEFAULT=0) #添加时间 + } +class Queues(): + __globalqueue=None + def start(): + Queues.__globalqueue=Queue() + t=threading.Thread(target=Queues.__messagequeue) + t.daemon=True + t.start() + def __messagequeue(): + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where(True).delete() + while True: + if not Queues.__globalqueue.empty(): + value=Queues.__globalqueue.get() + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":3,"msg":"正在执行","error":""}) + if value['args']: + try: + value['target'](*value['args']) + except Exception as e: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":1,"msg":"失败","error":str(e)}) + else: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":4,"msg":"执行完成"}) + else: + try: + value['target']() + except Exception as e: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and code!=4").update({"code":1,"msg":"失败","error":str(e)}) + else: + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").where("taskid = '"+value['task']['taskid']+"' and 
code!=4").update({"code":4,"msg":"执行完成"}) + else: + time.sleep(1) + def insert(target,args=None,title="默认任务",describes="",msg='等待中'): #add_queue + """添加队列 + + target 方法名 必须 + + args 方法参数 非必须 如 + + title 任务名称 + + describes 任务描述 + + return taskid + """ + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + ttt=int(time.time()) + print(ttt) + m = hashlib.md5() + m.update((str(ttt)+str(random.randint(100000,999999))).encode(encoding='utf-8')) + taskid=m.hexdigest() + task={"taskid":taskid,"title":title,"describes":describes,"code":2,"msg":msg,"error":"","addtime":ttt} + key={"target":target,"args":args,"task":task} + Queues.__globalqueue.put(key) + kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").insert(task) + return taskid + def getall(code=''): + """获取全部队列 + + code 1获取失败的任务 2获取等待中的任务 3获取正在执行中的任务 4获取执行完成的任务 + """ + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + where=False + if code: + where="code="+code + # else: + # where="code!=4" + return kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").field("taskid,title,describes,code,msg,error,addtime").where(where).select() + def status(taskid): + """获取任务状态 + + taskid 任务id + """ + if not os.path.isfile(queuesdbpath): + t=model_task() + t.create_table() + return kcwsqlite.sqlite().connect(queuesdbpath).table("Queues").field("taskid,title,describes,code,msg,error,addtime").where("taskid",taskid).find() \ No newline at end of file diff --git a/utill/redis.py b/utill/redis.py new file mode 100644 index 0000000..d717e68 --- /dev/null +++ b/utill/redis.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +from kcweb.utill import rediss as red +from kcweb import config +import json +class redis: + "redis 注意:连接池链接模式下不支持动态配置" + __redisObj=None + __config=config.redis + def __connects(self): + """设置redis链接""" + if self.__config['pattern']: + if not self.__redisObj: + if self.__config['password']: + 
redis_pool=red.ConnectionPool(host=self.__config['host'],password=self.__config['password'],port=self.__config['port'],db=self.__config['db']) + else: + redis_pool=red.ConnectionPool(host=self.__config['host'],port=self.__config['port'],db=self.__config['db']) + self.__redisObj=red.Redis(connection_pool=redis_pool) + else: + if self.__config['password']: + self.__redisObj=red.Redis(host=self.__config['host'],password=self.__config['password'],port=self.__config['port'],db=self.__config['db']) + else: + self.__redisObj=red.Redis(host=self.__config['host'],port=self.__config['port'],db=self.__config['db']) + # print(self.__redisObj) + def __json_decode(self,strs): + """json字符串转python类型""" + try: + return json.loads(strs) + except Exception: + return {} + def __json_encode(self,strs): + """转成字符串""" + try: + return json.dumps(strs,ensure_ascii=False) + except Exception: + return "" + def getconfig(self): + return self.__config + def connect(self,config): + """设置redis链接信息 + + 参数 config 参考配置信息格式 + + 返回 redis + """ + if config: + if isinstance(config,dict): + if "host" in config: + self.__config['host']=config['host'] + if "port" in config: + self.__config['port']=config['port'] + if "password" in config: + self.__config['password']=config['password'] + if "db" in config: + self.__config['db']=config['db'] + return self + def redisObj(self): + "得到一个redis连接对象,执行更多高级操作" + self.__connects() + return self.__redisObj + def getstr(self,name): + """获取name的值 + + name,键 + 返回键“name”处的值,如果该键不存在,则返回“none” + """ + self.__connects() + return self.__redisObj.get(name) + def setstr(self,name,value,ex=None, px=None, nx=False, xx=False): + """ + name,键 + + value,值 只能是字符串 + + ex,过期时间(秒) + + px,过期时间(毫秒) + + nx,如果设置为True,则只有key不存在时,当前set操作才执行,同#setnx(key, value) + + xx,如果设置为True,则只有key存在时,当前set操作才执行 + """ + if not self.__redisObj: + self.__connects() + if not ex and not px: + if self.__config['ex']: + ex=self.__config['ex'] + return self.__redisObj.set(name, value, ex=ex, px=px, nx=nx, xx=xx) 
+ def append(self,name,value): + """将字符串“value”追加到“name”处的值。如果``键`` 不存在,请使用值“name”创建它。 返回位于“name”的值的新长度。 + + name,键 + + value,值 只能是字符串 + """ + self.__connects() + return self.__redisObj.append(name,value) + + def set(self,name,value,ex=None, px=None, nx=False, xx=False): + """ + name,键 + + value,值 可以是字典 列表 或字符串 + + ex,过期时间(秒) + + px,过期时间(毫秒) + + nx,如果设置为True,则只有key不存在时,当前set操作才执行 + + xx,如果设置为True,则只有key存在时,当前set操作才执行 + """ + if not self.__redisObj: + self.__connects() + if not ex and not px: + if self.__config['ex']: + ex=self.__config['ex'] + value=self.__json_encode(value) + return self.__redisObj.set(name, value, ex=ex, px=px, nx=nx, xx=xx) + + def get(self,name): + """获取name的值 + + name,键 + 返回键“name”处的值,如果该键不存在,则返回“none” + """ + self.__connects() + value=self.__redisObj.get(name) + if value: + value=self.__json_decode(value) + return value + def delete(self,name): + """删除name的值 + + name,键 + + 返回 True,如果该键不存在,则返回 0 + """ + self.__connects() + return self.__redisObj.delete(name) + def rpush(self,name, *values): + "元素从list的右边加入 ,可以添加多个" + self.__connects() + # print(self.__config) + return self.__redisObj.rpush(name, *values) + def rpop(self,name): + "元素从list的右边移出" + self.__connects() + return self.__redisObj.rpop(name) + def rpoplpush(self,src, dst): + "元素从list的右边移出,并且从list的左边加入" + self.__connects() + return self.__redisObj.rpoplpush(src, dst) + def rpushx(self,name, value): + "当name存在时,元素才能从list的右边加入" + self.__connects() + return self.__redisObj.rpushx(name, value) + def lpush(self,name, *values): + "元素从list的左边加入,可以添加多个" + self.__connects() + return self.__redisObj.lpush(name, *values) + def lpop(self,name): + "元素从list的左边移出" + self.__connects() + return self.__redisObj.lpop(name) + def lpushxs(self,name): + "当name存在时,元素才能从list的左边加入" + self.__connects() + return self.__redisObj.lpushx(name) + def hset(self,name,key,value): + """在hash名称中将key设置为value如果HSET创建了新字段,则返回1,否则返回0 + + name,名 + + key,键 + + mapping,值 + """ + self.__connects() + return 
self.__redisObj.hset(name,key,value) + def hget(self,name,key): + "返回hash的name中的key值" + self.__connects() + return self.__redisObj.hget(name,key) + def hgetall(self,name): + "返回hash名称/值对的Python dict" + self.__connects() + return self.__redisObj.hgetall(name) + def hmset(self,name,mapping,ex=None): + """在hash的name中为每个键设置值 + name,键 + + mapping,值 + + ex,过期时间(秒) + + """ + self.__connects() + boot = self.__redisObj.hmset(name,mapping) + + if not ex: + if self.__config['ex']: + ex=self.__config['ex'] + if ex: + self.__redisObj.expire(name,ex) + return boot + def hmget(self,name, keys, *args): + "返回与“keys”顺序相同的值列表``" + self.__connects() + return self.__redisObj.hmget(name, keys, *args) + diff --git a/utill/rediss/__init__.py b/utill/rediss/__init__.py new file mode 100644 index 0000000..8c2be51 --- /dev/null +++ b/utill/rediss/__init__.py @@ -0,0 +1,41 @@ +from .client import Redis, StrictRedis +from .connection import ( + BlockingConnectionPool, + ConnectionPool, + Connection, + SSLConnection, + UnixDomainSocketConnection +) +from .utils import from_url +from .exceptions import ( + AuthenticationError, + BusyLoadingError, + ConnectionError, + DataError, + InvalidResponse, + PubSubError, + ReadOnlyError, + RedisError, + ResponseError, + TimeoutError, + WatchError +) + + +def int_or_str(value): + try: + return int(value) + except ValueError: + return value + + +__version__ = '3.3.8' +VERSION = tuple(map(int_or_str, __version__.split('.'))) + +__all__ = [ + 'Redis', 'StrictRedis', 'ConnectionPool', 'BlockingConnectionPool', + 'Connection', 'SSLConnection', 'UnixDomainSocketConnection', 'from_url', + 'AuthenticationError', 'BusyLoadingError', 'ConnectionError', 'DataError', + 'InvalidResponse', 'PubSubError', 'ReadOnlyError', 'RedisError', + 'ResponseError', 'TimeoutError', 'WatchError' +] diff --git a/utill/rediss/_compat.py b/utill/rediss/_compat.py new file mode 100644 index 0000000..d70af2a --- /dev/null +++ b/utill/rediss/_compat.py @@ -0,0 +1,138 @@ +"""Internal module 
for Python 2 backwards compatibility.""" +import errno +import socket +import sys + +# For Python older than 3.5, retry EINTR. +if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and + sys.version_info[1] < 5): + # Adapted from https://bugs.python.org/review/23863/patch/14532/54418 + import time + + # Wrapper for handling interruptable system calls. + def _retryable_call(s, func, *args, **kwargs): + # Some modules (SSL) use the _fileobject wrapper directly and + # implement a smaller portion of the socket interface, thus we + # need to let them continue to do so. + timeout, deadline = None, 0.0 + attempted = False + try: + timeout = s.gettimeout() + except AttributeError: + pass + + if timeout: + deadline = time.time() + timeout + + try: + while True: + if attempted and timeout: + now = time.time() + if now >= deadline: + raise socket.error(errno.EWOULDBLOCK, "timed out") + else: + # Overwrite the timeout on the socket object + # to take into account elapsed time. + s.settimeout(deadline - now) + try: + attempted = True + return func(*args, **kwargs) + except socket.error as e: + if e.args[0] == errno.EINTR: + continue + raise + finally: + # Set the existing timeout back for future + # calls. 
+ if timeout: + s.settimeout(timeout) + + def recv(sock, *args, **kwargs): + return _retryable_call(sock, sock.recv, *args, **kwargs) + + def recv_into(sock, *args, **kwargs): + return _retryable_call(sock, sock.recv_into, *args, **kwargs) + +else: # Python 3.5 and above automatically retry EINTR + def recv(sock, *args, **kwargs): + return sock.recv(*args, **kwargs) + + def recv_into(sock, *args, **kwargs): + return sock.recv_into(*args, **kwargs) + +if sys.version_info[0] < 3: + from urllib import unquote + from urlparse import parse_qs, urlparse + from itertools import imap, izip + from string import letters as ascii_letters + from Queue import Queue + + # special unicode handling for python2 to avoid UnicodeDecodeError + def safe_unicode(obj, *args): + """ return the unicode representation of obj """ + try: + return unicode(obj, *args) + except UnicodeDecodeError: + # obj is byte string + ascii_text = str(obj).encode('string_escape') + return unicode(ascii_text) + + def iteritems(x): + return x.iteritems() + + def iterkeys(x): + return x.iterkeys() + + def itervalues(x): + return x.itervalues() + + def nativestr(x): + return x if isinstance(x, str) else x.encode('utf-8', 'replace') + + def next(x): + return x.next() + + def byte_to_chr(x): + return x + + unichr = unichr + xrange = xrange + basestring = basestring + unicode = unicode + long = long + BlockingIOError = socket.error +else: + from urllib.parse import parse_qs, unquote, urlparse + from string import ascii_letters + from queue import Queue + + def iteritems(x): + return iter(x.items()) + + def iterkeys(x): + return iter(x.keys()) + + def itervalues(x): + return iter(x.values()) + + def byte_to_chr(x): + return chr(x) + + def nativestr(x): + return x if isinstance(x, str) else x.decode('utf-8', 'replace') + + next = next + unichr = chr + imap = map + izip = zip + xrange = range + basestring = str + unicode = str + safe_unicode = str + long = int + BlockingIOError = BlockingIOError + +try: # Python 3 + 
from queue import LifoQueue, Empty, Full +except ImportError: # Python 2 + from Queue import LifoQueue, Empty, Full diff --git a/utill/rediss/client.py b/utill/rediss/client.py new file mode 100644 index 0000000..675b0d0 --- /dev/null +++ b/utill/rediss/client.py @@ -0,0 +1,3865 @@ +from __future__ import unicode_literals +from itertools import chain +import datetime +import sys +import warnings +import time +import threading +import time as mod_time +import re +import hashlib +from ._compat import (basestring, imap, iteritems, iterkeys, + itervalues, izip, long, nativestr, safe_unicode) +from .connection import (ConnectionPool, UnixDomainSocketConnection, + SSLConnection) +from .lock import Lock +from .exceptions import ( + ConnectionError, + DataError, + ExecAbortError, + NoScriptError, + PubSubError, + RedisError, + ResponseError, + TimeoutError, + WatchError, +) + +SYM_EMPTY = b'' +EMPTY_RESPONSE = 'EMPTY_RESPONSE' + + +def list_or_args(keys, args): + # returns a single new list combining keys and args + try: + iter(keys) + # a string or bytes instance can be iterated, but indicates + # keys wasn't passed as a list + if isinstance(keys, (basestring, bytes)): + keys = [keys] + else: + keys = list(keys) + except TypeError: + keys = [keys] + if args: + keys.extend(args) + return keys + + +def timestamp_to_datetime(response): + "Converts a unix timestamp to a Python datetime object" + if not response: + return None + try: + response = int(response) + except ValueError: + return None + return datetime.datetime.fromtimestamp(response) + + +def string_keys_to_dict(key_string, callback): + return dict.fromkeys(key_string.split(), callback) + + +def dict_merge(*dicts): + merged = {} + for d in dicts: + merged.update(d) + return merged + + +class CaseInsensitiveDict(dict): + "Case insensitive dict implementation. Assumes string keys only." 
+ + def __init__(self, data): + for k, v in iteritems(data): + self[k.upper()] = v + + def __contains__(self, k): + return super(CaseInsensitiveDict, self).__contains__(k.upper()) + + def __delitem__(self, k): + super(CaseInsensitiveDict, self).__delitem__(k.upper()) + + def __getitem__(self, k): + return super(CaseInsensitiveDict, self).__getitem__(k.upper()) + + def get(self, k, default=None): + return super(CaseInsensitiveDict, self).get(k.upper(), default) + + def __setitem__(self, k, v): + super(CaseInsensitiveDict, self).__setitem__(k.upper(), v) + + def update(self, data): + data = CaseInsensitiveDict(data) + super(CaseInsensitiveDict, self).update(data) + + +def parse_debug_object(response): + "Parse the results of Redis's DEBUG OBJECT command into a Python dict" + # The 'type' of the object is the first item in the response, but isn't + # prefixed with a name + response = nativestr(response) + response = 'type:' + response + response = dict(kv.split(':') for kv in response.split()) + + # parse some expected int values from the string response + # note: this cmd isn't spec'd so these may not appear in all redis versions + int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle') + for field in int_fields: + if field in response: + response[field] = int(response[field]) + + return response + + +def parse_object(response, infotype): + "Parse the results of an OBJECT command" + if infotype in ('idletime', 'refcount'): + return int_or_none(response) + return response + + +def parse_info(response): + "Parse the result of Redis's INFO command into a Python dict" + info = {} + response = nativestr(response) + + def get_value(value): + if ',' not in value or '=' not in value: + try: + if '.' 
in value: + return float(value) + else: + return int(value) + except ValueError: + return value + else: + sub_dict = {} + for item in value.split(','): + k, v = item.rsplit('=', 1) + sub_dict[k] = get_value(v) + return sub_dict + + for line in response.splitlines(): + if line and not line.startswith('#'): + if line.find(':') != -1: + # Split, the info fields keys and values. + # Note that the value may contain ':'. but the 'host:' + # pseudo-command is the only case where the key contains ':' + key, value = line.split(':', 1) + if key == 'cmdstat_host': + key, value = line.rsplit(':', 1) + info[key] = get_value(value) + else: + # if the line isn't splittable, append it to the "__raw__" key + info.setdefault('__raw__', []).append(line) + + return info + + +SENTINEL_STATE_TYPES = { + 'can-failover-its-master': int, + 'config-epoch': int, + 'down-after-milliseconds': int, + 'failover-timeout': int, + 'info-refresh': int, + 'last-hello-message': int, + 'last-ok-ping-reply': int, + 'last-ping-reply': int, + 'last-ping-sent': int, + 'master-link-down-time': int, + 'master-port': int, + 'num-other-sentinels': int, + 'num-slaves': int, + 'o-down-time': int, + 'pending-commands': int, + 'parallel-syncs': int, + 'port': int, + 'quorum': int, + 'role-reported-time': int, + 's-down-time': int, + 'slave-priority': int, + 'slave-repl-offset': int, + 'voted-leader-epoch': int +} + + +def parse_sentinel_state(item): + result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES) + flags = set(result['flags'].split(',')) + for name, flag in (('is_master', 'master'), ('is_slave', 'slave'), + ('is_sdown', 's_down'), ('is_odown', 'o_down'), + ('is_sentinel', 'sentinel'), + ('is_disconnected', 'disconnected'), + ('is_master_down', 'master_down')): + result[name] = flag in flags + return result + + +def parse_sentinel_master(response): + return parse_sentinel_state(imap(nativestr, response)) + + +def parse_sentinel_masters(response): + result = {} + for item in response: + state = 
parse_sentinel_state(imap(nativestr, item)) + result[state['name']] = state + return result + + +def parse_sentinel_slaves_and_sentinels(response): + return [parse_sentinel_state(imap(nativestr, item)) for item in response] + + +def parse_sentinel_get_master(response): + return response and (response[0], int(response[1])) or None + + +def pairs_to_dict(response, decode_keys=False): + "Create a dict given a list of key/value pairs" + if response is None: + return {} + if decode_keys: + # the iter form is faster, but I don't know how to make that work + # with a nativestr() map + return dict(izip(imap(nativestr, response[::2]), response[1::2])) + else: + it = iter(response) + return dict(izip(it, it)) + + +def pairs_to_dict_typed(response, type_info): + it = iter(response) + result = {} + for key, value in izip(it, it): + if key in type_info: + try: + value = type_info[key](value) + except Exception: + # if for some reason the value can't be coerced, just use + # the string value + pass + result[key] = value + return result + + +def zset_score_pairs(response, **options): + """ + If ``withscores`` is specified in the options, return the response as + a list of (value, score) pairs + """ + if not response or not options.get('withscores'): + return response + score_cast_func = options.get('score_cast_func', float) + it = iter(response) + return list(izip(it, imap(score_cast_func, it))) + + +def sort_return_tuples(response, **options): + """ + If ``groups`` is specified, return the response as a list of + n-element tuples with n being the value found in options['groups'] + """ + if not response or not options.get('groups'): + return response + n = options['groups'] + return list(izip(*[response[i::n] for i in range(n)])) + + +def int_or_none(response): + if response is None: + return None + return int(response) + + +def nativestr_or_none(response): + if response is None: + return None + return nativestr(response) + + +def parse_stream_list(response): + if response is 
None: + return None + data = [] + for r in response: + if r is not None: + data.append((r[0], pairs_to_dict(r[1]))) + else: + data.append((None, None)) + return data + + +def pairs_to_dict_with_nativestr_keys(response): + return pairs_to_dict(response, decode_keys=True) + + +def parse_list_of_dicts(response): + return list(imap(pairs_to_dict_with_nativestr_keys, response)) + + +def parse_xclaim(response, **options): + if options.get('parse_justid', False): + return response + return parse_stream_list(response) + + +def parse_xinfo_stream(response): + data = pairs_to_dict(response, decode_keys=True) + first = data['first-entry'] + if first is not None: + data['first-entry'] = (first[0], pairs_to_dict(first[1])) + last = data['last-entry'] + if last is not None: + data['last-entry'] = (last[0], pairs_to_dict(last[1])) + return data + + +def parse_xread(response): + if response is None: + return [] + return [[r[0], parse_stream_list(r[1])] for r in response] + + +def parse_xpending(response, **options): + if options.get('parse_detail', False): + return parse_xpending_range(response) + consumers = [{'name': n, 'pending': long(p)} for n, p in response[3] or []] + return { + 'pending': response[0], + 'min': response[1], + 'max': response[2], + 'consumers': consumers + } + + +def parse_xpending_range(response): + k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered') + return [dict(izip(k, r)) for r in response] + + +def float_or_none(response): + if response is None: + return None + return float(response) + + +def bool_ok(response): + return nativestr(response) == 'OK' + + +def parse_zadd(response, **options): + if response is None: + return None + if options.get('as_score'): + return float(response) + return int(response) + + +def parse_client_list(response, **options): + clients = [] + for c in nativestr(response).splitlines(): + # Values might contain '=' + clients.append(dict(pair.split('=', 1) for pair in c.split(' '))) + return clients + + +def 
parse_config_get(response, **options): + response = [nativestr(i) if i is not None else None for i in response] + return response and pairs_to_dict(response) or {} + + +def parse_scan(response, **options): + cursor, r = response + return long(cursor), r + + +def parse_hscan(response, **options): + cursor, r = response + return long(cursor), r and pairs_to_dict(r) or {} + + +def parse_zscan(response, **options): + score_cast_func = options.get('score_cast_func', float) + cursor, r = response + it = iter(r) + return long(cursor), list(izip(it, imap(score_cast_func, it))) + + +def parse_slowlog_get(response, **options): + return [{ + 'id': item[0], + 'start_time': int(item[1]), + 'duration': int(item[2]), + 'command': b' '.join(item[3]) + } for item in response] + + +def parse_cluster_info(response, **options): + response = nativestr(response) + return dict(line.split(':') for line in response.splitlines() if line) + + +def _parse_node_line(line): + line_items = line.split(' ') + node_id, addr, flags, master_id, ping, pong, epoch, \ + connected = line.split(' ')[:8] + slots = [sl.split('-') for sl in line_items[8:]] + node_dict = { + 'node_id': node_id, + 'flags': flags, + 'master_id': master_id, + 'last_ping_sent': ping, + 'last_pong_rcvd': pong, + 'epoch': epoch, + 'slots': slots, + 'connected': True if connected == 'connected' else False + } + return addr, node_dict + + +def parse_cluster_nodes(response, **options): + response = nativestr(response) + raw_lines = response + if isinstance(response, basestring): + raw_lines = response.splitlines() + return dict(_parse_node_line(line) for line in raw_lines) + + +def parse_georadius_generic(response, **options): + if options['store'] or options['store_dist']: + # `store` and `store_diff` cant be combined + # with other command arguments. 
+ return response + + if type(response) != list: + response_list = [response] + else: + response_list = response + + if not options['withdist'] and not options['withcoord']\ + and not options['withhash']: + # just a bunch of places + return response_list + + cast = { + 'withdist': float, + 'withcoord': lambda ll: (float(ll[0]), float(ll[1])), + 'withhash': int + } + + # zip all output results with each casting function to get + # the properly native Python value. + f = [lambda x: x] + f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]] + return [ + list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list + ] + + + def parse_pubsub_numsub(response, **options): + return list(zip(response[0::2], response[1::2])) + + + def parse_client_kill(response, **options): + if isinstance(response, (long, int)): + return int(response) + return nativestr(response) == 'OK' + + + class Redis(object): + """ + Implementation of the Redis protocol. + + This abstract class provides a Python interface to all Redis commands + and an implementation of the Redis protocol. 
+ + Connection and Pipeline derive from this, implementing how + the commands are sent and received to the Redis server + """ + RESPONSE_CALLBACKS = dict_merge( + string_keys_to_dict( + 'AUTH EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST ' + 'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX', + bool + ), + string_keys_to_dict( + 'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN ' + 'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD ' + 'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN ' + 'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM ' + 'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE', + int + ), + string_keys_to_dict( + 'INCRBYFLOAT HINCRBYFLOAT', + float + ), + string_keys_to_dict( + # these return OK, or int if redis-server is >=1.3.4 + 'LPUSH RPUSH', + lambda r: isinstance(r, (long, int)) and r or nativestr(r) == 'OK' + ), + string_keys_to_dict('SORT', sort_return_tuples), + string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none), + string_keys_to_dict( + 'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE ' + 'RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ', + bool_ok + ), + string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None), + string_keys_to_dict( + 'SDIFF SINTER SMEMBERS SUNION', + lambda r: r and set(r) or set() + ), + string_keys_to_dict( + 'ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE', + zset_score_pairs + ), + string_keys_to_dict('BZPOPMIN BZPOPMAX', \ + lambda r: r and (r[0], r[1], float(r[2])) or None), + string_keys_to_dict('ZRANK ZREVRANK', int_or_none), + string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list), + string_keys_to_dict('XREAD XREADGROUP', parse_xread), + string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True), + { + 'CLIENT GETNAME': lambda r: r and nativestr(r), + 'CLIENT ID': int, + 'CLIENT KILL': parse_client_kill, + 'CLIENT LIST': parse_client_list, + 'CLIENT SETNAME': bool_ok, + 'CLIENT UNBLOCK': lambda r: r and int(r) 
== 1 or False, + 'CLIENT PAUSE': bool_ok, + 'CLUSTER ADDSLOTS': bool_ok, + 'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x), + 'CLUSTER COUNTKEYSINSLOT': lambda x: int(x), + 'CLUSTER DELSLOTS': bool_ok, + 'CLUSTER FAILOVER': bool_ok, + 'CLUSTER FORGET': bool_ok, + 'CLUSTER INFO': parse_cluster_info, + 'CLUSTER KEYSLOT': lambda x: int(x), + 'CLUSTER MEET': bool_ok, + 'CLUSTER NODES': parse_cluster_nodes, + 'CLUSTER REPLICATE': bool_ok, + 'CLUSTER RESET': bool_ok, + 'CLUSTER SAVECONFIG': bool_ok, + 'CLUSTER SET-CONFIG-EPOCH': bool_ok, + 'CLUSTER SETSLOT': bool_ok, + 'CLUSTER SLAVES': parse_cluster_nodes, + 'CONFIG GET': parse_config_get, + 'CONFIG RESETSTAT': bool_ok, + 'CONFIG SET': bool_ok, + 'DEBUG OBJECT': parse_debug_object, + 'GEOHASH': lambda r: list(map(nativestr_or_none, r)), + 'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]), + float(ll[1])) + if ll is not None else None, r)), + 'GEORADIUS': parse_georadius_generic, + 'GEORADIUSBYMEMBER': parse_georadius_generic, + 'HGETALL': lambda r: r and pairs_to_dict(r) or {}, + 'HSCAN': parse_hscan, + 'INFO': parse_info, + 'LASTSAVE': timestamp_to_datetime, + 'MEMORY PURGE': bool_ok, + 'MEMORY USAGE': int_or_none, + 'OBJECT': parse_object, + 'PING': lambda r: nativestr(r) == 'PONG', + 'PUBSUB NUMSUB': parse_pubsub_numsub, + 'RANDOMKEY': lambda r: r and r or None, + 'SCAN': parse_scan, + 'SCRIPT EXISTS': lambda r: list(imap(bool, r)), + 'SCRIPT FLUSH': bool_ok, + 'SCRIPT KILL': bool_ok, + 'SCRIPT LOAD': nativestr, + 'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master, + 'SENTINEL MASTER': parse_sentinel_master, + 'SENTINEL MASTERS': parse_sentinel_masters, + 'SENTINEL MONITOR': bool_ok, + 'SENTINEL REMOVE': bool_ok, + 'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels, + 'SENTINEL SET': bool_ok, + 'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels, + 'SET': lambda r: r and nativestr(r) == 'OK', + 'SLOWLOG GET': parse_slowlog_get, + 'SLOWLOG LEN': int, + 'SLOWLOG RESET': bool_ok, + 'SSCAN': 
parse_scan, + 'TIME': lambda x: (int(x[0]), int(x[1])), + 'XCLAIM': parse_xclaim, + 'XGROUP CREATE': bool_ok, + 'XGROUP DELCONSUMER': int, + 'XGROUP DESTROY': bool, + 'XGROUP SETID': bool_ok, + 'XINFO CONSUMERS': parse_list_of_dicts, + 'XINFO GROUPS': parse_list_of_dicts, + 'XINFO STREAM': parse_xinfo_stream, + 'XPENDING': parse_xpending, + 'ZADD': parse_zadd, + 'ZSCAN': parse_zscan, + } + ) + + @classmethod + def from_url(cls, url, db=None, **kwargs): + """ + Return a Redis client object configured from the given URL + + For example:: + + redis://[:password]@localhost:6379/0 + rediss://[:password]@localhost:6379/0 + unix://[:password]@/path/to/socket.sock?db=0 + + Three URL schemes are supported: + + - ```redis://`` + `_ creates a + normal TCP socket connection + - ```rediss://`` + `_ creates a + SSL wrapped TCP socket connection + - ``unix://`` creates a Unix Domain Socket connection + + There are several ways to specify a database number. The parse function + will return the first specified option: + 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 2. If using the redis:// scheme, the path argument of the url, e.g. + redis://localhost/0 + 3. The ``db`` argument to this function. + + If none of these options are specified, db=0 is used. + + Any additional querystring arguments and keyword arguments will be + passed along to the ConnectionPool class's initializer. In the case + of conflicting arguments, querystring arguments always win. 
+ """ + connection_pool = ConnectionPool.from_url(url, db=db, **kwargs) + return cls(connection_pool=connection_pool) + + def __init__(self, host='localhost', port=6379, + db=0, password=None, socket_timeout=None, + socket_connect_timeout=None, + socket_keepalive=None, socket_keepalive_options=None, + connection_pool=None, unix_socket_path=None, + encoding='utf-8', encoding_errors='strict', + charset=None, errors=None, + decode_responses=False, retry_on_timeout=False, + ssl=False, ssl_keyfile=None, ssl_certfile=None, + ssl_cert_reqs='required', ssl_ca_certs=None, + max_connections=None, single_connection_client=False, + health_check_interval=0): + if not connection_pool: + if charset is not None: + warnings.warn(DeprecationWarning( + '"charset" is deprecated. Use "encoding" instead')) + encoding = charset + if errors is not None: + warnings.warn(DeprecationWarning( + '"errors" is deprecated. Use "encoding_errors" instead')) + encoding_errors = errors + + kwargs = { + 'db': db, + 'password': password, + 'socket_timeout': socket_timeout, + 'encoding': encoding, + 'encoding_errors': encoding_errors, + 'decode_responses': decode_responses, + 'retry_on_timeout': retry_on_timeout, + 'max_connections': max_connections, + 'health_check_interval': health_check_interval, + } + # based on input, setup appropriate connection args + if unix_socket_path is not None: + kwargs.update({ + 'path': unix_socket_path, + 'connection_class': UnixDomainSocketConnection + }) + else: + # TCP specific options + kwargs.update({ + 'host': host, + 'port': port, + 'socket_connect_timeout': socket_connect_timeout, + 'socket_keepalive': socket_keepalive, + 'socket_keepalive_options': socket_keepalive_options, + }) + + if ssl: + kwargs.update({ + 'connection_class': SSLConnection, + 'ssl_keyfile': ssl_keyfile, + 'ssl_certfile': ssl_certfile, + 'ssl_cert_reqs': ssl_cert_reqs, + 'ssl_ca_certs': ssl_ca_certs, + }) + connection_pool = ConnectionPool(**kwargs) + self.connection_pool = connection_pool + 
self.connection = None + if single_connection_client: + self.connection = self.connection_pool.get_connection('_') + + self.response_callbacks = CaseInsensitiveDict( + self.__class__.RESPONSE_CALLBACKS) + + def __repr__(self): + return "%s<%s>" % (type(self).__name__, repr(self.connection_pool)) + + def set_response_callback(self, command, callback): + "Set a custom Response Callback" + self.response_callbacks[command] = callback + + def pipeline(self, transaction=True, shard_hint=None): + """ + Return a new pipeline object that can queue multiple commands for + later execution. ``transaction`` indicates whether all commands + should be executed atomically. Apart from making a group of operations + atomic, pipelines are useful for reducing the back-and-forth overhead + between the client and server. + """ + return Pipeline( + self.connection_pool, + self.response_callbacks, + transaction, + shard_hint) + + def transaction(self, func, *watches, **kwargs): + """ + Convenience method for executing the callable `func` as a transaction + while watching all keys specified in `watches`. The 'func' callable + should expect a single argument which is a Pipeline object. + """ + shard_hint = kwargs.pop('shard_hint', None) + value_from_callable = kwargs.pop('value_from_callable', False) + watch_delay = kwargs.pop('watch_delay', None) + with self.pipeline(True, shard_hint) as pipe: + while True: + try: + if watches: + pipe.watch(*watches) + func_value = func(pipe) + exec_value = pipe.execute() + return func_value if value_from_callable else exec_value + except WatchError: + if watch_delay is not None and watch_delay > 0: + time.sleep(watch_delay) + continue + + def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, + lock_class=None, thread_local=True): + """ + Return a new Lock object using key ``name`` that mimics + the behavior of threading.Lock. + + If specified, ``timeout`` indicates a maximum life for the lock. 
+ By default, it will remain locked until release() is called. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + + ``blocking_timeout`` indicates the maximum amount of time in seconds to + spend trying to acquire the lock. A value of ``None`` indicates + continue trying forever. ``blocking_timeout`` can be specified as a + float or integer, both representing the number of seconds to wait. + + ``lock_class`` forces the specified lock implementation. + + ``thread_local`` indicates whether the lock token is placed in + thread-local storage. By default, the token is placed in thread local + storage so that a thread only sees its token, not a token set by + another thread. Consider the following timeline: + + time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. + thread-1 sets the token to "abc" + time: 1, thread-2 blocks trying to acquire `my-lock` using the + Lock instance. + time: 5, thread-1 has not yet completed. redis expires the lock + key. + time: 5, thread-2 acquired `my-lock` now that it's available. + thread-2 sets the token to "xyz" + time: 6, thread-1 finishes its work and calls release(). if the + token is *not* stored in thread local storage, then + thread-1 would see the token value as "xyz" and would be + able to successfully release the thread-2's lock. + + In some use cases it's necessary to disable thread local storage. For + example, if you have code where one thread acquires a lock and passes + that lock instance to a worker thread to release later. If thread + local storage isn't disabled in this case, the worker thread won't see + the token set by the thread that acquired the lock. Our assumption + is that these cases aren't common and as such default to using + thread local storage. 
""" + if lock_class is None: + lock_class = Lock + return lock_class(self, name, timeout=timeout, sleep=sleep, + blocking_timeout=blocking_timeout, + thread_local=thread_local) + + def pubsub(self, **kwargs): + """ + Return a Publish/Subscribe object. With this object, you can + subscribe to channels and listen for messages that get published to + them. + """ + return PubSub(self.connection_pool, **kwargs) + + def monitor(self): + return Monitor(self.connection_pool) + + def client(self): + return self.__class__(connection_pool=self.connection_pool, + single_connection_client=True) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __del__(self): + self.close() + + def close(self): + conn = self.connection + if conn: + self.connection = None + self.connection_pool.release(conn) + + # COMMAND EXECUTION AND PROTOCOL PARSING + def execute_command(self, *args, **options): + "Execute a command and return a parsed response" + pool = self.connection_pool + command_name = args[0] + conn = self.connection or pool.get_connection(command_name, **options) + try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): + raise + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + finally: + if not self.connection: + pool.release(conn) + + def parse_response(self, connection, command_name, **options): + "Parses a response from the Redis server" + try: + response = connection.read_response() + except ResponseError: + if EMPTY_RESPONSE in options: + return options[EMPTY_RESPONSE] + raise + if command_name in self.response_callbacks: + return self.response_callbacks[command_name](response, **options) + return response + + # SERVER INFORMATION + def bgrewriteaof(self): + "Tell the Redis server to rewrite the AOF file from 
data in memory." + return self.execute_command('BGREWRITEAOF') + + def bgsave(self): + """ + Tell the Redis server to save its data to disk. Unlike save(), + this method is asynchronous and returns immediately. + """ + return self.execute_command('BGSAVE') + + def client_kill(self, address): + "Disconnects the client at ``address`` (ip:port)" + return self.execute_command('CLIENT KILL', address) + + def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None): + """ + Disconnects client(s) using a variety of filter options + :param id: Kills a client by its unique ID field + :param type: Kills a client by type where type is one of 'normal', + 'master', 'slave' or 'pubsub' + :param addr: Kills a client by its 'address:port' + :param skipme: If True, then the client calling the command + will not get killed even if it is identified by one of the filter + options. If skipme is not provided, the server defaults to skipme=True + """ + args = [] + if _type is not None: + client_types = ('normal', 'master', 'slave', 'pubsub') + if str(_type).lower() not in client_types: + raise DataError("CLIENT KILL type must be one of %r" % ( + client_types,)) + args.extend((b'TYPE', _type)) + if skipme is not None: + if not isinstance(skipme, bool): + raise DataError("CLIENT KILL skipme must be a bool") + if skipme: + args.extend((b'SKIPME', b'YES')) + else: + args.extend((b'SKIPME', b'NO')) + if _id is not None: + args.extend((b'ID', _id)) + if addr is not None: + args.extend((b'ADDR', addr)) + if not args: + raise DataError("CLIENT KILL ... ... " + " must specify at least one filter") + return self.execute_command('CLIENT KILL', *args) + + def client_list(self, _type=None): + """ + Returns a list of currently connected clients. + If type of client specified, only that type will be returned. + :param _type: optional. 
one of the client types (normal, master, + replica, pubsub) + """ + "Returns a list of currently connected clients" + if _type is not None: + client_types = ('normal', 'master', 'replica', 'pubsub') + if str(_type).lower() not in client_types: + raise DataError("CLIENT LIST _type must be one of %r" % ( + client_types,)) + return self.execute_command('CLIENT LIST', b'TYPE', _type) + return self.execute_command('CLIENT LIST') + + def client_getname(self): + "Returns the current connection name" + return self.execute_command('CLIENT GETNAME') + + def client_id(self): + "Returns the current connection id" + return self.execute_command('CLIENT ID') + + def client_setname(self, name): + "Sets the current connection name" + return self.execute_command('CLIENT SETNAME', name) + + def client_unblock(self, client_id, error=False): + """ + Unblocks a connection by its client id. + If ``error`` is True, unblocks the client with a special error message. + If ``error`` is False (default), the client is unblocked using the + regular timeout mechanism. 
+ """ + args = ['CLIENT UNBLOCK', int(client_id)] + if error: + args.append(b'ERROR') + return self.execute_command(*args) + + def client_pause(self, timeout): + """ + Suspend all the Redis clients for the specified amount of time + :param timeout: milliseconds to pause clients + """ + if not isinstance(timeout, (int, long)): + raise DataError("CLIENT PAUSE timeout must be an integer") + return self.execute_command('CLIENT PAUSE', str(timeout)) + + def readwrite(self): + "Disables read queries for a connection to a Redis Cluster slave node" + return self.execute_command('READWRITE') + + def readonly(self): + "Enables read queries for a connection to a Redis Cluster replica node" + return self.execute_command('READONLY') + + def config_get(self, pattern="*"): + "Return a dictionary of configuration based on the ``pattern``" + return self.execute_command('CONFIG GET', pattern) + + def config_set(self, name, value): + "Set config item ``name`` with ``value``" + return self.execute_command('CONFIG SET', name, value) + + def config_resetstat(self): + "Reset runtime statistics" + return self.execute_command('CONFIG RESETSTAT') + + def config_rewrite(self): + "Rewrite config file with the minimal change to reflect running config" + return self.execute_command('CONFIG REWRITE') + + def dbsize(self): + "Returns the number of keys in the current database" + return self.execute_command('DBSIZE') + + def debug_object(self, key): + "Returns version specific meta information about a given key" + return self.execute_command('DEBUG OBJECT', key) + + def echo(self, value): + "Echo the string back from the server" + return self.execute_command('ECHO', value) + + def flushall(self, asynchronous=False): + """ + Delete all keys in all databases on the current host. + + ``asynchronous`` indicates whether the operation is + executed asynchronously by the server. 
+ """ + args = [] + if asynchronous: + args.append(b'ASYNC') + return self.execute_command('FLUSHALL', *args) + + def flushdb(self, asynchronous=False): + """ + Delete all keys in the current database. + + ``asynchronous`` indicates whether the operation is + executed asynchronously by the server. + """ + args = [] + if asynchronous: + args.append(b'ASYNC') + return self.execute_command('FLUSHDB', *args) + + def swapdb(self, first, second): + "Swap two databases" + return self.execute_command('SWAPDB', first, second) + + def info(self, section=None): + """ + Returns a dictionary containing information about the Redis server + + The ``section`` option can be used to select a specific section + of information + + The section option is not supported by older versions of Redis Server, + and will generate ResponseError + """ + if section is None: + return self.execute_command('INFO') + else: + return self.execute_command('INFO', section) + + def lastsave(self): + """ + Return a Python datetime object representing the last time the + Redis database was saved to disk + """ + return self.execute_command('LASTSAVE') + + def migrate(self, host, port, keys, destination_db, timeout, + copy=False, replace=False, auth=None): + """ + Migrate 1 or more keys from the current Redis server to a different + server specified by the ``host``, ``port`` and ``destination_db``. + + The ``timeout``, specified in milliseconds, indicates the maximum + time the connection between the two servers can be idle before the + command is interrupted. + + If ``copy`` is True, the specified ``keys`` are NOT deleted from + the source server. + + If ``replace`` is True, this operation will overwrite the keys + on the destination server if they exist. + + If ``auth`` is specified, authenticate to the destination server with + the password provided. 
+ """ + keys = list_or_args(keys, []) + if not keys: + raise DataError('MIGRATE requires at least one key') + pieces = [] + if copy: + pieces.append(b'COPY') + if replace: + pieces.append(b'REPLACE') + if auth: + pieces.append(b'AUTH') + pieces.append(auth) + pieces.append(b'KEYS') + pieces.extend(keys) + return self.execute_command('MIGRATE', host, port, '', destination_db, + timeout, *pieces) + + def object(self, infotype, key): + "Return the encoding, idletime, or refcount about the key" + return self.execute_command('OBJECT', infotype, key, infotype=infotype) + + def memory_usage(self, key, samples=None): + """ + Return the total memory usage for key, its value and associated + administrative overheads. + + For nested data structures, ``samples`` is the number of elements to + sample. If left unspecified, the server's default is 5. Use 0 to sample + all elements. + """ + args = [] + if isinstance(samples, int): + args.extend([b'SAMPLES', samples]) + return self.execute_command('MEMORY USAGE', key, *args) + + def memory_purge(self): + "Attempts to purge dirty pages for reclamation by allocator" + return self.execute_command('MEMORY PURGE') + + def ping(self): + "Ping the Redis server" + return self.execute_command('PING') + + def save(self): + """ + Tell the Redis server to save its data to disk, + blocking until the save is complete + """ + return self.execute_command('SAVE') + + def sentinel(self, *args): + "Redis Sentinel's SENTINEL command." + warnings.warn( + DeprecationWarning('Use the individual sentinel_* methods')) + + def sentinel_get_master_addr_by_name(self, service_name): + "Returns a (host, port) pair for the given ``service_name``" + return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME', + service_name) + + def sentinel_master(self, service_name): + "Returns a dictionary containing the specified masters state." 
+ return self.execute_command('SENTINEL MASTER', service_name) + + def sentinel_masters(self): + "Returns a list of dictionaries containing each master's state." + return self.execute_command('SENTINEL MASTERS') + + def sentinel_monitor(self, name, ip, port, quorum): + "Add a new master to Sentinel to be monitored" + return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum) + + def sentinel_remove(self, name): + "Remove a master from Sentinel's monitoring" + return self.execute_command('SENTINEL REMOVE', name) + + def sentinel_sentinels(self, service_name): + "Returns a list of sentinels for ``service_name``" + return self.execute_command('SENTINEL SENTINELS', service_name) + + def sentinel_set(self, name, option, value): + "Set Sentinel monitoring parameters for a given master" + return self.execute_command('SENTINEL SET', name, option, value) + + def sentinel_slaves(self, service_name): + "Returns a list of slaves for ``service_name``" + return self.execute_command('SENTINEL SLAVES', service_name) + + def shutdown(self, save=False, nosave=False): + """Shutdown the Redis server. If Redis has persistence configured, + data will be flushed before shutdown. If the "save" option is set, + a data flush will be attempted even if there is no persistence + configured. If the "nosave" option is set, no data flush will be + attempted. The "save" and "nosave" options cannot both be set. + """ + if save and nosave: + raise DataError('SHUTDOWN save and nosave cannot both be set') + args = ['SHUTDOWN'] + if save: + args.append('SAVE') + if nosave: + args.append('NOSAVE') + try: + self.execute_command(*args) + except ConnectionError: + # a ConnectionError here is expected + return + raise RedisError("SHUTDOWN seems to have failed.") + + def slaveof(self, host=None, port=None): + """ + Set the server to be a replicated slave of the instance identified + by the ``host`` and ``port``. If called without arguments, the + instance is promoted to a master instead. 
+ """ + if host is None and port is None: + return self.execute_command('SLAVEOF', b'NO', b'ONE') + return self.execute_command('SLAVEOF', host, port) + + def slowlog_get(self, num=None): + """ + Get the entries from the slowlog. If ``num`` is specified, get the + most recent ``num`` items. + """ + args = ['SLOWLOG GET'] + if num is not None: + args.append(num) + return self.execute_command(*args) + + def slowlog_len(self): + "Get the number of items in the slowlog" + return self.execute_command('SLOWLOG LEN') + + def slowlog_reset(self): + "Remove all items in the slowlog" + return self.execute_command('SLOWLOG RESET') + + def time(self): + """ + Returns the server time as a 2-item tuple of ints: + (seconds since epoch, microseconds into this second). + """ + return self.execute_command('TIME') + + def wait(self, num_replicas, timeout): + """ + Redis synchronous replication + That returns the number of replicas that processed the query when + we finally have at least ``num_replicas``, or when the ``timeout`` was + reached. + """ + return self.execute_command('WAIT', num_replicas, timeout) + + # BASIC KEY COMMANDS + def append(self, key, value): + """ + Appends the string ``value`` to the value at ``key``. If ``key`` + doesn't already exist, create it with a value of ``value``. + Returns the new length of the value at ``key``. + """ + return self.execute_command('APPEND', key, value) + + def bitcount(self, key, start=None, end=None): + """ + Returns the count of set bits in the value of ``key``. 
Optional + ``start`` and ``end`` paramaters indicate which bytes to consider + """ + params = [key] + if start is not None and end is not None: + params.append(start) + params.append(end) + elif (start is not None and end is None) or \ + (end is not None and start is None): + raise DataError("Both start and end must be specified") + return self.execute_command('BITCOUNT', *params) + + def bitfield(self, key, default_overflow=None): + """ + Return a BitFieldOperation instance to conveniently construct one or + more bitfield operations on ``key``. + """ + return BitFieldOperation(self, key, default_overflow=default_overflow) + + def bitop(self, operation, dest, *keys): + """ + Perform a bitwise operation using ``operation`` between ``keys`` and + store the result in ``dest``. + """ + return self.execute_command('BITOP', operation, dest, *keys) + + def bitpos(self, key, bit, start=None, end=None): + """ + Return the position of the first bit set to 1 or 0 in a string. + ``start`` and ``end`` difines search range. The range is interpreted + as a range of bytes and not a range of bits, so start=0 and end=2 + means to look at the first three bytes. + """ + if bit not in (0, 1): + raise DataError('bit must be 0 or 1') + params = [key, bit] + + start is not None and params.append(start) + + if start is not None and end is not None: + params.append(end) + elif start is None and end is not None: + raise DataError("start argument is not set, " + "when end is specified") + return self.execute_command('BITPOS', *params) + + def decr(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as 0 - ``amount`` + """ + # An alias for ``decr()``, because it is already implemented + # as DECRBY redis command. + return self.decrby(name, amount) + + def decrby(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. 
If no key exists, + the value will be initialized as 0 - ``amount`` + """ + return self.execute_command('DECRBY', name, amount) + + def delete(self, *names): + "Delete one or more keys specified by ``names``" + return self.execute_command('DEL', *names) + + def __delitem__(self, name): + self.delete(name) + + def dump(self, name): + """ + 返回存储在指定键上的值的序列化版本。 + 如果键不存在,则返回零批量答复。 + """ + return self.execute_command('DUMP', name) + + def exists(self, *names): + "返回存在的“名称”的数目 Returns the number of ``names`` that exist" + return self.execute_command('EXISTS', *names) + __contains__ = exists + + def expire(self, name, time): + """ + 在“name”键上设置“time”秒的过期标志。``时间`` +可以用整数或python timedelta对象表示。 +Set an expire flag on key ``name`` for ``time`` seconds. ``time`` + can be represented by an integer or a Python timedelta object. + """ + if isinstance(time, datetime.timedelta): + time = int(time.total_seconds()) + return self.execute_command('EXPIRE', name, time) + + def expireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer indicating unix time or a Python datetime object. 
+ """ + if isinstance(when, datetime.datetime): + when = int(mod_time.mktime(when.timetuple())) + return self.execute_command('EXPIREAT', name, when) + + def get(self, name): + """获取name的值 + + name,键 + 返回键“name”处的值,如果该键不存在,则返回“none” + """ + return self.execute_command('GET', name) + + def __getitem__(self, name): + """ + 返回键“name”处的值,如果键不存在。 + """ + value = self.get(name) + if value is not None: + return value + raise KeyError(name) + + def getbit(self, name, offset): + "Returns a boolean indicating the value of ``offset`` in ``name``" + return self.execute_command('GETBIT', name, offset) + + def getrange(self, key, start, end): + """ + Returns the substring of the string value stored at ``key``, + determined by the offsets ``start`` and ``end`` (both are inclusive) + """ + return self.execute_command('GETRANGE', key, start, end) + + def getset(self, name, value): + """ + Sets the value at key ``name`` to ``value`` + and returns the old value at key ``name`` atomically. + """ + return self.execute_command('GETSET', name, value) + + def incr(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + return self.incrby(name, amount) + + def incrby(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + # An alias for ``incr()``, because it is already implemented + # as INCRBY redis command. + return self.execute_command('INCRBY', name, amount) + + def incrbyfloat(self, name, amount=1.0): + """ + Increments the value at key ``name`` by floating ``amount``. 
+ If no key exists, the value will be initialized as ``amount`` + """ + return self.execute_command('INCRBYFLOAT', name, amount) + + def keys(self, pattern='*'): + "Returns a list of keys matching ``pattern``" + return self.execute_command('KEYS', pattern) + + def mget(self, keys, *args): + """ + Returns a list of values ordered identically to ``keys`` + """ + args = list_or_args(keys, args) + options = {} + if not args: + options[EMPTY_RESPONSE] = [] + return self.execute_command('MGET', *args, **options) + + def mset(self, mapping): + """ + Sets key/values based on a mapping. Mapping is a dictionary of + key/value pairs. Both keys and values should be strings or types that + can be cast to a string via str(). + """ + items = [] + for pair in iteritems(mapping): + items.extend(pair) + return self.execute_command('MSET', *items) + + def msetnx(self, mapping): + """ + Sets key/values based on a mapping if none of the keys are already set. + Mapping is a dictionary of key/value pairs. Both keys and values + should be strings or types that can be cast to a string via str(). + Returns a boolean indicating if the operation was successful. + """ + items = [] + for pair in iteritems(mapping): + items.extend(pair) + return self.execute_command('MSETNX', *items) + + def move(self, name, db): + "Moves the key ``name`` to a different Redis database ``db``" + return self.execute_command('MOVE', name, db) + + def persist(self, name): + "Removes an expiration on ``name``" + return self.execute_command('PERSIST', name) + + def pexpire(self, name, time): + """ + Set an expire flag on key ``name`` for ``time`` milliseconds. + ``time`` can be represented by an integer or a Python timedelta + object. + """ + if isinstance(time, datetime.timedelta): + time = int(time.total_seconds() * 1000) + return self.execute_command('PEXPIRE', name, time) + + def pexpireat(self, name, when): + """ + Set an expire flag on key ``name``. 
``when`` can be represented + as an integer representing unix time in milliseconds (unix time * 1000) + or a Python datetime object. + """ + if isinstance(when, datetime.datetime): + ms = int(when.microsecond / 1000) + when = int(mod_time.mktime(when.timetuple())) * 1000 + ms + return self.execute_command('PEXPIREAT', name, when) + + def psetex(self, name, time_ms, value): + """ + Set the value of key ``name`` to ``value`` that expires in ``time_ms`` + milliseconds. ``time_ms`` can be represented by an integer or a Python + timedelta object + """ + if isinstance(time_ms, datetime.timedelta): + time_ms = int(time_ms.total_seconds() * 1000) + return self.execute_command('PSETEX', name, time_ms, value) + + def pttl(self, name): + "Returns the number of milliseconds until the key ``name`` will expire" + return self.execute_command('PTTL', name) + + def randomkey(self): + "Returns the name of a random key" + return self.execute_command('RANDOMKEY') + + def rename(self, src, dst): + """ + Rename key ``src`` to ``dst`` + """ + return self.execute_command('RENAME', src, dst) + + def renamenx(self, src, dst): + "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist" + return self.execute_command('RENAMENX', src, dst) + + def restore(self, name, ttl, value, replace=False): + """ + Create a key using the provided serialized value, previously obtained + using DUMP. + """ + params = [name, ttl, value] + if replace: + params.append('REPLACE') + return self.execute_command('RESTORE', *params) + + def set(self, name, value, ex=None, px=None, nx=False, xx=False): + """ +name,键 + +value,值 + +ex,过期时间(秒) + +px,过期时间(毫秒) + +nx,如果设置为True,则只有key不存在时,当前set操作才执行,同#setnx(key, value) + +xx,如果设置为True,则只有key存在时,当前set操作才执行 + Set the value at key ``name`` to ``value`` + + ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. + + ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. 
+ + ``nx`` if set to True, set the value at key ``name`` to ``value`` only + if it does not exist. + + ``xx`` if set to True, set the value at key ``name`` to ``value`` only + if it already exists. + """ + pieces = [name, value] + if ex is not None: + pieces.append('EX') + if isinstance(ex, datetime.timedelta): + ex = int(ex.total_seconds()) + pieces.append(ex) + if px is not None: + pieces.append('PX') + if isinstance(px, datetime.timedelta): + px = int(px.total_seconds() * 1000) + pieces.append(px) + + if nx: + pieces.append('NX') + if xx: + pieces.append('XX') + return self.execute_command('SET', *pieces) + + def __setitem__(self, name, value): + self.set(name, value) + + def setbit(self, name, offset, value): + """ + Flag the ``offset`` in ``name`` as ``value``. Returns a boolean + indicating the previous value of ``offset``. + """ + value = value and 1 or 0 + return self.execute_command('SETBIT', name, offset, value) + + def setex(self, name, time, value): + """ + Set the value of key ``name`` to ``value`` that expires in ``time`` + seconds. ``time`` can be represented by an integer or a Python + timedelta object. + """ + if isinstance(time, datetime.timedelta): + time = int(time.total_seconds()) + return self.execute_command('SETEX', name, time, value) + + def setnx(self, name, value): + "Set the value of key ``name`` to ``value`` if key doesn't exist" + return self.execute_command('SETNX', name, value) + + def setrange(self, name, offset, value): + """ + Overwrite bytes in the value of ``name`` starting at ``offset`` with + ``value``. If ``offset`` plus the length of ``value`` exceeds the + length of the original value, the new value will be larger than before. + If ``offset`` exceeds the length of the original value, null bytes + will be used to pad between the end of the previous value and the start + of what's being injected. + + Returns the length of the new string. 
+ """ + return self.execute_command('SETRANGE', name, offset, value) + + def strlen(self, name): + "Return the number of bytes stored in the value of ``name``" + return self.execute_command('STRLEN', name) + + def substr(self, name, start, end=-1): + """ + Return a substring of the string at key ``name``. ``start`` and ``end`` + are 0-based integers specifying the portion of the string to return. + """ + return self.execute_command('SUBSTR', name, start, end) + + def touch(self, *args): + """ + Alters the last access time of a key(s) ``*args``. A key is ignored + if it does not exist. + """ + return self.execute_command('TOUCH', *args) + + def ttl(self, name): + "Returns the number of seconds until the key ``name`` will expire" + return self.execute_command('TTL', name) + + def type(self, name): + "Returns the type of key ``name``" + return self.execute_command('TYPE', name) + + def watch(self, *names): + """ + Watches the values at keys ``names``, or None if the key doesn't exist + """ + warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object')) + + def unwatch(self): + """ + Unwatches the value at key ``name``, or None of the key doesn't exist + """ + warnings.warn( + DeprecationWarning('Call UNWATCH from a Pipeline object')) + + def unlink(self, *names): + "Unlink one or more keys specified by ``names``" + return self.execute_command('UNLINK', *names) + + # LIST COMMANDS + def blpop(self, keys, timeout=0): + """ + LPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to LPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BLPOP', *keys) + + def brpop(self, keys, timeout=0): + """ + RPOP a value off of the first non-empty list + named in the ``keys`` list. 
+ + If none of the lists in ``keys`` has a value to RPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BRPOP', *keys) + + def brpoplpush(self, src, dst, timeout=0): + """ + Pop a value off the tail of ``src``, push it on the head of ``dst`` + and then return it. + + This command blocks until a value is in ``src`` or until ``timeout`` + seconds elapse, whichever is first. A ``timeout`` value of 0 blocks + forever. + """ + if timeout is None: + timeout = 0 + return self.execute_command('BRPOPLPUSH', src, dst, timeout) + + def lindex(self, name, index): + """ + Return the item from list ``name`` at position ``index`` + + Negative indexes are supported and will return an item at the + end of the list + """ + return self.execute_command('LINDEX', name, index) + + def linsert(self, name, where, refvalue, value): + """ + Insert ``value`` in list ``name`` either immediately before or after + [``where``] ``refvalue`` + + Returns the new length of the list on success or -1 if ``refvalue`` + is not in the list. 
+ """ + return self.execute_command('LINSERT', name, where, refvalue, value) + + def llen(self, name): + "Return the length of the list ``name``" + return self.execute_command('LLEN', name) + + def lpop(self, name): + """元素从list的左边移出 + + name,键 + """ + return self.execute_command('LPOP', name) + + def lpush(self, name, *values): + "元素从list的左边添加,可以添加多个" + return self.execute_command('LPUSH', name, *values) + + def lpushx(self, name, value): + "当name存在时,元素才能从list的右边加入" + return self.execute_command('LPUSHX', name, value) + + def lrange(self, name, start, end): + """ + Return a slice of the list ``name`` between + position ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LRANGE', name, start, end) + + def lrem(self, name, count, value): + """ + Remove the first ``count`` occurrences of elements equal to ``value`` + from the list stored at ``name``. + + The count argument influences the operation in the following ways: + count > 0: Remove elements equal to value moving from head to tail. + count < 0: Remove elements equal to value moving from tail to head. + count = 0: Remove all elements equal to value. + """ + return self.execute_command('LREM', name, count, value) + + def lset(self, name, index, value): + "Set ``position`` of list ``name`` to ``value``" + return self.execute_command('LSET', name, index, value) + + def ltrim(self, name, start, end): + """ + Trim the list ``name``, removing all values not within the slice + between ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LTRIM', name, start, end) + + def rpop(self, name): + "元素从list的右边移出" + return self.execute_command('RPOP', name) + + def rpoplpush(self, src, dst): + """ + RPOP a value off of the ``src`` list and atomically LPUSH it + on to the ``dst`` list. Returns the value. 
+ """ + return self.execute_command('RPOPLPUSH', src, dst) + + def rpush(self, name, *values): + "元素从list的右边添加" + return self.execute_command('RPUSH', name, *values) + + def rpushx(self, name, value): + "当name存在时,元素才能从list的右边加入" + return self.execute_command('RPUSHX', name, value) + + def sort(self, name, start=None, num=None, by=None, get=None, + desc=False, alpha=False, store=None, groups=False): + """ + Sort and return the list, set or sorted set at ``name``. + + ``start`` and ``num`` allow for paging through the sorted data + + ``by`` allows using an external key to weight and sort the items. + Use an "*" to indicate where in the key the item value is located + + ``get`` allows for returning items from external keys rather than the + sorted data itself. Use an "*" to indicate where int he key + the item value is located + + ``desc`` allows for reversing the sort + + ``alpha`` allows for sorting lexicographically rather than numerically + + ``store`` allows for storing the result of the sort into + the key ``store`` + + ``groups`` if set to True and if ``get`` contains at least two + elements, sort will return a list of tuples, each containing the + values fetched from the arguments to ``get``. + + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + + pieces = [name] + if by is not None: + pieces.append(b'BY') + pieces.append(by) + if start is not None and num is not None: + pieces.append(b'LIMIT') + pieces.append(start) + pieces.append(num) + if get is not None: + # If get is a string assume we want to get a single value. + # Otherwise assume it's an interable and we want to get multiple + # values. We can't just iterate blindly because strings are + # iterable. 
+ if isinstance(get, (bytes, basestring)): + pieces.append(b'GET') + pieces.append(get) + else: + for g in get: + pieces.append(b'GET') + pieces.append(g) + if desc: + pieces.append(b'DESC') + if alpha: + pieces.append(b'ALPHA') + if store is not None: + pieces.append(b'STORE') + pieces.append(store) + + if groups: + if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: + raise DataError('when using "groups" the "get" argument ' + 'must be specified and contain at least ' + 'two keys') + + options = {'groups': len(get) if groups else None} + return self.execute_command('SORT', *pieces, **options) + + # SCAN COMMANDS + def scan(self, cursor=0, match=None, count=None): + """ + Incrementally return lists of key names. Also return a cursor + indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + return self.execute_command('SCAN', *pieces) + + def scan_iter(self, match=None, count=None): + """ + Make an iterator using the SCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.scan(cursor=cursor, match=match, count=count) + for item in data: + yield item + + def sscan(self, name, cursor=0, match=None, count=None): + """ + Incrementally return lists of elements in a set. Also return a cursor + indicating the scan position. 
+ + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + return self.execute_command('SSCAN', *pieces) + + def sscan_iter(self, name, match=None, count=None): + """ + Make an iterator using the SSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.sscan(name, cursor=cursor, + match=match, count=count) + for item in data: + yield item + + def hscan(self, name, cursor=0, match=None, count=None): + """ + Incrementally return key/value slices in a hash. Also return a cursor + indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + return self.execute_command('HSCAN', *pieces) + + def hscan_iter(self, name, match=None, count=None): + """ + Make an iterator using the HSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.hscan(name, cursor=cursor, + match=match, count=count) + for item in data.items(): + yield item + + def zscan(self, name, cursor=0, match=None, count=None, + score_cast_func=float): + """ + Incrementally return lists of elements in a sorted set. Also return a + cursor indicating the scan position. 
+ + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + + ``score_cast_func`` a callable used to cast the score return value + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + options = {'score_cast_func': score_cast_func} + return self.execute_command('ZSCAN', *pieces, **options) + + def zscan_iter(self, name, match=None, count=None, + score_cast_func=float): + """ + Make an iterator using the ZSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + + ``score_cast_func`` a callable used to cast the score return value + """ + cursor = '0' + while cursor != 0: + cursor, data = self.zscan(name, cursor=cursor, match=match, + count=count, + score_cast_func=score_cast_func) + for item in data: + yield item + + # SET COMMANDS + def sadd(self, name, *values): + "Add ``value(s)`` to set ``name``" + return self.execute_command('SADD', name, *values) + + def scard(self, name): + "Return the number of elements in set ``name``" + return self.execute_command('SCARD', name) + + def sdiff(self, keys, *args): + "Return the difference of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SDIFF', *args) + + def sdiffstore(self, dest, keys, *args): + """ + Store the difference of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. 
+ """ + args = list_or_args(keys, args) + return self.execute_command('SDIFFSTORE', dest, *args) + + def sinter(self, keys, *args): + "Return the intersection of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SINTER', *args) + + def sinterstore(self, dest, keys, *args): + """ + Store the intersection of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + args = list_or_args(keys, args) + return self.execute_command('SINTERSTORE', dest, *args) + + def sismember(self, name, value): + "Return a boolean indicating if ``value`` is a member of set ``name``" + return self.execute_command('SISMEMBER', name, value) + + def smembers(self, name): + "Return all members of the set ``name``" + return self.execute_command('SMEMBERS', name) + + def smove(self, src, dst, value): + "Move ``value`` from set ``src`` to set ``dst`` atomically" + return self.execute_command('SMOVE', src, dst, value) + + def spop(self, name, count=None): + "Remove and return a random member of set ``name``" + args = (count is not None) and [count] or [] + return self.execute_command('SPOP', name, *args) + + def srandmember(self, name, number=None): + """ + If ``number`` is None, returns a random member of set ``name``. + + If ``number`` is supplied, returns a list of ``number`` random + memebers of set ``name``. Note this is only available when running + Redis 2.6+. + """ + args = (number is not None) and [number] or [] + return self.execute_command('SRANDMEMBER', name, *args) + + def srem(self, name, *values): + "Remove ``values`` from set ``name``" + return self.execute_command('SREM', name, *values) + + def sunion(self, keys, *args): + "Return the union of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SUNION', *args) + + def sunionstore(self, dest, keys, *args): + """ + Store the union of sets specified by ``keys`` into a new + set named ``dest``. 
Returns the number of keys in the new set. + """ + args = list_or_args(keys, args) + return self.execute_command('SUNIONSTORE', dest, *args) + + # STREAMS COMMANDS + def xack(self, name, groupname, *ids): + """ + Acknowledges the successful processing of one or more messages. + name: name of the stream. + groupname: name of the consumer group. + *ids: message ids to acknowlege. + """ + return self.execute_command('XACK', name, groupname, *ids) + + def xadd(self, name, fields, id='*', maxlen=None, approximate=True): + """ + Add to a stream. + name: name of the stream + fields: dict of field/value pairs to insert into the stream + id: Location to insert this record. By default it is appended. + maxlen: truncate old stream members beyond this size + approximate: actual stream length may be slightly more than maxlen + + """ + pieces = [] + if maxlen is not None: + if not isinstance(maxlen, (int, long)) or maxlen < 1: + raise DataError('XADD maxlen must be a positive integer') + pieces.append(b'MAXLEN') + if approximate: + pieces.append(b'~') + pieces.append(str(maxlen)) + pieces.append(id) + if not isinstance(fields, dict) or len(fields) == 0: + raise DataError('XADD fields must be a non-empty dict') + for pair in iteritems(fields): + pieces.extend(pair) + return self.execute_command('XADD', name, *pieces) + + def xclaim(self, name, groupname, consumername, min_idle_time, message_ids, + idle=None, time=None, retrycount=None, force=False, + justid=False): + """ + Changes the ownership of a pending message. + name: name of the stream. + groupname: name of the consumer group. + consumername: name of a consumer that claims the message. + min_idle_time: filter messages that were idle less than this amount of + milliseconds + message_ids: non-empty list or tuple of message IDs to claim + idle: optional. Set the idle time (last time it was delivered) of the + message in ms + time: optional integer. 
This is the same as idle but instead of a + relative amount of milliseconds, it sets the idle time to a specific + Unix time (in milliseconds). + retrycount: optional integer. set the retry counter to the specified + value. This counter is incremented every time a message is delivered + again. + force: optional boolean, false by default. Creates the pending message + entry in the PEL even if certain specified IDs are not already in the + PEL assigned to a different client. + justid: optional boolean, false by default. Return just an array of IDs + of messages successfully claimed, without returning the actual message + """ + if not isinstance(min_idle_time, (int, long)) or min_idle_time < 0: + raise DataError("XCLAIM min_idle_time must be a non negative " + "integer") + if not isinstance(message_ids, (list, tuple)) or not message_ids: + raise DataError("XCLAIM message_ids must be a non empty list or " + "tuple of message IDs to claim") + + kwargs = {} + pieces = [name, groupname, consumername, str(min_idle_time)] + pieces.extend(list(message_ids)) + + if idle is not None: + if not isinstance(idle, (int, long)): + raise DataError("XCLAIM idle must be an integer") + pieces.extend((b'IDLE', str(idle))) + if time is not None: + if not isinstance(time, (int, long)): + raise DataError("XCLAIM time must be an integer") + pieces.extend((b'TIME', str(time))) + if retrycount is not None: + if not isinstance(retrycount, (int, long)): + raise DataError("XCLAIM retrycount must be an integer") + pieces.extend((b'RETRYCOUNT', str(retrycount))) + + if force: + if not isinstance(force, bool): + raise DataError("XCLAIM force must be a boolean") + pieces.append(b'FORCE') + if justid: + if not isinstance(justid, bool): + raise DataError("XCLAIM justid must be a boolean") + pieces.append(b'JUSTID') + kwargs['parse_justid'] = True + return self.execute_command('XCLAIM', *pieces, **kwargs) + + def xdel(self, name, *ids): + """ + Deletes one or more messages from a stream. 
+ name: name of the stream. + *ids: message ids to delete. + """ + return self.execute_command('XDEL', name, *ids) + + def xgroup_create(self, name, groupname, id='$', mkstream=False): + """ + Create a new consumer group associated with a stream. + name: name of the stream. + groupname: name of the consumer group. + id: ID of the last item in the stream to consider already delivered. + """ + pieces = ['XGROUP CREATE', name, groupname, id] + if mkstream: + pieces.append(b'MKSTREAM') + return self.execute_command(*pieces) + + def xgroup_delconsumer(self, name, groupname, consumername): + """ + Remove a specific consumer from a consumer group. + Returns the number of pending messages that the consumer had before it + was deleted. + name: name of the stream. + groupname: name of the consumer group. + consumername: name of consumer to delete + """ + return self.execute_command('XGROUP DELCONSUMER', name, groupname, + consumername) + + def xgroup_destroy(self, name, groupname): + """ + Destroy a consumer group. + name: name of the stream. + groupname: name of the consumer group. + """ + return self.execute_command('XGROUP DESTROY', name, groupname) + + def xgroup_setid(self, name, groupname, id): + """ + Set the consumer group last delivered ID to something else. + name: name of the stream. + groupname: name of the consumer group. + id: ID of the last item in the stream to consider already delivered. + """ + return self.execute_command('XGROUP SETID', name, groupname, id) + + def xinfo_consumers(self, name, groupname): + """ + Returns general information about the consumers in the group. + name: name of the stream. + groupname: name of the consumer group. + """ + return self.execute_command('XINFO CONSUMERS', name, groupname) + + def xinfo_groups(self, name): + """ + Returns general information about the consumer groups of the stream. + name: name of the stream. 
+ """ + return self.execute_command('XINFO GROUPS', name) + + def xinfo_stream(self, name): + """ + Returns general information about the stream. + name: name of the stream. + """ + return self.execute_command('XINFO STREAM', name) + + def xlen(self, name): + """ + Returns the number of elements in a given stream. + """ + return self.execute_command('XLEN', name) + + def xpending(self, name, groupname): + """ + Returns information about pending messages of a group. + name: name of the stream. + groupname: name of the consumer group. + """ + return self.execute_command('XPENDING', name, groupname) + + def xpending_range(self, name, groupname, min, max, count, + consumername=None): + """ + Returns information about pending messages, in a range. + name: name of the stream. + groupname: name of the consumer group. + min: minimum stream ID. + max: maximum stream ID. + count: number of messages to return + consumername: name of a consumer to filter by (optional). + """ + pieces = [name, groupname] + if min is not None or max is not None or count is not None: + if min is None or max is None or count is None: + raise DataError("XPENDING must be provided with min, max " + "and count parameters, or none of them. ") + if not isinstance(count, (int, long)) or count < -1: + raise DataError("XPENDING count must be a integer >= -1") + pieces.extend((min, max, str(count))) + if consumername is not None: + if min is None or max is None or count is None: + raise DataError("if XPENDING is provided with consumername," + " it must be provided with min, max and" + " count parameters") + pieces.append(consumername) + return self.execute_command('XPENDING', *pieces, parse_detail=True) + + def xrange(self, name, min='-', max='+', count=None): + """ + Read stream values within an interval. + name: name of the stream. + start: first stream ID. defaults to '-', + meaning the earliest available. + finish: last stream ID. defaults to '+', + meaning the latest available. 
+ count: if set, only return this many items, beginning with the + earliest available. + """ + pieces = [min, max] + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError('XRANGE count must be a positive integer') + pieces.append(b'COUNT') + pieces.append(str(count)) + + return self.execute_command('XRANGE', name, *pieces) + + def xread(self, streams, count=None, block=None): + """ + Block and monitor multiple streams for new data. + streams: a dict of stream names to stream IDs, where + IDs indicate the last ID already seen. + count: if set, only return this many items, beginning with the + earliest available. + block: number of milliseconds to wait, if nothing already present. + """ + pieces = [] + if block is not None: + if not isinstance(block, (int, long)) or block < 0: + raise DataError('XREAD block must be a non-negative integer') + pieces.append(b'BLOCK') + pieces.append(str(block)) + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError('XREAD count must be a positive integer') + pieces.append(b'COUNT') + pieces.append(str(count)) + if not isinstance(streams, dict) or len(streams) == 0: + raise DataError('XREAD streams must be a non empty dict') + pieces.append(b'STREAMS') + keys, values = izip(*iteritems(streams)) + pieces.extend(keys) + pieces.extend(values) + return self.execute_command('XREAD', *pieces) + + def xreadgroup(self, groupname, consumername, streams, count=None, + block=None, noack=False): + """ + Read from a stream via a consumer group. + groupname: name of the consumer group. + consumername: name of the requesting consumer. + streams: a dict of stream names to stream IDs, where + IDs indicate the last ID already seen. + count: if set, only return this many items, beginning with the + earliest available. + block: number of milliseconds to wait, if nothing already present. 
+ noack: do not add messages to the PEL + """ + pieces = [b'GROUP', groupname, consumername] + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError("XREADGROUP count must be a positive integer") + pieces.append(b'COUNT') + pieces.append(str(count)) + if block is not None: + if not isinstance(block, (int, long)) or block < 0: + raise DataError("XREADGROUP block must be a non-negative " + "integer") + pieces.append(b'BLOCK') + pieces.append(str(block)) + if noack: + pieces.append(b'NOACK') + if not isinstance(streams, dict) or len(streams) == 0: + raise DataError('XREADGROUP streams must be a non empty dict') + pieces.append(b'STREAMS') + pieces.extend(streams.keys()) + pieces.extend(streams.values()) + return self.execute_command('XREADGROUP', *pieces) + + def xrevrange(self, name, max='+', min='-', count=None): + """ + Read stream values within an interval, in reverse order. + name: name of the stream + start: first stream ID. defaults to '+', + meaning the latest available. + finish: last stream ID. defaults to '-', + meaning the earliest available. + count: if set, only return this many items, beginning with the + latest available. + """ + pieces = [max, min] + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError('XREVRANGE count must be a positive integer') + pieces.append(b'COUNT') + pieces.append(str(count)) + + return self.execute_command('XREVRANGE', name, *pieces) + + def xtrim(self, name, maxlen, approximate=True): + """ + Trims old messages from a stream. + name: name of the stream. 
+ maxlen: truncate old stream messages beyond this size + approximate: actual stream length may be slightly more than maxlen + """ + pieces = [b'MAXLEN'] + if approximate: + pieces.append(b'~') + pieces.append(maxlen) + return self.execute_command('XTRIM', name, *pieces) + + # SORTED SET COMMANDS + def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): + """ + Set any number of element-name, score pairs to the key ``name``. Pairs + are specified as a dict of element-names keys to score values. + + ``nx`` forces ZADD to only create new elements and not to update + scores for elements that already exist. + + ``xx`` forces ZADD to only update scores of elements that already + exist. New elements will not be added. + + ``ch`` modifies the return value to be the numbers of elements changed. + Changed elements include new elements that were added and elements + whose scores changed. + + ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a + single element/score pair can be specified and the score is the amount + the existing score will be incremented by. When using this mode the + return value of ZADD will be the new score of the element. + + The return value of ZADD varies based on the mode specified. With no + options, ZADD returns the number of new elements added to the sorted + set. 
+ """ + if not mapping: + raise DataError("ZADD requires at least one element/score pair") + if nx and xx: + raise DataError("ZADD allows either 'nx' or 'xx', not both") + if incr and len(mapping) != 1: + raise DataError("ZADD option 'incr' only works when passing a " + "single element/score pair") + pieces = [] + options = {} + if nx: + pieces.append(b'NX') + if xx: + pieces.append(b'XX') + if ch: + pieces.append(b'CH') + if incr: + pieces.append(b'INCR') + options['as_score'] = True + for pair in iteritems(mapping): + pieces.append(pair[1]) + pieces.append(pair[0]) + return self.execute_command('ZADD', name, *pieces, **options) + + def zcard(self, name): + "Return the number of elements in the sorted set ``name``" + return self.execute_command('ZCARD', name) + + def zcount(self, name, min, max): + """ + Returns the number of elements in the sorted set at key ``name`` with + a score between ``min`` and ``max``. + """ + return self.execute_command('ZCOUNT', name, min, max) + + def zincrby(self, name, amount, value): + "Increment the score of ``value`` in sorted set ``name`` by ``amount``" + return self.execute_command('ZINCRBY', name, amount, value) + + def zinterstore(self, dest, keys, aggregate=None): + """ + Intersect multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) + + def zlexcount(self, name, min, max): + """ + Return the number of items in the sorted set ``name`` between the + lexicographical range ``min`` and ``max``. + """ + return self.execute_command('ZLEXCOUNT', name, min, max) + + def zpopmax(self, name, count=None): + """ + Remove and return up to ``count`` members with the highest scores + from the sorted set ``name``. 
+ """ + args = (count is not None) and [count] or [] + options = { + 'withscores': True + } + return self.execute_command('ZPOPMAX', name, *args, **options) + + def zpopmin(self, name, count=None): + """ + Remove and return up to ``count`` members with the lowest scores + from the sorted set ``name``. + """ + args = (count is not None) and [count] or [] + options = { + 'withscores': True + } + return self.execute_command('ZPOPMIN', name, *args, **options) + + def bzpopmax(self, keys, timeout=0): + """ + ZPOPMAX a value off of the first non-empty sorted set + named in the ``keys`` list. + + If none of the sorted sets in ``keys`` has a value to ZPOPMAX, + then block for ``timeout`` seconds, or until a member gets added + to one of the sorted sets. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BZPOPMAX', *keys) + + def bzpopmin(self, keys, timeout=0): + """ + ZPOPMIN a value off of the first non-empty sorted set + named in the ``keys`` list. + + If none of the sorted sets in ``keys`` has a value to ZPOPMIN, + then block for ``timeout`` seconds, or until a member gets added + to one of the sorted sets. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BZPOPMIN', *keys) + + def zrange(self, name, start, end, desc=False, withscores=False, + score_cast_func=float): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in ascending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``desc`` a boolean indicating whether to sort the results descendingly + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + if desc: + return self.zrevrange(name, start, end, withscores, + score_cast_func) + pieces = ['ZRANGE', name, start, end] + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrangebylex(self, name, min, max, start=None, num=None): + """ + Return the lexicographical range of values from sorted set ``name`` + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice of the + range. + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYLEX', name, min, max] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + return self.execute_command(*pieces) + + def zrevrangebylex(self, name, max, min, start=None, num=None): + """ + Return the reversed lexicographical range of values from sorted set + ``name`` between ``max`` and ``min``. + + If ``start`` and ``num`` are specified, then return a slice of the + range. + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZREVRANGEBYLEX', name, max, min] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + return self.execute_command(*pieces) + + def zrangebyscore(self, name, min, max, start=None, num=None, + withscores=False, score_cast_func=float): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice + of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + `score_cast_func`` a callable used to cast the score return value + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYSCORE', name, min, max] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrank(self, name, value): + """ + Returns a 0-based value indicating the rank of ``value`` in sorted set + ``name`` + """ + return self.execute_command('ZRANK', name, value) + + def zrem(self, name, *values): + "Remove member ``values`` from sorted set ``name``" + return self.execute_command('ZREM', name, *values) + + def zremrangebylex(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` between the + lexicographical range specified by ``min`` and ``max``. + + Returns the number of elements removed. + """ + return self.execute_command('ZREMRANGEBYLEX', name, min, max) + + def zremrangebyrank(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with ranks between + ``min`` and ``max``. Values are 0-based, ordered from smallest score + to largest. Values can be negative indicating the highest scores. + Returns the number of elements removed + """ + return self.execute_command('ZREMRANGEBYRANK', name, min, max) + + def zremrangebyscore(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with scores + between ``min`` and ``max``. Returns the number of elements removed. 
+ """ + return self.execute_command('ZREMRANGEBYSCORE', name, min, max) + + def zrevrange(self, name, start, end, withscores=False, + score_cast_func=float): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in descending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``withscores`` indicates to return the scores along with the values + The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + pieces = ['ZREVRANGE', name, start, end] + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrevrangebyscore(self, name, max, min, start=None, num=None, + withscores=False, score_cast_func=float): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max`` in descending order. + + If ``start`` and ``num`` are specified, then return a slice + of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZREVRANGEBYSCORE', name, max, min] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrevrank(self, name, value): + """ + Returns a 0-based value indicating the descending rank of + ``value`` in sorted set ``name`` + """ + return self.execute_command('ZREVRANK', name, value) + + def zscore(self, name, value): + "Return the score of element ``value`` in sorted set ``name``" + return self.execute_command('ZSCORE', name, value) + + def zunionstore(self, dest, keys, aggregate=None): + """ + Union multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) + + def _zaggregate(self, command, dest, keys, aggregate=None): + pieces = [command, dest, len(keys)] + if isinstance(keys, dict): + keys, weights = iterkeys(keys), itervalues(keys) + else: + weights = None + pieces.extend(keys) + if weights: + pieces.append(b'WEIGHTS') + pieces.extend(weights) + if aggregate: + pieces.append(b'AGGREGATE') + pieces.append(aggregate) + return self.execute_command(*pieces) + + # HYPERLOGLOG COMMANDS + def pfadd(self, name, *values): + "Adds the specified elements to the specified HyperLogLog." + return self.execute_command('PFADD', name, *values) + + def pfcount(self, *sources): + """ + Return the approximated cardinality of + the set observed by the HyperLogLog at key(s). 
+ """ + return self.execute_command('PFCOUNT', *sources) + + def pfmerge(self, dest, *sources): + "Merge N different HyperLogLogs into a single one." + return self.execute_command('PFMERGE', dest, *sources) + + # HASH COMMANDS + def hdel(self, name, *keys): + "Delete ``keys`` from hash ``name``" + return self.execute_command('HDEL', name, *keys) + + def hexists(self, name, key): + "Returns a boolean indicating if ``key`` exists within hash ``name``" + return self.execute_command('HEXISTS', name, key) + + def hget(self, name, key): + "Return the value of ``key`` within the hash ``name``" + return self.execute_command('HGET', name, key) + + def hgetall(self, name): + "Return a Python dict of the hash's name/value pairs" + return self.execute_command('HGETALL', name) + + def hincrby(self, name, key, amount=1): + "Increment the value of ``key`` in hash ``name`` by ``amount``" + return self.execute_command('HINCRBY', name, key, amount) + + def hincrbyfloat(self, name, key, amount=1.0): + """ + Increment the value of ``key`` in hash ``name`` by floating ``amount`` + """ + return self.execute_command('HINCRBYFLOAT', name, key, amount) + + def hkeys(self, name): + "Return the list of keys within hash ``name``" + return self.execute_command('HKEYS', name) + + def hlen(self, name): + "Return the number of elements in hash ``name``" + return self.execute_command('HLEN', name) + + def hset(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` + Returns 1 if HSET created a new field, otherwise 0 + """ + return self.execute_command('HSET', name, key, value) + + def hsetnx(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` if ``key`` does not + exist. Returns 1 if HSETNX created a field, otherwise 0. + """ + return self.execute_command('HSETNX', name, key, value) + + def hmset(self, name, mapping): + """ + Set key to value within hash ``name`` for each corresponding + key and value from the ``mapping`` dict. 
+ """ + if not mapping: + raise DataError("'hmset' with 'mapping' of length 0") + items = [] + for pair in iteritems(mapping): + items.extend(pair) + return self.execute_command('HMSET', name, *items) + + def hmget(self, name, keys, *args): + "Returns a list of values ordered identically to ``keys``" + args = list_or_args(keys, args) + return self.execute_command('HMGET', name, *args) + + def hvals(self, name): + "Return the list of values within hash ``name``" + return self.execute_command('HVALS', name) + + def hstrlen(self, name, key): + """ + Return the number of bytes stored in the value of ``key`` + within hash ``name`` + """ + return self.execute_command('HSTRLEN', name, key) + + def publish(self, channel, message): + """ + Publish ``message`` on ``channel``. + Returns the number of subscribers the message was delivered to. + """ + return self.execute_command('PUBLISH', channel, message) + + def pubsub_channels(self, pattern='*'): + """ + Return a list of channels that have at least one subscriber + """ + return self.execute_command('PUBSUB CHANNELS', pattern) + + def pubsub_numpat(self): + """ + Returns the number of subscriptions to patterns + """ + return self.execute_command('PUBSUB NUMPAT') + + def pubsub_numsub(self, *args): + """ + Return a list of (channel, number of subscribers) tuples + for each channel given in ``*args`` + """ + return self.execute_command('PUBSUB NUMSUB', *args) + + def cluster(self, cluster_arg, *args): + return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args) + + def eval(self, script, numkeys, *keys_and_args): + """ + Execute the Lua ``script``, specifying the ``numkeys`` the script + will touch and the key names and argument values in ``keys_and_args``. + Returns the result of the script. + + In practice, use the object returned by ``register_script``. This + function exists purely for Redis API completion. 
+ """ + return self.execute_command('EVAL', script, numkeys, *keys_and_args) + + def evalsha(self, sha, numkeys, *keys_and_args): + """ + Use the ``sha`` to execute a Lua script already registered via EVAL + or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the + key names and argument values in ``keys_and_args``. Returns the result + of the script. + + In practice, use the object returned by ``register_script``. This + function exists purely for Redis API completion. + """ + return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args) + + def script_exists(self, *args): + """ + Check if a script exists in the script cache by specifying the SHAs of + each script as ``args``. Returns a list of boolean values indicating if + if each already script exists in the cache. + """ + return self.execute_command('SCRIPT EXISTS', *args) + + def script_flush(self): + "Flush all scripts from the script cache" + return self.execute_command('SCRIPT FLUSH') + + def script_kill(self): + "Kill the currently executing Lua script" + return self.execute_command('SCRIPT KILL') + + def script_load(self, script): + "Load a Lua ``script`` into the script cache. Returns the SHA." + return self.execute_command('SCRIPT LOAD', script) + + def register_script(self, script): + """ + Register a Lua ``script`` specifying the ``keys`` it will touch. + Returns a Script object that is callable and hides the complexity of + deal with scripts, keys, and shas. This is the preferred way to work + with Lua scripts. + """ + return Script(self, script) + + # GEO COMMANDS + def geoadd(self, name, *values): + """ + Add the specified geospatial items to the specified key identified + by the ``name`` argument. The Geospatial items are given as ordered + members of the ``values`` argument, each item or place is formed by + the triad longitude, latitude and name. 
+ """ + if len(values) % 3 != 0: + raise DataError("GEOADD requires places with lon, lat and name" + " values") + return self.execute_command('GEOADD', name, *values) + + def geodist(self, name, place1, place2, unit=None): + """ + Return the distance between ``place1`` and ``place2`` members of the + ``name`` key. + The units must be one of the following : m, km mi, ft. By default + meters are used. + """ + pieces = [name, place1, place2] + if unit and unit not in ('m', 'km', 'mi', 'ft'): + raise DataError("GEODIST invalid unit") + elif unit: + pieces.append(unit) + return self.execute_command('GEODIST', *pieces) + + def geohash(self, name, *values): + """ + Return the geo hash string for each item of ``values`` members of + the specified key identified by the ``name`` argument. + """ + return self.execute_command('GEOHASH', name, *values) + + def geopos(self, name, *values): + """ + Return the positions of each item of ``values`` as members of + the specified key identified by the ``name`` argument. Each position + is represented by the pairs lon and lat. + """ + return self.execute_command('GEOPOS', name, *values) + + def georadius(self, name, longitude, latitude, radius, unit=None, + withdist=False, withcoord=False, withhash=False, count=None, + sort=None, store=None, store_dist=None): + """ + Return the members of the specified key identified by the + ``name`` argument which are within the borders of the area specified + with the ``latitude`` and ``longitude`` location and the maximum + distance from the center specified by the ``radius`` value. + + The units must be one of the following : m, km mi, ft. By default + + ``withdist`` indicates to return the distances of each place. + + ``withcoord`` indicates to return the latitude and longitude of + each place. + + ``withhash`` indicates to return the geohash string of each place. + + ``count`` indicates to return the number of elements up to N. 
+ + ``sort`` indicates to return the places in a sorted way, ASC for + nearest to fairest and DESC for fairest to nearest. + + ``store`` indicates to save the places names in a sorted set named + with a specific key, each element of the destination sorted set is + populated with the score got from the original geo sorted set. + + ``store_dist`` indicates to save the places names in a sorted set + named with a specific key, instead of ``store`` the sorted set + destination score is set with the distance. + """ + return self._georadiusgeneric('GEORADIUS', + name, longitude, latitude, radius, + unit=unit, withdist=withdist, + withcoord=withcoord, withhash=withhash, + count=count, sort=sort, store=store, + store_dist=store_dist) + + def georadiusbymember(self, name, member, radius, unit=None, + withdist=False, withcoord=False, withhash=False, + count=None, sort=None, store=None, store_dist=None): + """ + This command is exactly like ``georadius`` with the sole difference + that instead of taking, as the center of the area to query, a longitude + and latitude value, it takes the name of a member already existing + inside the geospatial index represented by the sorted set. 
+ """ + return self._georadiusgeneric('GEORADIUSBYMEMBER', + name, member, radius, unit=unit, + withdist=withdist, withcoord=withcoord, + withhash=withhash, count=count, + sort=sort, store=store, + store_dist=store_dist) + + def _georadiusgeneric(self, command, *args, **kwargs): + pieces = list(args) + if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'): + raise DataError("GEORADIUS invalid unit") + elif kwargs['unit']: + pieces.append(kwargs['unit']) + else: + pieces.append('m',) + + for arg_name, byte_repr in ( + ('withdist', b'WITHDIST'), + ('withcoord', b'WITHCOORD'), + ('withhash', b'WITHHASH')): + if kwargs[arg_name]: + pieces.append(byte_repr) + + if kwargs['count']: + pieces.extend([b'COUNT', kwargs['count']]) + + if kwargs['sort']: + if kwargs['sort'] == 'ASC': + pieces.append(b'ASC') + elif kwargs['sort'] == 'DESC': + pieces.append(b'DESC') + else: + raise DataError("GEORADIUS invalid sort") + + if kwargs['store'] and kwargs['store_dist']: + raise DataError("GEORADIUS store and store_dist cant be set" + " together") + + if kwargs['store']: + pieces.extend([b'STORE', kwargs['store']]) + + if kwargs['store_dist']: + pieces.extend([b'STOREDIST', kwargs['store_dist']]) + + return self.execute_command(command, *pieces, **kwargs) + + +StrictRedis = Redis + + +class Monitor(object): + """ + Monitor is useful for handling the MONITOR command to the redis server. + next_command() method returns one command from monitor + listen() method yields commands from monitor. + """ + monitor_re = re.compile(r'\[(\d+) (.*)\] (.*)') + command_re = re.compile(r'"(.*?)(? conn.next_health_check: + conn.send_command('PING', self.HEALTH_CHECK_MESSAGE, + check_health=False) + + def _normalize_keys(self, data): + """ + normalize channel/pattern names to be either bytes or strings + based on whether responses are automatically decoded. this saves us + from coercing the value for each message coming in. 
+ """ + encode = self.encoder.encode + decode = self.encoder.decode + return {decode(encode(k)): v for k, v in iteritems(data)} + + def psubscribe(self, *args, **kwargs): + """ + Subscribe to channel patterns. Patterns supplied as keyword arguments + expect a pattern name as the key and a callable as the value. A + pattern's callable will be invoked automatically when a message is + received on that pattern rather than producing a message via + ``listen()``. + """ + if args: + args = list_or_args(args[0], args[1:]) + new_patterns = dict.fromkeys(args) + new_patterns.update(kwargs) + ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns)) + # update the patterns dict AFTER we send the command. we don't want to + # subscribe twice to these patterns, once for the command and again + # for the reconnection. + new_patterns = self._normalize_keys(new_patterns) + self.patterns.update(new_patterns) + self.pending_unsubscribe_patterns.difference_update(new_patterns) + return ret_val + + def punsubscribe(self, *args): + """ + Unsubscribe from the supplied patterns. If empty, unsubscribe from + all patterns. + """ + if args: + args = list_or_args(args[0], args[1:]) + patterns = self._normalize_keys(dict.fromkeys(args)) + else: + patterns = self.patterns + self.pending_unsubscribe_patterns.update(patterns) + return self.execute_command('PUNSUBSCRIBE', *args) + + def subscribe(self, *args, **kwargs): + """ + Subscribe to channels. Channels supplied as keyword arguments expect + a channel name as the key and a callable as the value. A channel's + callable will be invoked automatically when a message is received on + that channel rather than producing a message via ``listen()`` or + ``get_message()``. + """ + if args: + args = list_or_args(args[0], args[1:]) + new_channels = dict.fromkeys(args) + new_channels.update(kwargs) + ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels)) + # update the channels dict AFTER we send the command. 
we don't want to + # subscribe twice to these channels, once for the command and again + # for the reconnection. + new_channels = self._normalize_keys(new_channels) + self.channels.update(new_channels) + self.pending_unsubscribe_channels.difference_update(new_channels) + return ret_val + + def unsubscribe(self, *args): + """ + Unsubscribe from the supplied channels. If empty, unsubscribe from + all channels + """ + if args: + args = list_or_args(args[0], args[1:]) + channels = self._normalize_keys(dict.fromkeys(args)) + else: + channels = self.channels + self.pending_unsubscribe_channels.update(channels) + return self.execute_command('UNSUBSCRIBE', *args) + + def listen(self): + "Listen for messages on channels this client has been subscribed to" + while self.subscribed: + response = self.handle_message(self.parse_response(block=True)) + if response is not None: + yield response + + def get_message(self, ignore_subscribe_messages=False, timeout=0): + """ + Get the next message if one is available, otherwise None. + + If timeout is specified, the system will wait for `timeout` seconds + before returning. Timeout should be specified as a floating point + number. + """ + response = self.parse_response(block=False, timeout=timeout) + if response: + return self.handle_message(response, ignore_subscribe_messages) + return None + + def ping(self, message=None): + """ + Ping the Redis server + """ + message = '' if message is None else message + return self.execute_command('PING', message) + + def handle_message(self, response, ignore_subscribe_messages=False): + """ + Parses a pub/sub message. If the channel or pattern was subscribed to + with a message handler, the handler is invoked instead of a parsed + message being returned. 
+ """ + message_type = nativestr(response[0]) + if message_type == 'pmessage': + message = { + 'type': message_type, + 'pattern': response[1], + 'channel': response[2], + 'data': response[3] + } + elif message_type == 'pong': + message = { + 'type': message_type, + 'pattern': None, + 'channel': None, + 'data': response[1] + } + else: + message = { + 'type': message_type, + 'pattern': None, + 'channel': response[1], + 'data': response[2] + } + + # if this is an unsubscribe message, remove it from memory + if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: + if message_type == 'punsubscribe': + pattern = response[1] + if pattern in self.pending_unsubscribe_patterns: + self.pending_unsubscribe_patterns.remove(pattern) + self.patterns.pop(pattern, None) + else: + channel = response[1] + if channel in self.pending_unsubscribe_channels: + self.pending_unsubscribe_channels.remove(channel) + self.channels.pop(channel, None) + + if message_type in self.PUBLISH_MESSAGE_TYPES: + # if there's a message handler, invoke it + if message_type == 'pmessage': + handler = self.patterns.get(message['pattern'], None) + else: + handler = self.channels.get(message['channel'], None) + if handler: + handler(message) + return None + elif message_type != 'pong': + # this is a subscribe/unsubscribe message. 
ignore if we don't + # want them + if ignore_subscribe_messages or self.ignore_subscribe_messages: + return None + + return message + + def run_in_thread(self, sleep_time=0, daemon=False): + for channel, handler in iteritems(self.channels): + if handler is None: + raise PubSubError("Channel: '%s' has no handler registered" % + channel) + for pattern, handler in iteritems(self.patterns): + if handler is None: + raise PubSubError("Pattern: '%s' has no handler registered" % + pattern) + + thread = PubSubWorkerThread(self, sleep_time, daemon=daemon) + thread.start() + return thread + + +class PubSubWorkerThread(threading.Thread): + def __init__(self, pubsub, sleep_time, daemon=False): + super(PubSubWorkerThread, self).__init__() + self.daemon = daemon + self.pubsub = pubsub + self.sleep_time = sleep_time + self._running = threading.Event() + + def run(self): + if self._running.is_set(): + return + self._running.set() + pubsub = self.pubsub + sleep_time = self.sleep_time + while self._running.is_set(): + pubsub.get_message(ignore_subscribe_messages=True, + timeout=sleep_time) + pubsub.close() + + def stop(self): + # trip the flag so the run loop exits. the run loop will + # close the pubsub connection, which disconnects the socket + # and returns the connection to the pool. + self._running.clear() + + +class Pipeline(Redis): + """ + Pipelines provide a way to transmit multiple commands to the Redis server + in one transmission. This is convenient for batch processing, such as + saving all the values in a list to Redis. + + All commands executed within a pipeline are wrapped with MULTI and EXEC + calls. This guarantees all commands executed in the pipeline will be + executed atomically. + + Any command raising an exception does *not* halt the execution of + subsequent commands in the pipeline. Instead, the exception is caught + and its instance is placed into the response list returned by execute(). 
+ Code iterating over the response list should be able to deal with an + instance of an exception as a potential value. In general, these will be + ResponseError exceptions, such as those raised when issuing a command + on a key of a different datatype. + """ + + UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'} + + def __init__(self, connection_pool, response_callbacks, transaction, + shard_hint): + self.connection_pool = connection_pool + self.connection = None + self.response_callbacks = response_callbacks + self.transaction = transaction + self.shard_hint = shard_hint + + self.watching = False + self.reset() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.reset() + + def __del__(self): + try: + self.reset() + except Exception: + pass + + def __len__(self): + return len(self.command_stack) + + def reset(self): + self.command_stack = [] + self.scripts = set() + # make sure to reset the connection state in the event that we were + # watching something + if self.watching and self.connection: + try: + # call this manually since our unwatch or + # immediate_execute_command methods can call reset() + self.connection.send_command('UNWATCH') + self.connection.read_response() + except ConnectionError: + # disconnect will also remove any previous WATCHes + self.connection.disconnect() + # clean up the other instance attributes + self.watching = False + self.explicit_transaction = False + # we can safely return the connection to the pool here since we're + # sure we're no longer WATCHing anything + if self.connection: + self.connection_pool.release(self.connection) + self.connection = None + + def multi(self): + """ + Start a transactional block of the pipeline after WATCH commands + are issued. End the transactional block with `execute`. 
+ """ + if self.explicit_transaction: + raise RedisError('Cannot issue nested calls to MULTI') + if self.command_stack: + raise RedisError('Commands without an initial WATCH have already ' + 'been issued') + self.explicit_transaction = True + + def execute_command(self, *args, **kwargs): + if (self.watching or args[0] == 'WATCH') and \ + not self.explicit_transaction: + return self.immediate_execute_command(*args, **kwargs) + return self.pipeline_execute_command(*args, **kwargs) + + def immediate_execute_command(self, *args, **options): + """ + Execute a command immediately, but don't auto-retry on a + ConnectionError if we're already WATCHing a variable. Used when + issuing WATCH or subsequent commands retrieving their values but before + MULTI is called. + """ + command_name = args[0] + conn = self.connection + # if this is the first call, we need a connection + if not conn: + conn = self.connection_pool.get_connection(command_name, + self.shard_hint) + self.connection = conn + try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + # if we were already watching a variable, the watch is no longer + # valid since this connection has died. raise a WatchError, which + # indicates the user should retry this transaction. + if self.watching: + self.reset() + raise WatchError("A ConnectionError occured on while watching " + "one or more keys") + # if retry_on_timeout is not set, or the error is not + # a TimeoutError, raise it + if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): + self.reset() + raise + + # retry_on_timeout is set, this is a TimeoutError and we are not + # already WATCHing any variables. retry the command. 
+ try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError): + # a subsequent failure should simply be raised + self.reset() + raise + + def pipeline_execute_command(self, *args, **options): + """ + Stage a command to be executed when execute() is next called + + Returns the current Pipeline object back so commands can be + chained together, such as: + + pipe = pipe.set('foo', 'bar').incr('baz').decr('bang') + + At some other point, you can then run: pipe.execute(), + which will execute all commands queued in the pipe. + """ + self.command_stack.append((args, options)) + return self + + def _execute_transaction(self, connection, commands, raise_on_error): + cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})]) + all_cmds = connection.pack_commands([args for args, options in cmds + if EMPTY_RESPONSE not in options]) + connection.send_packed_command(all_cmds) + errors = [] + + # parse off the response for MULTI + # NOTE: we need to handle ResponseErrors here and continue + # so that we read all the additional command messages from + # the socket + try: + self.parse_response(connection, '_') + except ResponseError: + errors.append((0, sys.exc_info()[1])) + + # and all the other commands + for i, command in enumerate(commands): + if EMPTY_RESPONSE in command[1]: + errors.append((i, command[1][EMPTY_RESPONSE])) + else: + try: + self.parse_response(connection, '_') + except ResponseError: + ex = sys.exc_info()[1] + self.annotate_exception(ex, i + 1, command[0]) + errors.append((i, ex)) + + # parse the EXEC. 
+ try: + response = self.parse_response(connection, '_') + except ExecAbortError: + if self.explicit_transaction: + self.immediate_execute_command('DISCARD') + if errors: + raise errors[0][1] + raise sys.exc_info()[1] + + if response is None: + raise WatchError("Watched variable changed.") + + # put any parse errors into the response + for i, e in errors: + response.insert(i, e) + + if len(response) != len(commands): + self.connection.disconnect() + raise ResponseError("Wrong number of response items from " + "pipeline execution") + + # find any errors in the response and raise if necessary + if raise_on_error: + self.raise_first_error(commands, response) + + # We have to run response callbacks manually + data = [] + for r, cmd in izip(response, commands): + if not isinstance(r, Exception): + args, options = cmd + command_name = args[0] + if command_name in self.response_callbacks: + r = self.response_callbacks[command_name](r, **options) + data.append(r) + return data + + def _execute_pipeline(self, connection, commands, raise_on_error): + # build up all commands into a single request to increase network perf + all_cmds = connection.pack_commands([args for args, _ in commands]) + connection.send_packed_command(all_cmds) + + response = [] + for args, options in commands: + try: + response.append( + self.parse_response(connection, args[0], **options)) + except ResponseError: + response.append(sys.exc_info()[1]) + + if raise_on_error: + self.raise_first_error(commands, response) + return response + + def raise_first_error(self, commands, response): + for i, r in enumerate(response): + if isinstance(r, ResponseError): + self.annotate_exception(r, i + 1, commands[i][0]) + raise r + + def annotate_exception(self, exception, number, command): + cmd = ' '.join(imap(safe_unicode, command)) + msg = 'Command # %d (%s) of pipeline caused error: %s' % ( + number, cmd, safe_unicode(exception.args[0])) + exception.args = (msg,) + exception.args[1:] + + def parse_response(self, 
connection, command_name, **options): + result = Redis.parse_response( + self, connection, command_name, **options) + if command_name in self.UNWATCH_COMMANDS: + self.watching = False + elif command_name == 'WATCH': + self.watching = True + return result + + def load_scripts(self): + # make sure all scripts that are about to be run on this pipeline exist + scripts = list(self.scripts) + immediate = self.immediate_execute_command + shas = [s.sha for s in scripts] + # we can't use the normal script_* methods because they would just + # get buffered in the pipeline. + exists = immediate('SCRIPT EXISTS', *shas) + if not all(exists): + for s, exist in izip(scripts, exists): + if not exist: + s.sha = immediate('SCRIPT LOAD', s.script) + + def execute(self, raise_on_error=True): + "Execute all the commands in the current pipeline" + stack = self.command_stack + if not stack: + return [] + if self.scripts: + self.load_scripts() + if self.transaction or self.explicit_transaction: + execute = self._execute_transaction + else: + execute = self._execute_pipeline + + conn = self.connection + if not conn: + conn = self.connection_pool.get_connection('MULTI', + self.shard_hint) + # assign to self.connection so reset() releases the connection + # back to the pool after we're done + self.connection = conn + + try: + return execute(conn, stack, raise_on_error) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + # if we were watching a variable, the watch is no longer valid + # since this connection has died. raise a WatchError, which + # indicates the user should retry this transaction. 
+ if self.watching: + raise WatchError("A ConnectionError occured on while watching " + "one or more keys") + # if retry_on_timeout is not set, or the error is not + # a TimeoutError, raise it + if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): + raise + # retry a TimeoutError when retry_on_timeout is set + return execute(conn, stack, raise_on_error) + finally: + self.reset() + + def watch(self, *names): + "Watches the values at keys ``names``" + if self.explicit_transaction: + raise RedisError('Cannot issue a WATCH after a MULTI') + return self.execute_command('WATCH', *names) + + def unwatch(self): + "Unwatches all previously specified keys" + return self.watching and self.execute_command('UNWATCH') or True + + +class Script(object): + "An executable Lua script object returned by ``register_script``" + + def __init__(self, registered_client, script): + self.registered_client = registered_client + self.script = script + # Precalculate and store the SHA1 hex digest of the script. + + if isinstance(script, basestring): + # We need the encoding from the client in order to generate an + # accurate byte representation of the script + encoder = registered_client.connection_pool.get_encoder() + script = encoder.encode(script) + self.sha = hashlib.sha1(script).hexdigest() + + def __call__(self, keys=[], args=[], client=None): + "Execute the script, passing any required ``args``" + if client is None: + client = self.registered_client + args = tuple(keys) + tuple(args) + # make sure the Redis server knows about the script + if isinstance(client, Pipeline): + # Make sure the pipeline can register the script before executing. + client.scripts.add(self) + try: + return client.evalsha(self.sha, len(keys), *args) + except NoScriptError: + # Maybe the client is pointed to a differnet server than the client + # that created this instance? + # Overwrite the sha just in case there was a discrepancy. 
+ self.sha = client.script_load(self.script) + return client.evalsha(self.sha, len(keys), *args) + + +class BitFieldOperation(object): + """ + Command builder for BITFIELD commands. + """ + def __init__(self, client, key, default_overflow=None): + self.client = client + self.key = key + self._default_overflow = default_overflow + self.reset() + + def reset(self): + """ + Reset the state of the instance to when it was constructed + """ + self.operations = [] + self._last_overflow = 'WRAP' + self.overflow(self._default_overflow or self._last_overflow) + + def overflow(self, overflow): + """ + Update the overflow algorithm of successive INCRBY operations + :param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the + Redis docs for descriptions of these algorithmsself. + :returns: a :py:class:`BitFieldOperation` instance. + """ + overflow = overflow.upper() + if overflow != self._last_overflow: + self._last_overflow = overflow + self.operations.append(('OVERFLOW', overflow)) + return self + + def incrby(self, fmt, offset, increment, overflow=None): + """ + Increment a bitfield by a given amount. + :param fmt: format-string for the bitfield being updated, e.g. 'u8' + for an unsigned 8-bit integer. + :param offset: offset (in number of bits). If prefixed with a + '#', this is an offset multiplier, e.g. given the arguments + fmt='u8', offset='#2', the offset will be 16. + :param int increment: value to increment the bitfield by. + :param str overflow: overflow algorithm. Defaults to WRAP, but other + acceptable values are SAT and FAIL. See the Redis docs for + descriptions of these algorithms. + :returns: a :py:class:`BitFieldOperation` instance. + """ + if overflow is not None: + self.overflow(overflow) + + self.operations.append(('INCRBY', fmt, offset, increment)) + return self + + def get(self, fmt, offset): + """ + Get the value of a given bitfield. + :param fmt: format-string for the bitfield being read, e.g. 'u8' for + an unsigned 8-bit integer. 
+ :param offset: offset (in number of bits). If prefixed with a + '#', this is an offset multiplier, e.g. given the arguments + fmt='u8', offset='#2', the offset will be 16. + :returns: a :py:class:`BitFieldOperation` instance. + """ + self.operations.append(('GET', fmt, offset)) + return self + + def set(self, fmt, offset, value): + """ + Set the value of a given bitfield. + :param fmt: format-string for the bitfield being read, e.g. 'u8' for + an unsigned 8-bit integer. + :param offset: offset (in number of bits). If prefixed with a + '#', this is an offset multiplier, e.g. given the arguments + fmt='u8', offset='#2', the offset will be 16. + :param int value: value to set at the given position. + :returns: a :py:class:`BitFieldOperation` instance. + """ + self.operations.append(('SET', fmt, offset, value)) + return self + + @property + def command(self): + cmd = ['BITFIELD', self.key] + for ops in self.operations: + cmd.extend(ops) + return cmd + + def execute(self): + """ + Execute the operation(s) in a single BITFIELD command. The return value + is a list of values corresponding to each operation. If the client + used to create this instance was a pipeline, the list of values + will be present within the pipeline's execute. 
+ """ + command = self.command + self.reset() + return self.client.execute_command(*command) diff --git a/utill/rediss/connection.py b/utill/rediss/connection.py new file mode 100644 index 0000000..2968e74 --- /dev/null +++ b/utill/rediss/connection.py @@ -0,0 +1,1261 @@ +from __future__ import unicode_literals +from distutils.version import StrictVersion +from itertools import chain +from time import time +import errno +import io +import os +import socket +import sys +import threading +import warnings + +from ._compat import (xrange, imap, byte_to_chr, unicode, long, + nativestr, basestring, iteritems, + LifoQueue, Empty, Full, urlparse, parse_qs, + recv, recv_into, unquote, BlockingIOError) +from .exceptions import ( + AuthenticationError, + BusyLoadingError, + ConnectionError, + DataError, + ExecAbortError, + InvalidResponse, + NoScriptError, + ReadOnlyError, + RedisError, + ResponseError, + TimeoutError, +) +from .utils import HIREDIS_AVAILABLE + +try: + import ssl + ssl_available = True +except ImportError: + ssl_available = False + +NONBLOCKING_EXCEPTION_ERROR_NUMBERS = { + BlockingIOError: errno.EWOULDBLOCK, +} + +if ssl_available: + if hasattr(ssl, 'SSLWantReadError'): + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2 + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2 + else: + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2 + +# In Python 2.7 a socket.error is raised for a nonblocking read. +# The _compat module aliases BlockingIOError to socket.error to be +# Python 2/3 compatible. +# However this means that all socket.error exceptions need to be handled +# properly within these exception handlers. +# We need to make sure socket.error is included in these handlers and +# provide a dummy error number that will never match a real exception. 
+if socket.error not in NONBLOCKING_EXCEPTION_ERROR_NUMBERS: + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[socket.error] = -999999 + +NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys()) + +if HIREDIS_AVAILABLE: + import hiredis + + hiredis_version = StrictVersion(hiredis.__version__) + HIREDIS_SUPPORTS_CALLABLE_ERRORS = \ + hiredis_version >= StrictVersion('0.1.3') + HIREDIS_SUPPORTS_BYTE_BUFFER = \ + hiredis_version >= StrictVersion('0.1.4') + HIREDIS_SUPPORTS_ENCODING_ERRORS = \ + hiredis_version >= StrictVersion('1.0.0') + + if not HIREDIS_SUPPORTS_BYTE_BUFFER: + msg = ("redis-py works best with hiredis >= 0.1.4. You're running " + "hiredis %s. Please consider upgrading." % hiredis.__version__) + warnings.warn(msg) + + HIREDIS_USE_BYTE_BUFFER = True + # only use byte buffer if hiredis supports it + if not HIREDIS_SUPPORTS_BYTE_BUFFER: + HIREDIS_USE_BYTE_BUFFER = False + +SYM_STAR = b'*' +SYM_DOLLAR = b'$' +SYM_CRLF = b'\r\n' +SYM_EMPTY = b'' + +SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server." + +SENTINEL = object() + + +class Encoder(object): + "Encode strings to bytes and decode bytes to strings" + + def __init__(self, encoding, encoding_errors, decode_responses): + self.encoding = encoding + self.encoding_errors = encoding_errors + self.decode_responses = decode_responses + + def encode(self, value): + "Return a bytestring representation of the value" + if isinstance(value, bytes): + return value + elif isinstance(value, bool): + # special case bool since it is a subclass of int + raise DataError("Invalid input of type: 'bool'. Convert to a " + "byte, string or number first.") + elif isinstance(value, float): + value = repr(value).encode() + elif isinstance(value, (int, long)): + # python 2 repr() on longs is '123L', so use str() instead + value = str(value).encode() + elif not isinstance(value, basestring): + # a value we don't know how to deal with. 
throw an error + typename = type(value).__name__ + raise DataError("Invalid input of type: '%s'. Convert to a " + "byte, string or number first." % typename) + if isinstance(value, unicode): + value = value.encode(self.encoding, self.encoding_errors) + return value + + def decode(self, value, force=False): + "Return a unicode string from the byte representation" + if (self.decode_responses or force) and isinstance(value, bytes): + value = value.decode(self.encoding, self.encoding_errors) + return value + + +class BaseParser(object): + EXCEPTION_CLASSES = { + 'ERR': { + 'max number of clients reached': ConnectionError, + 'Client sent AUTH, but no password is set': AuthenticationError, + 'invalid password': AuthenticationError, + }, + 'EXECABORT': ExecAbortError, + 'LOADING': BusyLoadingError, + 'NOSCRIPT': NoScriptError, + 'READONLY': ReadOnlyError, + 'NOAUTH': AuthenticationError, + } + + def parse_error(self, response): + "Parse an error response" + error_code = response.split(' ')[0] + if error_code in self.EXCEPTION_CLASSES: + response = response[len(error_code) + 1:] + exception_class = self.EXCEPTION_CLASSES[error_code] + if isinstance(exception_class, dict): + exception_class = exception_class.get(response, ResponseError) + return exception_class(response) + return ResponseError(response) + + +class SocketBuffer(object): + def __init__(self, socket, socket_read_size, socket_timeout): + self._sock = socket + self.socket_read_size = socket_read_size + self.socket_timeout = socket_timeout + self._buffer = io.BytesIO() + # number of bytes written to the buffer from the socket + self.bytes_written = 0 + # number of bytes read from the buffer + self.bytes_read = 0 + + @property + def length(self): + return self.bytes_written - self.bytes_read + + def _read_from_socket(self, length=None, timeout=SENTINEL, + raise_on_timeout=True): + sock = self._sock + socket_read_size = self.socket_read_size + buf = self._buffer + buf.seek(self.bytes_written) + marker = 0 + 
custom_timeout = timeout is not SENTINEL + + try: + if custom_timeout: + sock.settimeout(timeout) + while True: + data = recv(self._sock, socket_read_size) + # an empty string indicates the server shutdown the socket + if isinstance(data, bytes) and len(data) == 0: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + buf.write(data) + data_length = len(data) + self.bytes_written += data_length + marker += data_length + + if length is not None and length > marker: + continue + return True + except socket.timeout: + if raise_on_timeout: + raise TimeoutError("Timeout reading from socket") + return False + except NONBLOCKING_EXCEPTIONS as ex: + # if we're in nonblocking mode and the recv raises a + # blocking error, simply return False indicating that + # there's no data to be read. otherwise raise the + # original exception. + allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) + if not raise_on_timeout and ex.errno == allowed: + return False + raise ConnectionError("Error while reading from socket: %s" % + (ex.args,)) + finally: + if custom_timeout: + sock.settimeout(self.socket_timeout) + + def can_read(self, timeout): + return bool(self.length) or \ + self._read_from_socket(timeout=timeout, + raise_on_timeout=False) + + def read(self, length): + length = length + 2 # make sure to read the \r\n terminator + # make sure we've read enough data from the socket + if length > self.length: + self._read_from_socket(length - self.length) + + self._buffer.seek(self.bytes_read) + data = self._buffer.read(length) + self.bytes_read += len(data) + + # purge the buffer when we've consumed it all so it doesn't + # grow forever + if self.bytes_read == self.bytes_written: + self.purge() + + return data[:-2] + + def readline(self): + buf = self._buffer + buf.seek(self.bytes_read) + data = buf.readline() + while not data.endswith(SYM_CRLF): + # there's more data in the socket that we need + self._read_from_socket() + buf.seek(self.bytes_read) + data = 
buf.readline() + + self.bytes_read += len(data) + + # purge the buffer when we've consumed it all so it doesn't + # grow forever + if self.bytes_read == self.bytes_written: + self.purge() + + return data[:-2] + + def purge(self): + self._buffer.seek(0) + self._buffer.truncate() + self.bytes_written = 0 + self.bytes_read = 0 + + def close(self): + try: + self.purge() + self._buffer.close() + except Exception: + # issue #633 suggests the purge/close somehow raised a + # BadFileDescriptor error. Perhaps the client ran out of + # memory or something else? It's probably OK to ignore + # any error being raised from purge/close since we're + # removing the reference to the instance below. + pass + self._buffer = None + self._sock = None + + +class PythonParser(BaseParser): + "Plain Python parsing class" + def __init__(self, socket_read_size): + self.socket_read_size = socket_read_size + self.encoder = None + self._sock = None + self._buffer = None + + def __del__(self): + try: + self.on_disconnect() + except Exception: + pass + + def on_connect(self, connection): + "Called when the socket connects" + self._sock = connection._sock + self._buffer = SocketBuffer(self._sock, + self.socket_read_size, + connection.socket_timeout) + self.encoder = connection.encoder + + def on_disconnect(self): + "Called when the socket disconnects" + self._sock = None + if self._buffer is not None: + self._buffer.close() + self._buffer = None + self.encoder = None + + def can_read(self, timeout): + return self._buffer and self._buffer.can_read(timeout) + + def read_response(self): + response = self._buffer.readline() + if not response: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + byte, response = byte_to_chr(response[0]), response[1:] + + if byte not in ('-', '+', ':', '$', '*'): + raise InvalidResponse("Protocol Error: %s, %s" % + (str(byte), str(response))) + + # server returned an error + if byte == '-': + response = nativestr(response) + error = self.parse_error(response) + # 
if the error is a ConnectionError, raise immediately so the user + # is notified + if isinstance(error, ConnectionError): + raise error + # otherwise, we're dealing with a ResponseError that might belong + # inside a pipeline response. the connection's read_response() + # and/or the pipeline's execute() will raise this error if + # necessary, so just return the exception instance here. + return error + # single value + elif byte == '+': + pass + # int value + elif byte == ':': + response = long(response) + # bulk response + elif byte == '$': + length = int(response) + if length == -1: + return None + response = self._buffer.read(length) + # multi-bulk response + elif byte == '*': + length = int(response) + if length == -1: + return None + response = [self.read_response() for i in xrange(length)] + if isinstance(response, bytes): + response = self.encoder.decode(response) + return response + + +class HiredisParser(BaseParser): + "Parser class for connections using Hiredis" + def __init__(self, socket_read_size): + if not HIREDIS_AVAILABLE: + raise RedisError("Hiredis is not installed") + self.socket_read_size = socket_read_size + + if HIREDIS_USE_BYTE_BUFFER: + self._buffer = bytearray(socket_read_size) + + def __del__(self): + try: + self.on_disconnect() + except Exception: + pass + + def on_connect(self, connection): + self._sock = connection._sock + self._socket_timeout = connection.socket_timeout + kwargs = { + 'protocolError': InvalidResponse, + 'replyError': self.parse_error, + } + + # hiredis < 0.1.3 doesn't support functions that create exceptions + if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: + kwargs['replyError'] = ResponseError + + if connection.encoder.decode_responses: + kwargs['encoding'] = connection.encoder.encoding + if HIREDIS_SUPPORTS_ENCODING_ERRORS: + kwargs['errors'] = connection.encoder.encoding_errors + self._reader = hiredis.Reader(**kwargs) + self._next_response = False + + def on_disconnect(self): + self._sock = None + self._reader = None + 
self._next_response = False + + def can_read(self, timeout): + if not self._reader: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + if self._next_response is False: + self._next_response = self._reader.gets() + if self._next_response is False: + return self.read_from_socket(timeout=timeout, + raise_on_timeout=False) + return True + + def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True): + sock = self._sock + custom_timeout = timeout is not SENTINEL + try: + if custom_timeout: + sock.settimeout(timeout) + if HIREDIS_USE_BYTE_BUFFER: + bufflen = recv_into(self._sock, self._buffer) + if bufflen == 0: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + self._reader.feed(self._buffer, 0, bufflen) + else: + buffer = recv(self._sock, self.socket_read_size) + # an empty string indicates the server shutdown the socket + if not isinstance(buffer, bytes) or len(buffer) == 0: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + self._reader.feed(buffer) + # data was read from the socket and added to the buffer. + # return True to indicate that data was read. + return True + except socket.timeout: + if raise_on_timeout: + raise TimeoutError("Timeout reading from socket") + return False + except NONBLOCKING_EXCEPTIONS as ex: + # if we're in nonblocking mode and the recv raises a + # blocking error, simply return False indicating that + # there's no data to be read. otherwise raise the + # original exception. 
+ allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) + if not raise_on_timeout and ex.errno == allowed: + return False + raise ConnectionError("Error while reading from socket: %s" % + (ex.args,)) + finally: + if custom_timeout: + sock.settimeout(self._socket_timeout) + + def read_response(self): + if not self._reader: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + # _next_response might be cached from a can_read() call + if self._next_response is not False: + response = self._next_response + self._next_response = False + return response + + response = self._reader.gets() + while response is False: + self.read_from_socket() + response = self._reader.gets() + # if an older version of hiredis is installed, we need to attempt + # to convert ResponseErrors to their appropriate types. + if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: + if isinstance(response, ResponseError): + response = self.parse_error(response.args[0]) + elif isinstance(response, list) and response and \ + isinstance(response[0], ResponseError): + response[0] = self.parse_error(response[0].args[0]) + # if the response is a ConnectionError or the response is a list and + # the first item is a ConnectionError, raise it as something bad + # happened + if isinstance(response, ConnectionError): + raise response + elif isinstance(response, list) and response and \ + isinstance(response[0], ConnectionError): + raise response[0] + return response + + +if HIREDIS_AVAILABLE: + DefaultParser = HiredisParser +else: + DefaultParser = PythonParser + + +class Connection(object): + "Manages TCP communication to and from a Redis server" + description_format = "Connection" + + def __init__(self, host='localhost', port=6379, db=0, password=None, + socket_timeout=None, socket_connect_timeout=None, + socket_keepalive=False, socket_keepalive_options=None, + socket_type=0, retry_on_timeout=False, encoding='utf-8', + encoding_errors='strict', decode_responses=False, + parser_class=DefaultParser, 
socket_read_size=65536, + health_check_interval=0): + self.pid = os.getpid() + self.host = host + self.port = int(port) + self.db = db + self.password = password + self.socket_timeout = socket_timeout + self.socket_connect_timeout = socket_connect_timeout or socket_timeout + self.socket_keepalive = socket_keepalive + self.socket_keepalive_options = socket_keepalive_options or {} + self.socket_type = socket_type + self.retry_on_timeout = retry_on_timeout + self.health_check_interval = health_check_interval + self.next_health_check = 0 + self.encoder = Encoder(encoding, encoding_errors, decode_responses) + self._sock = None + self._parser = parser_class(socket_read_size=socket_read_size) + self._description_args = { + 'host': self.host, + 'port': self.port, + 'db': self.db, + } + self._connect_callbacks = [] + self._buffer_cutoff = 6000 + + def __repr__(self): + return self.description_format % self._description_args + + def __del__(self): + try: + self.disconnect() + except Exception: + pass + + def register_connect_callback(self, callback): + self._connect_callbacks.append(callback) + + def clear_connect_callbacks(self): + self._connect_callbacks = [] + + def connect(self): + "Connects to the Redis server if not already connected" + if self._sock: + return + try: + sock = self._connect() + except socket.timeout: + raise TimeoutError("Timeout connecting to server") + except socket.error: + e = sys.exc_info()[1] + raise ConnectionError(self._error_message(e)) + + self._sock = sock + try: + self.on_connect() + except RedisError: + # clean up after any error in on_connect + self.disconnect() + raise + + # run any user callbacks. 
right now the only internal callback + # is for pubsub channel/pattern resubscription + for callback in self._connect_callbacks: + callback(self) + + def _connect(self): + "Create a TCP socket connection" + # we want to mimic what socket.create_connection does to support + # ipv4/ipv6, but we want to set options prior to calling + # socket.connect() + err = None + for res in socket.getaddrinfo(self.host, self.port, self.socket_type, + socket.SOCK_STREAM): + family, socktype, proto, canonname, socket_address = res + sock = None + try: + sock = socket.socket(family, socktype, proto) + # TCP_NODELAY + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + # TCP_KEEPALIVE + if self.socket_keepalive: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + for k, v in iteritems(self.socket_keepalive_options): + sock.setsockopt(socket.IPPROTO_TCP, k, v) + + # set the socket_connect_timeout before we connect + sock.settimeout(self.socket_connect_timeout) + + # connect + sock.connect(socket_address) + + # set the socket_timeout now that we're connected + sock.settimeout(self.socket_timeout) + return sock + + except socket.error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + raise socket.error("socket.getaddrinfo returned an empty list") + + def _error_message(self, exception): + # args for socket.error can either be (errno, "message") + # or just "message" + if len(exception.args) == 1: + return "Error connecting to %s:%s. %s." % \ + (self.host, self.port, exception.args[0]) + else: + return "Error %s connecting to %s:%s. %s." 
% \ + (exception.args[0], self.host, self.port, exception.args[1]) + + def on_connect(self): + "Initialize the connection, authenticate and select a database" + self._parser.on_connect(self) + + # if a password is specified, authenticate + if self.password: + # avoid checking health here -- PING will fail if we try + # to check the health prior to the AUTH + self.send_command('AUTH', self.password, check_health=False) + if nativestr(self.read_response()) != 'OK': + raise AuthenticationError('Invalid Password') + + # if a database is specified, switch to it + if self.db: + self.send_command('SELECT', self.db) + if nativestr(self.read_response()) != 'OK': + raise ConnectionError('Invalid Database') + + def disconnect(self): + "Disconnects from the Redis server" + self._parser.on_disconnect() + if self._sock is None: + return + try: + if os.getpid() == self.pid: + self._sock.shutdown(socket.SHUT_RDWR) + self._sock.close() + except socket.error: + pass + self._sock = None + + def check_health(self): + "Check the health of the connection with a PING/PONG" + if self.health_check_interval and time() > self.next_health_check: + try: + self.send_command('PING', check_health=False) + if nativestr(self.read_response()) != 'PONG': + raise ConnectionError( + 'Bad response from PING health check') + except (ConnectionError, TimeoutError) as ex: + self.disconnect() + self.send_command('PING', check_health=False) + if nativestr(self.read_response()) != 'PONG': + raise ConnectionError( + 'Bad response from PING health check') + + def send_packed_command(self, command, check_health=True): + "Send an already packed command to the Redis server" + if not self._sock: + self.connect() + # guard against health check recurrsion + if check_health: + self.check_health() + try: + if isinstance(command, str): + command = [command] + for item in command: + self._sock.sendall(item) + except socket.timeout: + self.disconnect() + raise TimeoutError("Timeout writing to socket") + except 
socket.error: + e = sys.exc_info()[1] + self.disconnect() + if len(e.args) == 1: + errno, errmsg = 'UNKNOWN', e.args[0] + else: + errno = e.args[0] + errmsg = e.args[1] + raise ConnectionError("Error %s while writing to socket. %s." % + (errno, errmsg)) + except: # noqa: E722 + self.disconnect() + raise + + def send_command(self, *args, **kwargs): + "Pack and send a command to the Redis server" + self.send_packed_command(self.pack_command(*args), + check_health=kwargs.get('check_health', True)) + + def can_read(self, timeout=0): + "Poll the socket to see if there's data that can be read." + sock = self._sock + if not sock: + self.connect() + sock = self._sock + return self._parser.can_read(timeout) + + def read_response(self): + "Read the response from a previously sent command" + try: + response = self._parser.read_response() + except socket.timeout: + self.disconnect() + raise TimeoutError("Timeout reading from %s:%s" % + (self.host, self.port)) + except socket.error: + self.disconnect() + e = sys.exc_info()[1] + raise ConnectionError("Error while reading from %s:%s : %s" % + (self.host, self.port, e.args)) + except: # noqa: E722 + self.disconnect() + raise + + if self.health_check_interval: + self.next_health_check = time() + self.health_check_interval + + if isinstance(response, ResponseError): + raise response + return response + + def pack_command(self, *args): + "Pack a series of arguments into the Redis protocol" + output = [] + # the client might have included 1 or more literal arguments in + # the command name, e.g., 'CONFIG GET'. The Redis server expects these + # arguments to be sent separately, so split the first argument + # manually. These arguments should be bytestrings so that they are + # not encoded. 
+ if isinstance(args[0], unicode): + args = tuple(args[0].encode().split()) + args[1:] + elif b' ' in args[0]: + args = tuple(args[0].split()) + args[1:] + + buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF)) + + buffer_cutoff = self._buffer_cutoff + for arg in imap(self.encoder.encode, args): + # to avoid large string mallocs, chunk the command into the + # output list if we're sending large values + if len(buff) > buffer_cutoff or len(arg) > buffer_cutoff: + buff = SYM_EMPTY.join( + (buff, SYM_DOLLAR, str(len(arg)).encode(), SYM_CRLF)) + output.append(buff) + output.append(arg) + buff = SYM_CRLF + else: + buff = SYM_EMPTY.join( + (buff, SYM_DOLLAR, str(len(arg)).encode(), + SYM_CRLF, arg, SYM_CRLF)) + output.append(buff) + return output + + def pack_commands(self, commands): + "Pack multiple commands into the Redis protocol" + output = [] + pieces = [] + buffer_length = 0 + buffer_cutoff = self._buffer_cutoff + + for cmd in commands: + for chunk in self.pack_command(*cmd): + chunklen = len(chunk) + if buffer_length > buffer_cutoff or chunklen > buffer_cutoff: + output.append(SYM_EMPTY.join(pieces)) + buffer_length = 0 + pieces = [] + + if chunklen > self._buffer_cutoff: + output.append(chunk) + else: + pieces.append(chunk) + buffer_length += chunklen + + if pieces: + output.append(SYM_EMPTY.join(pieces)) + return output + + +class SSLConnection(Connection): + description_format = "SSLConnection" + + def __init__(self, ssl_keyfile=None, ssl_certfile=None, + ssl_cert_reqs='required', ssl_ca_certs=None, **kwargs): + if not ssl_available: + raise RedisError("Python wasn't built with SSL support") + + super(SSLConnection, self).__init__(**kwargs) + + self.keyfile = ssl_keyfile + self.certfile = ssl_certfile + if ssl_cert_reqs is None: + ssl_cert_reqs = ssl.CERT_NONE + elif isinstance(ssl_cert_reqs, basestring): + CERT_REQS = { + 'none': ssl.CERT_NONE, + 'optional': ssl.CERT_OPTIONAL, + 'required': ssl.CERT_REQUIRED + } + if ssl_cert_reqs not in 
CERT_REQS: + raise RedisError( + "Invalid SSL Certificate Requirements Flag: %s" % + ssl_cert_reqs) + ssl_cert_reqs = CERT_REQS[ssl_cert_reqs] + self.cert_reqs = ssl_cert_reqs + self.ca_certs = ssl_ca_certs + + def _connect(self): + "Wrap the socket with SSL support" + sock = super(SSLConnection, self)._connect() + if hasattr(ssl, "create_default_context"): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = self.cert_reqs + if self.certfile and self.keyfile: + context.load_cert_chain(certfile=self.certfile, + keyfile=self.keyfile) + if self.ca_certs: + context.load_verify_locations(self.ca_certs) + sock = context.wrap_socket(sock, server_hostname=self.host) + else: + # In case this code runs in a version which is older than 2.7.9, + # we want to fall back to old code + sock = ssl.wrap_socket(sock, + cert_reqs=self.cert_reqs, + keyfile=self.keyfile, + certfile=self.certfile, + ca_certs=self.ca_certs) + return sock + + +class UnixDomainSocketConnection(Connection): + description_format = "UnixDomainSocketConnection" + + def __init__(self, path='', db=0, password=None, + socket_timeout=None, encoding='utf-8', + encoding_errors='strict', decode_responses=False, + retry_on_timeout=False, + parser_class=DefaultParser, socket_read_size=65536, + health_check_interval=0): + self.pid = os.getpid() + self.path = path + self.db = db + self.password = password + self.socket_timeout = socket_timeout + self.retry_on_timeout = retry_on_timeout + self.health_check_interval = health_check_interval + self.next_health_check = 0 + self.encoder = Encoder(encoding, encoding_errors, decode_responses) + self._sock = None + self._parser = parser_class(socket_read_size=socket_read_size) + self._description_args = { + 'path': self.path, + 'db': self.db, + } + self._connect_callbacks = [] + self._buffer_cutoff = 6000 + + def _connect(self): + "Create a Unix domain socket connection" + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + 
sock.settimeout(self.socket_timeout) + sock.connect(self.path) + return sock + + def _error_message(self, exception): + # args for socket.error can either be (errno, "message") + # or just "message" + if len(exception.args) == 1: + return "Error connecting to unix socket: %s. %s." % \ + (self.path, exception.args[0]) + else: + return "Error %s connecting to unix socket: %s. %s." % \ + (exception.args[0], self.path, exception.args[1]) + + +FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO') + + +def to_bool(value): + if value is None or value == '': + return None + if isinstance(value, basestring) and value.upper() in FALSE_STRINGS: + return False + return bool(value) + + +URL_QUERY_ARGUMENT_PARSERS = { + 'socket_timeout': float, + 'socket_connect_timeout': float, + 'socket_keepalive': to_bool, + 'retry_on_timeout': to_bool, + 'max_connections': int, + 'health_check_interval': int, +} + + +class ConnectionPool(object): + "Generic connection pool" + @classmethod + def from_url(cls, url, db=None, decode_components=False, **kwargs): + """ + Return a connection pool configured from the given URL. + + For example:: + + redis://[:password]@localhost:6379/0 + rediss://[:password]@localhost:6379/0 + unix://[:password]@/path/to/socket.sock?db=0 + + Three URL schemes are supported: + + - ```redis://`` + `_ creates a + normal TCP socket connection + - ```rediss://`` + `_ creates + a SSL wrapped TCP socket connection + - ``unix://`` creates a Unix Domain Socket connection + + There are several ways to specify a database number. The parse function + will return the first specified option: + 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 2. If using the redis:// scheme, the path argument of the url, e.g. + redis://localhost/0 + 3. The ``db`` argument to this function. + + If none of these options are specified, db=0 is used. + + The ``decode_components`` argument allows this function to work with + percent-encoded URLs. 
If this argument is set to ``True`` all ``%xx`` + escapes will be replaced by their single-character equivalents after + the URL has been parsed. This only applies to the ``hostname``, + ``path``, and ``password`` components. + + Any additional querystring arguments and keyword arguments will be + passed along to the ConnectionPool class's initializer. The querystring + arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied + are parsed as float values. The arguments ``socket_keepalive`` and + ``retry_on_timeout`` are parsed to boolean values that accept + True/False, Yes/No values to indicate state. Invalid types cause a + ``UserWarning`` to be raised. In the case of conflicting arguments, + querystring arguments always win. + + """ + url = urlparse(url) + url_options = {} + + for name, value in iteritems(parse_qs(url.query)): + if value and len(value) > 0: + parser = URL_QUERY_ARGUMENT_PARSERS.get(name) + if parser: + try: + url_options[name] = parser(value[0]) + except (TypeError, ValueError): + warnings.warn(UserWarning( + "Invalid value for `%s` in connection URL." % name + )) + else: + url_options[name] = value[0] + + if decode_components: + password = unquote(url.password) if url.password else None + path = unquote(url.path) if url.path else None + hostname = unquote(url.hostname) if url.hostname else None + else: + password = url.password + path = url.path + hostname = url.hostname + + # We only support redis://, rediss:// and unix:// schemes. 
+ if url.scheme == 'unix': + url_options.update({ + 'password': password, + 'path': path, + 'connection_class': UnixDomainSocketConnection, + }) + + elif url.scheme in ('redis', 'rediss'): + url_options.update({ + 'host': hostname, + 'port': int(url.port or 6379), + 'password': password, + }) + + # If there's a path argument, use it as the db argument if a + # querystring value wasn't specified + if 'db' not in url_options and path: + try: + url_options['db'] = int(path.replace('/', '')) + except (AttributeError, ValueError): + pass + + if url.scheme == 'rediss': + url_options['connection_class'] = SSLConnection + else: + valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://')) + raise ValueError('Redis URL must specify one of the following' + 'schemes (%s)' % valid_schemes) + + # last shot at the db value + url_options['db'] = int(url_options.get('db', db or 0)) + + # update the arguments from the URL values + kwargs.update(url_options) + + # backwards compatability + if 'charset' in kwargs: + warnings.warn(DeprecationWarning( + '"charset" is deprecated. Use "encoding" instead')) + kwargs['encoding'] = kwargs.pop('charset') + if 'errors' in kwargs: + warnings.warn(DeprecationWarning( + '"errors" is deprecated. Use "encoding_errors" instead')) + kwargs['encoding_errors'] = kwargs.pop('errors') + + return cls(**kwargs) + + def __init__(self, connection_class=Connection, max_connections=None, + **connection_kwargs): + """ + Create a connection pool. If max_connections is set, then this + object raises redis.ConnectionError when the pool's limit is reached. + + By default, TCP connections are created unless connection_class is + specified. Use redis.UnixDomainSocketConnection for unix sockets. + + Any additional keyword arguments are passed to the constructor of + connection_class. 
+ """ + max_connections = max_connections or 2 ** 31 + if not isinstance(max_connections, (int, long)) or max_connections < 0: + raise ValueError('"max_connections" must be a positive integer') + + self.connection_class = connection_class + self.connection_kwargs = connection_kwargs + self.max_connections = max_connections + + self.reset() + + def __repr__(self): + return "%s<%s>" % ( + type(self).__name__, + repr(self.connection_class(**self.connection_kwargs)), + ) + + def reset(self): + self.pid = os.getpid() + self._created_connections = 0 + self._available_connections = [] + self._in_use_connections = set() + self._check_lock = threading.Lock() + + def _checkpid(self): + if self.pid != os.getpid(): + with self._check_lock: + if self.pid == os.getpid(): + # another thread already did the work while we waited + # on the lock. + return + self.reset() + + def get_connection(self, command_name, *keys, **options): + "Get a connection from the pool" + self._checkpid() + try: + connection = self._available_connections.pop() + except IndexError: + connection = self.make_connection() + self._in_use_connections.add(connection) + try: + # ensure this connection is connected to Redis + connection.connect() + # connections that the pool provides should be ready to send + # a command. if not, the connection was either returned to the + # pool before all data has been read or the socket has been + # closed. either way, reconnect and verify everything is good. 
+ try: + if connection.can_read(): + raise ConnectionError('Connection has data') + except ConnectionError: + connection.disconnect() + connection.connect() + if connection.can_read(): + raise ConnectionError('Connection not ready') + except: # noqa: E722 + # release the connection back to the pool so that we don't leak it + self.release(connection) + raise + + return connection + + def get_encoder(self): + "Return an encoder based on encoding settings" + kwargs = self.connection_kwargs + return Encoder( + encoding=kwargs.get('encoding', 'utf-8'), + encoding_errors=kwargs.get('encoding_errors', 'strict'), + decode_responses=kwargs.get('decode_responses', False) + ) + + def make_connection(self): + "Create a new connection" + if self._created_connections >= self.max_connections: + raise ConnectionError("Too many connections") + self._created_connections += 1 + return self.connection_class(**self.connection_kwargs) + + def release(self, connection): + "Releases the connection back to the pool" + self._checkpid() + if connection.pid != self.pid: + return + self._in_use_connections.remove(connection) + self._available_connections.append(connection) + + def disconnect(self): + "Disconnects all connections in the pool" + self._checkpid() + all_conns = chain(self._available_connections, + self._in_use_connections) + for connection in all_conns: + connection.disconnect() + + +class BlockingConnectionPool(ConnectionPool): + """ + Thread-safe blocking connection pool:: + + >>> from redis.client import Redis + >>> client = Redis(connection_pool=BlockingConnectionPool()) + + It performs the same function as the default + ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that, + it maintains a pool of reusable connections that can be shared by + multiple redis clients (safely across threads if required). 
+ + The difference is that, in the event that a client tries to get a + connection from the pool when all of connections are in use, rather than + raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default + ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it + makes the client wait ("blocks") for a specified number of seconds until + a connection becomes available. + + Use ``max_connections`` to increase / decrease the pool size:: + + >>> pool = BlockingConnectionPool(max_connections=10) + + Use ``timeout`` to tell it either how many seconds to wait for a connection + to become available, or to block forever: + + # Block forever. + >>> pool = BlockingConnectionPool(timeout=None) + + # Raise a ``ConnectionError`` after five seconds if a connection is + # not available. + >>> pool = BlockingConnectionPool(timeout=5) + """ + def __init__(self, max_connections=50, timeout=20, + connection_class=Connection, queue_class=LifoQueue, + **connection_kwargs): + + self.queue_class = queue_class + self.timeout = timeout + super(BlockingConnectionPool, self).__init__( + connection_class=connection_class, + max_connections=max_connections, + **connection_kwargs) + + def reset(self): + self.pid = os.getpid() + self._check_lock = threading.Lock() + + # Create and fill up a thread safe queue with ``None`` values. + self.pool = self.queue_class(self.max_connections) + while True: + try: + self.pool.put_nowait(None) + except Full: + break + + # Keep a list of actual connection instances so that we can + # disconnect them later. + self._connections = [] + + def make_connection(self): + "Make a fresh connection." + connection = self.connection_class(**self.connection_kwargs) + self._connections.append(connection) + return connection + + def get_connection(self, command_name, *keys, **options): + """ + Get a connection, blocking for ``self.timeout`` until a connection + is available from the pool. 
+ + If the connection returned is ``None`` then creates a new connection. + Because we use a last-in first-out queue, the existing connections + (having been returned to the pool after the initial ``None`` values + were added) will be returned before ``None`` values. This means we only + create new connections when we need to, i.e.: the actual number of + connections will only increase in response to demand. + """ + # Make sure we haven't changed process. + self._checkpid() + + # Try and get a connection from the pool. If one isn't available within + # self.timeout then raise a ``ConnectionError``. + connection = None + try: + connection = self.pool.get(block=True, timeout=self.timeout) + except Empty: + # Note that this is not caught by the redis client and will be + # raised unless handled by application code. If you want never to + raise ConnectionError("No connection available.") + + # If the ``connection`` is actually ``None`` then that's a cue to make + # a new connection to add to the pool. + if connection is None: + connection = self.make_connection() + + try: + # ensure this connection is connected to Redis + connection.connect() + # connections that the pool provides should be ready to send + # a command. if not, the connection was either returned to the + # pool before all data has been read or the socket has been + # closed. either way, reconnect and verify everything is good. + try: + if connection.can_read(): + raise ConnectionError('Connection has data') + except ConnectionError: + connection.disconnect() + connection.connect() + if connection.can_read(): + raise ConnectionError('Connection not ready') + except: # noqa: E722 + # release the connection back to the pool so that we don't leak it + self.release(connection) + raise + + return connection + + def release(self, connection): + "Releases the connection back to the pool." + # Make sure we haven't changed process. 
+ self._checkpid() + if connection.pid != self.pid: + return + + # Put the connection back into the pool. + try: + self.pool.put_nowait(connection) + except Full: + # perhaps the pool has been reset() after a fork? regardless, + # we don't want this connection + pass + + def disconnect(self): + "Disconnects all connections in the pool." + self._checkpid() + for connection in self._connections: + connection.disconnect() diff --git a/utill/rediss/exceptions.py b/utill/rediss/exceptions.py new file mode 100644 index 0000000..e7f2cbb --- /dev/null +++ b/utill/rediss/exceptions.py @@ -0,0 +1,65 @@ +"Core exceptions raised by the Redis client" + + +class RedisError(Exception): + pass + + +class ConnectionError(RedisError): + pass + + +class TimeoutError(RedisError): + pass + + +class AuthenticationError(ConnectionError): + pass + + +class BusyLoadingError(ConnectionError): + pass + + +class InvalidResponse(RedisError): + pass + + +class ResponseError(RedisError): + pass + + +class DataError(RedisError): + pass + + +class PubSubError(RedisError): + pass + + +class WatchError(RedisError): + pass + + +class NoScriptError(ResponseError): + pass + + +class ExecAbortError(ResponseError): + pass + + +class ReadOnlyError(ResponseError): + pass + + +class LockError(RedisError, ValueError): + "Errors acquiring or releasing a lock" + # NOTE: For backwards compatability, this class derives from ValueError. + # This was originally chosen to behave like threading.Lock. + pass + + +class LockNotOwnedError(LockError): + "Error trying to extend or release a lock that is (no longer) owned" + pass diff --git a/utill/rediss/lock.py b/utill/rediss/lock.py new file mode 100644 index 0000000..d4b33c8 --- /dev/null +++ b/utill/rediss/lock.py @@ -0,0 +1,274 @@ +import threading +import time as mod_time +import uuid +from .exceptions import LockError, LockNotOwnedError +from .utils import dummy + + +class Lock(object): + """ + A shared, distributed Lock. 
Using Redis for locking allows the Lock + to be shared across processes and/or machines. + + It's left to the user to resolve deadlock issues and make sure + multiple clients play nicely together. + """ + + lua_release = None + lua_extend = None + lua_reacquire = None + + # KEYS[1] - lock name + # ARGS[1] - token + # return 1 if the lock was released, otherwise 0 + LUA_RELEASE_SCRIPT = """ + local token = redis.call('get', KEYS[1]) + if not token or token ~= ARGV[1] then + return 0 + end + redis.call('del', KEYS[1]) + return 1 + """ + + # KEYS[1] - lock name + # ARGS[1] - token + # ARGS[2] - additional milliseconds + # return 1 if the locks time was extended, otherwise 0 + LUA_EXTEND_SCRIPT = """ + local token = redis.call('get', KEYS[1]) + if not token or token ~= ARGV[1] then + return 0 + end + local expiration = redis.call('pttl', KEYS[1]) + if not expiration then + expiration = 0 + end + if expiration < 0 then + return 0 + end + redis.call('pexpire', KEYS[1], expiration + ARGV[2]) + return 1 + """ + + # KEYS[1] - lock name + # ARGS[1] - token + # ARGS[2] - milliseconds + # return 1 if the locks time was reacquired, otherwise 0 + LUA_REACQUIRE_SCRIPT = """ + local token = redis.call('get', KEYS[1]) + if not token or token ~= ARGV[1] then + return 0 + end + redis.call('pexpire', KEYS[1], ARGV[2]) + return 1 + """ + + def __init__(self, redis, name, timeout=None, sleep=0.1, + blocking=True, blocking_timeout=None, thread_local=True): + """ + Create a new Lock instance named ``name`` using the Redis client + supplied by ``redis``. + + ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + ``timeout`` can be specified as a float or integer, both representing + the number of seconds to wait. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. 
+ + ``blocking`` indicates whether calling ``acquire`` should block until + the lock has been acquired or to fail immediately, causing ``acquire`` + to return False and the lock not being acquired. Defaults to True. + Note this value can be overridden by passing a ``blocking`` + argument to ``acquire``. + + ``blocking_timeout`` indicates the maximum amount of time in seconds to + spend trying to acquire the lock. A value of ``None`` indicates + continue trying forever. ``blocking_timeout`` can be specified as a + float or integer, both representing the number of seconds to wait. + + ``thread_local`` indicates whether the lock token is placed in + thread-local storage. By default, the token is placed in thread local + storage so that a thread only sees its token, not a token set by + another thread. Consider the following timeline: + + time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. + thread-1 sets the token to "abc" + time: 1, thread-2 blocks trying to acquire `my-lock` using the + Lock instance. + time: 5, thread-1 has not yet completed. redis expires the lock + key. + time: 5, thread-2 acquired `my-lock` now that it's available. + thread-2 sets the token to "xyz" + time: 6, thread-1 finishes its work and calls release(). if the + token is *not* stored in thread local storage, then + thread-1 would see the token value as "xyz" and would be + able to successfully release the thread-2's lock. + + In some use cases it's necessary to disable thread local storage. For + example, if you have code where one thread acquires a lock and passes + that lock instance to a worker thread to release later. If thread + local storage isn't disabled in this case, the worker thread won't see + the token set by the thread that acquired the lock. Our assumption + is that these cases aren't common and as such default to using + thread local storage. 
+ """ + self.redis = redis + self.name = name + self.timeout = timeout + self.sleep = sleep + self.blocking = blocking + self.blocking_timeout = blocking_timeout + self.thread_local = bool(thread_local) + self.local = threading.local() if self.thread_local else dummy() + self.local.token = None + if self.timeout and self.sleep > self.timeout: + raise LockError("'sleep' must be less than 'timeout'") + self.register_scripts() + + def register_scripts(self): + cls = self.__class__ + client = self.redis + if cls.lua_release is None: + cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT) + if cls.lua_extend is None: + cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT) + if cls.lua_reacquire is None: + cls.lua_reacquire = \ + client.register_script(cls.LUA_REACQUIRE_SCRIPT) + + def __enter__(self): + # force blocking, as otherwise the user would have to check whether + # the lock was actually acquired or not. + if self.acquire(blocking=True): + return self + raise LockError("Unable to acquire lock within the time specified") + + def __exit__(self, exc_type, exc_value, traceback): + self.release() + + def acquire(self, blocking=None, blocking_timeout=None, token=None): + """ + Use Redis to hold a shared, distributed lock named ``name``. + Returns True once the lock is acquired. + + If ``blocking`` is False, always return immediately. If the lock + was acquired, return True, otherwise return False. + + ``blocking_timeout`` specifies the maximum number of seconds to + wait trying to acquire the lock. + + ``token`` specifies the token value to be used. If provided, token + must be a bytes object or a string that can be encoded to a bytes + object with the default encoding. If a token isn't specified, a UUID + will be generated. 
+ """ + sleep = self.sleep + if token is None: + token = uuid.uuid1().hex.encode() + else: + encoder = self.redis.connection_pool.get_encoder() + token = encoder.encode(token) + if blocking is None: + blocking = self.blocking + if blocking_timeout is None: + blocking_timeout = self.blocking_timeout + stop_trying_at = None + if blocking_timeout is not None: + stop_trying_at = mod_time.time() + blocking_timeout + while True: + if self.do_acquire(token): + self.local.token = token + return True + if not blocking: + return False + if stop_trying_at is not None and mod_time.time() > stop_trying_at: + return False + mod_time.sleep(sleep) + + def do_acquire(self, token): + if self.timeout: + # convert to milliseconds + timeout = int(self.timeout * 1000) + else: + timeout = None + if self.redis.set(self.name, token, nx=True, px=timeout): + return True + return False + + def locked(self): + """ + Returns True if this key is locked by any process, otherwise False. + """ + return self.redis.get(self.name) is not None + + def owned(self): + """ + Returns True if this key is locked by this lock, otherwise False. 
+ """ + stored_token = self.redis.get(self.name) + # need to always compare bytes to bytes + # TODO: this can be simplified when the context manager is finished + if stored_token and not isinstance(stored_token, bytes): + encoder = self.redis.connection_pool.get_encoder() + stored_token = encoder.encode(stored_token) + return self.local.token is not None and \ + stored_token == self.local.token + + def release(self): + "Releases the already acquired lock" + expected_token = self.local.token + if expected_token is None: + raise LockError("Cannot release an unlocked lock") + self.local.token = None + self.do_release(expected_token) + + def do_release(self, expected_token): + if not bool(self.lua_release(keys=[self.name], + args=[expected_token], + client=self.redis)): + raise LockNotOwnedError("Cannot release a lock" + " that's no longer owned") + + def extend(self, additional_time): + """ + Adds more time to an already acquired lock. + + ``additional_time`` can be specified as an integer or a float, both + representing the number of seconds to add. + """ + if self.local.token is None: + raise LockError("Cannot extend an unlocked lock") + if self.timeout is None: + raise LockError("Cannot extend a lock with no timeout") + return self.do_extend(additional_time) + + def do_extend(self, additional_time): + additional_time = int(additional_time * 1000) + if not bool(self.lua_extend(keys=[self.name], + args=[self.local.token, additional_time], + client=self.redis)): + raise LockNotOwnedError("Cannot extend a lock that's" + " no longer owned") + return True + + def reacquire(self): + """ + Resets a TTL of an already acquired lock back to a timeout value. 
+ """ + if self.local.token is None: + raise LockError("Cannot reacquire an unlocked lock") + if self.timeout is None: + raise LockError("Cannot reacquire a lock with no timeout") + return self.do_reacquire() + + def do_reacquire(self): + timeout = int(self.timeout * 1000) + if not bool(self.lua_reacquire(keys=[self.name], + args=[self.local.token, timeout], + client=self.redis)): + raise LockNotOwnedError("Cannot reacquire a lock that's" + " no longer owned") + return True diff --git a/utill/rediss/sentinel.py b/utill/rediss/sentinel.py new file mode 100644 index 0000000..11263d2 --- /dev/null +++ b/utill/rediss/sentinel.py @@ -0,0 +1,286 @@ +import random +import weakref + +from redis.client import Redis +from redis.connection import ConnectionPool, Connection +from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError, + TimeoutError) +from redis._compat import iteritems, nativestr, xrange + + +class MasterNotFoundError(ConnectionError): + pass + + +class SlaveNotFoundError(ConnectionError): + pass + + +class SentinelManagedConnection(Connection): + def __init__(self, **kwargs): + self.connection_pool = kwargs.pop('connection_pool') + super(SentinelManagedConnection, self).__init__(**kwargs) + + def __repr__(self): + pool = self.connection_pool + s = '%s' % (type(self).__name__, pool.service_name) + if self.host: + host_info = ',host=%s,port=%s' % (self.host, self.port) + s = s % host_info + return s + + def connect_to(self, address): + self.host, self.port = address + super(SentinelManagedConnection, self).connect() + if self.connection_pool.check_connection: + self.send_command('PING') + if nativestr(self.read_response()) != 'PONG': + raise ConnectionError('PING failed') + + def connect(self): + if self._sock: + return # already connected + if self.connection_pool.is_master: + self.connect_to(self.connection_pool.get_master_address()) + else: + for slave in self.connection_pool.rotate_slaves(): + try: + return self.connect_to(slave) + except 
ConnectionError: + continue + raise SlaveNotFoundError # Never be here + + def read_response(self): + try: + return super(SentinelManagedConnection, self).read_response() + except ReadOnlyError: + if self.connection_pool.is_master: + # When talking to a master, a ReadOnlyError when likely + # indicates that the previous master that we're still connected + # to has been demoted to a slave and there's a new master. + # calling disconnect will force the connection to re-query + # sentinel during the next connect() attempt. + self.disconnect() + raise ConnectionError('The previous master is now a slave') + raise + + +class SentinelConnectionPool(ConnectionPool): + """ + Sentinel backed connection pool. + + If ``check_connection`` flag is set to True, SentinelManagedConnection + sends a PING command right after establishing the connection. + """ + + def __init__(self, service_name, sentinel_manager, **kwargs): + kwargs['connection_class'] = kwargs.get( + 'connection_class', SentinelManagedConnection) + self.is_master = kwargs.pop('is_master', True) + self.check_connection = kwargs.pop('check_connection', False) + super(SentinelConnectionPool, self).__init__(**kwargs) + self.connection_kwargs['connection_pool'] = weakref.proxy(self) + self.service_name = service_name + self.sentinel_manager = sentinel_manager + + def __repr__(self): + return "%s>> from redis.sentinel import Sentinel + >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) + >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) + >>> master.set('foo', 'bar') + >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) + >>> slave.get('foo') + 'bar' + + ``sentinels`` is a list of sentinel nodes. Each node is represented by + a pair (hostname, port). + + ``min_other_sentinels`` defined a minimum number of peers for a sentinel. + When querying a sentinel, if it doesn't meet this threshold, responses + from that sentinel won't be considered valid. 
+ + ``sentinel_kwargs`` is a dictionary of connection arguments used when + connecting to sentinel instances. Any argument that can be passed to + a normal Redis connection can be specified here. If ``sentinel_kwargs`` is + not specified, any socket_timeout and socket_keepalive options specified + in ``connection_kwargs`` will be used. + + ``connection_kwargs`` are keyword arguments that will be used when + establishing a connection to a Redis server. + """ + + def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None, + **connection_kwargs): + # if sentinel_kwargs isn't defined, use the socket_* options from + # connection_kwargs + if sentinel_kwargs is None: + sentinel_kwargs = { + k: v + for k, v in iteritems(connection_kwargs) + if k.startswith('socket_') + } + self.sentinel_kwargs = sentinel_kwargs + + self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs) + for hostname, port in sentinels] + self.min_other_sentinels = min_other_sentinels + self.connection_kwargs = connection_kwargs + + def __repr__(self): + sentinel_addresses = [] + for sentinel in self.sentinels: + sentinel_addresses.append('%s:%s' % ( + sentinel.connection_pool.connection_kwargs['host'], + sentinel.connection_pool.connection_kwargs['port'], + )) + return '%s' % ( + type(self).__name__, + ','.join(sentinel_addresses)) + + def check_master_state(self, state, service_name): + if not state['is_master'] or state['is_sdown'] or state['is_odown']: + return False + # Check if our sentinel doesn't see other nodes + if state['num-other-sentinels'] < self.min_other_sentinels: + return False + return True + + def discover_master(self, service_name): + """ + Asks sentinel servers for the Redis master's address corresponding + to the service labeled ``service_name``. + + Returns a pair (address, port) or raises MasterNotFoundError if no + master is found. 
+ """ + for sentinel_no, sentinel in enumerate(self.sentinels): + try: + masters = sentinel.sentinel_masters() + except (ConnectionError, TimeoutError): + continue + state = masters.get(service_name) + if state and self.check_master_state(state, service_name): + # Put this sentinel at the top of the list + self.sentinels[0], self.sentinels[sentinel_no] = ( + sentinel, self.sentinels[0]) + return state['ip'], state['port'] + raise MasterNotFoundError("No master found for %r" % (service_name,)) + + def filter_slaves(self, slaves): + "Remove slaves that are in an ODOWN or SDOWN state" + slaves_alive = [] + for slave in slaves: + if slave['is_odown'] or slave['is_sdown']: + continue + slaves_alive.append((slave['ip'], slave['port'])) + return slaves_alive + + def discover_slaves(self, service_name): + "Returns a list of alive slaves for service ``service_name``" + for sentinel in self.sentinels: + try: + slaves = sentinel.sentinel_slaves(service_name) + except (ConnectionError, ResponseError, TimeoutError): + continue + slaves = self.filter_slaves(slaves) + if slaves: + return slaves + return [] + + def master_for(self, service_name, redis_class=Redis, + connection_pool_class=SentinelConnectionPool, **kwargs): + """ + Returns a redis client instance for the ``service_name`` master. + + A SentinelConnectionPool class is used to retrive the master's + address before establishing a new connection. + + NOTE: If the master's address has changed, any cached connections to + the old master are closed. + + By default clients will be a redis.Redis instance. Specify a + different class to the ``redis_class`` argument if you desire + something different. + + The ``connection_pool_class`` specifies the connection pool to use. + The SentinelConnectionPool will be used by default. + + All other keyword arguments are merged with any connection_kwargs + passed to this class and passed to the connection pool as keyword + arguments to be used to initialize Redis connections. 
+ """ + kwargs['is_master'] = True + connection_kwargs = dict(self.connection_kwargs) + connection_kwargs.update(kwargs) + return redis_class(connection_pool=connection_pool_class( + service_name, self, **connection_kwargs)) + + def slave_for(self, service_name, redis_class=Redis, + connection_pool_class=SentinelConnectionPool, **kwargs): + """ + Returns redis client instance for the ``service_name`` slave(s). + + A SentinelConnectionPool class is used to retrive the slave's + address before establishing a new connection. + + By default clients will be a redis.Redis instance. Specify a + different class to the ``redis_class`` argument if you desire + something different. + + The ``connection_pool_class`` specifies the connection pool to use. + The SentinelConnectionPool will be used by default. + + All other keyword arguments are merged with any connection_kwargs + passed to this class and passed to the connection pool as keyword + arguments to be used to initialize Redis connections. + """ + kwargs['is_master'] = False + connection_kwargs = dict(self.connection_kwargs) + connection_kwargs.update(kwargs) + return redis_class(connection_pool=connection_pool_class( + service_name, self, **connection_kwargs)) diff --git a/utill/rediss/utils.py b/utill/rediss/utils.py new file mode 100644 index 0000000..0b0067e --- /dev/null +++ b/utill/rediss/utils.py @@ -0,0 +1,33 @@ +from contextlib import contextmanager + + +try: + import hiredis + HIREDIS_AVAILABLE = True +except ImportError: + HIREDIS_AVAILABLE = False + + +def from_url(url, db=None, **kwargs): + """ + Returns an active Redis client generated from the given database URL. + + Will attempt to extract the database id from the path url fragment, if + none is provided. 
+ """ + from redis.client import Redis + return Redis.from_url(url, db, **kwargs) + + +@contextmanager +def pipeline(redis_obj): + p = redis_obj.pipeline() + yield p + p.execute() + + +class dummy(object): + """ + Instances of this class can be used as an attribute container. + """ + pass -- Gitee From 5e8916b076b78563ec5cd43108de392d3b304e8f Mon Sep 17 00:00:00 2001 From: kunkun Date: Tue, 12 May 2020 01:35:55 +0800 Subject: [PATCH 13/13] kun --- .gitignore | 4 ++-- utill/Queues | Bin 12288 -> 0 bytes 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 utill/Queues diff --git a/.gitignore b/.gitignore index b055fa8..2e97773 100644 --- a/.gitignore +++ b/.gitignore @@ -11,5 +11,5 @@ __pycache__ /dist /kcweb.egg-info /file -utill/db/Queues -utill/db/sqlitedata/kcwlicuxweb \ No newline at end of file +/utill/Queues +/utill/db/sqlitedata/kcwlicuxweb \ No newline at end of file diff --git a/utill/Queues b/utill/Queues deleted file mode 100644 index 59a2485c5ea3ecbb2fa749a1825af7000325d298..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2O>9(E6o6-FLrY;=V`7k%Z)1c=%>Dma5UtM{K}w-@s)7pMpL4O1g0y9!OQT8g zCvjz5m>8mo#soHQBqYWdL_jdcts57BuyW(VrDrU?wl%NDExea@+L_$<&AHz_=iEDK zpPm?-o`)hGo1Jm<1#jHbXf_*<6-A@b9BDKfefYg}HSw4Jr6>5a_xwLDWIS^J#TJfQ zH+-}){0%kK31qSx5Tfh0?hnxH7oj(F|$B*Dg z&qH%?`~(~gtDp9jlkL%NyXcNSKGrT)joo_G&A@iNbK4s`ngg4+Y-zqZ^*3|k1e}1m z#m99k2#c>3Tc=~OYrNZjvOQUh@9h@jQ)6SrzR6vCMkn_dyW9JV(W&m1FgxvGE)Qlz zn2N#SScDwz()+SyZ#y%0xahW@!+G4E%k~0hXOGRUe$mA^KRxpgYianl*6^)H|M02C z$XCOsMqVA+I{fSKhr|0?Keyg%?QHdJ`g+rAo5aT7H-51ZHWov_41F*(F|=vJ_Z!~W zuw(Gg!7m4o4sIK`Ht_Mlfr0zh|Fr&{^_{yQiR&rW25JMffjhuJ=iUvCJrA~;54QRZ zv&zUQlpsa{5sW(#n9`O;8725z=RUl7=F@BE&Ru`|RR4{WpWQt5?)5Y8-dK45;Mu+N zC12z`(CViwBxfx1fl41WI3rCo;8_Zs2q~6x*8O(+TNv#idFldCnD$6_Iq>*$F`<5VRLoad06; z&J0sjGhwKXiYzCra2o9zv~$66qa}}2#ALWe6c>soEx;}3EO)Z-W9c%4Y7&yP zy?8V+1|p5tr2tYVW*MQCkgMfO8KSiITuCaqP8bW2$f?u}1WVE>R^{aE!qxInhKLX_ zaw)A6l%dO*AVemZ)q)u%_!ta6DLu9Pok2tdM< 
zmNwonrxleZMM8u0GEoWwcx5D4$`>+38by+z1T;gE4M7Hnel2BMP!+3J=Sul}hUg%I zVG{86knk}%Cp7Vb0L8p@D^g{}5??6~WQg~;ERiLTDF)!EkCAwyl2^<}%>K&h{7~-C z5G^0BzO zG9+nZ9A+zE{Zf?-#mbfh3=k`+qoFdAOJz4hA{Wv-pR^)M0Y(KhVYeg(%_bkJlkH;p zbcSHYYMeghSoX$5qLndB;{+{Q0a=}F7t6^E2`*u8LkKGH9we4BE-j6oAj*pB+PPRx zWC%+N`;kO|37k@936~_LV2l^ex@srim-{lLSG&$Bhz^^hBHm)3lpI1J3>c-=)45RY z&5$UJRxu_UsA7_O;&FYwO1)x^T2+g^P>yE^=eTe6s+X~7h?9Z|&Xf`yt0)o2Dwp`X za!-Z`f=NP+N=FcMj(;{?F~J!>(Jck)fTN75) z&c8V^b8wdmo=`y|#U@KU1m`hvP2{*#WM%oR=Dghd+kA86j|P6#S8bp+P#dTX)COt; gwSn3|ZJ;(#8>kJ`25JL$kb$9Q-