diff --git a/debug/accuracy_tools/graph_analyzer/README.md b/debug/accuracy_tools/graph_analyzer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..069b10844f8902fac58f0b21e588aa04de1ece7f
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/README.md
@@ -0,0 +1,53 @@
+# Graph Analyzer
+
+#### Introduction
+A graph-analysis accuracy tool.
+
+#### Software Architecture
+Software architecture description (to be completed).
+
+
+#### Installation
+
+1. Download the source code
+```
+git clone https://gitee.com/ascend/mstt.git
+```
+2. Install with pip
+```
+cd debug/accuracy_tools/graph_analyzer
+pip install .
+```
+
+#### Usage Notes
+- Recommended IR graph:
+  - the `anf_after_graph_build` IR graph is recommended
+
+#### Features
+**`<>` denotes a required argument, `[]` an optional one.**
+1. IR graph structure analysis
+Usage:
+
+```
+graph_analyzer --ir <ir_file_path> [--output <output_dir>]
+```
+This parses the IR file and writes the analysis result to struct.json under the given output directory; if --output is omitted, the current directory is used.
+
+
+2. Data-to-code association
+Data-to-code association links dump data to the code call stacks that produced it; "data" here generally means data dumped in static-graph mode under `O0`, `O1`, or `O2`.
+Currently supported:
+- [x] association for the full [tensor (npy)] data format
+- [x] association for the statistics [statistic] data format
+- [x] fused-operator scenarios
+- [x] automatic parsing of dump files with over-long operator names
+- [x] binding backward operators to their forward operators
+
+Usage:
+
+```
+graph_analyzer --ir <ir_file_path> --data <dump_data_dir> [--output <output_dir>]
+```
+
+- In full (tensor) mode, the mapping between data file paths and code call stacks is written to code.csv under the output directory
+- In statistics mode, each entry in the statistics CSV is annotated with its corresponding code stack
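+
+#### Example
+A typical end-to-end run (the paths below are illustrative, not fixed names):
+
+```
+graph_analyzer --ir ./rank_0/anf_after_graph_build.ir --data ./dump_data --output ./result
+```
+
+Afterwards, `./result/struct.json` holds the graph-structure analysis, and in full (tensor) mode `./result/code.csv` holds the data-to-code mapping.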
diff --git a/debug/accuracy_tools/graph_analyzer/graph_analyzer/__init__.py b/debug/accuracy_tools/graph_analyzer/graph_analyzer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/debug/accuracy_tools/graph_analyzer/graph_analyzer/bind.py b/debug/accuracy_tools/graph_analyzer/graph_analyzer/bind.py
new file mode 100644
index 0000000000000000000000000000000000000000..5958b618fa1c4eb46ce9d1785dc35794ebaeeb9b
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/graph_analyzer/bind.py
@@ -0,0 +1,159 @@
+import os
+import logging
+import glob
+from typing import Dict, List, Tuple
+from pathlib import Path
+import pandas as pd
+from graph_analyzer.graph import GraphNode
+
+
+# Trie node
+class TrieNode:
+    def __init__(self):
+        self.children = {}
+        self.is_end_of_key = False
+        self.value = None
+
+
+# Trie keyed by normalized scope names; values are graph nodes
+class Trie:
+    def __init__(self):
+        self.root = TrieNode()
+
+    # Insert a key into the trie
+    def insert(self, key, value):
+        node = self.root
+        for char in key:
+            if char not in node.children:
+                node.children[char] = TrieNode()
+            node = node.children[char]
+        # Mark the end of the key
+        node.is_end_of_key = True
+        node.value = value
+
+    # Find all keys that occur as substrings of the given string
+    def search_in_string(self, string):
+        matched_values = []
+        for i in range(len(string)):
+            node = self.root
+            j = i
+            # Starting from each position, walk the trie character by character
+            while j < len(string) and string[j] in node.children:
+                node = node.children[string[j]]
+                if node.is_end_of_key:
+                    matched_values.append(node.value)
+                j += 1
+        return matched_values
+
+
+# Collect the code stacks of all nodes whose scope key matches the name
+def match_codes(trie, name):
+    matched_nodes = trie.search_in_string(name)
+    matched_codes = ['\n'.join(ii.code_info) for ii in matched_nodes]
+    return '\n'.join(matched_codes)
+
+
+def match_names(trie, name):
+    matched_nodes = trie.search_in_string(name)
+    matched_names = [ii.scope for ii in matched_nodes]
+    return '\n'.join(matched_names)
+
+
+def complex_map(df, match_dict):
+    # Build the trie and insert all keys
+    trie = Trie()
+    for key, value in match_dict.items():
+        trie.insert(key, value)
+
+    df['Code Stack'] = df['Op Name'].apply(lambda name: match_codes(trie, name))
+    df['Scope Name'] = df['Op Name'].apply(lambda name: match_names(trie, name))
+    return df
+
+
+def find_npy_files(npy_path):
+    npy_files = []
+    # If the path itself is a .npy file, return it directly
+    if npy_path.endswith('.npy') and os.path.isfile(npy_path):
+        npy_files.append(Path(npy_path).resolve())
+        return npy_files
+
+    npy_files = list(Path(npy_path).rglob('*.npy'))
+    return npy_files
+
+
+def write_to_csv(param: Dict, output_dir: str, append: bool):
+    # Open the CSV file for writing
+    os.makedirs(output_dir, exist_ok=True)
+    file_name = os.path.join(output_dir, "code.csv")
+    data = [(name, res1, res2) for name, (res1, res2) in param.items()]
+    df = pd.DataFrame(data, columns=['File Path', 'Code Stacks', 'Scope Name'])
+    # If append is True and the file already exists, append to it
+    if append and os.path.exists(file_name):
+        # Clean the data: drop rows where both result columns are empty
+        df = df[(df['Code Stacks'] != '') | (df['Scope Name'] != '')]
+        df.to_csv(file_name, mode='a', header=False, index=False)
+    # Otherwise overwrite, or write normally when the file does not exist
+    else:
+        df.to_csv(file_name, mode='w', header=True, index=False)
+
+
+def find_statistic_files(directory):
+    if not os.path.isdir(directory):
+        return []
+    pattern = os.path.join(directory, '**', "statistic.csv")
+    statistic_files = list(glob.glob(pattern, recursive=True))
+    return statistic_files
+
+
+def bind_for_statistic(statistic_files: List[str], match_dict: Dict):
+    for statistic_file in statistic_files:
+        df = pd.read_csv(statistic_file)
+        df = complex_map(df, match_dict)
+        df.to_csv(statistic_file, index=False)
+        logging.info("Processing %s completed, code stack saved in %s", statistic_file, statistic_file)
+
+
+def bind_code_info_for_data(input_dir: str, nodes: Dict[str, GraphNode]) -> Dict[str, Tuple[str, str]]:
+    # To be refactored later for better performance
+    match_dict = {}
+    for node in nodes.values():
+        # Skip subgraph nodes
+        if node.is_subgraph:
+            continue
+        # Use the normalized scope name as the key
+        scope_name = node.scope.replace("/", "_")
+        match_dict[scope_name] = node
+    npy_files = find_npy_files(input_dir)
+
+    bind_result = {}
+    if not npy_files:
+        statistic_files = find_statistic_files(input_dir)
+        if statistic_files:
+            bind_for_statistic(statistic_files, match_dict)
+        return bind_result
+
+    # Build the trie once; it only depends on match_dict
+    trie = Trie()
+    for key, value in match_dict.items():
+        trie.insert(key, value)
+
+    for npy_file in npy_files:
+        directory, file_name = os.path.split(npy_file)  # split the path
+        name_without_ext = os.path.splitext(file_name)[0]  # file name without extension
+        if '.' not in name_without_ext:
+            # Over-long operator names are dumped under shortened file names;
+            # mapping.csv in the same directory records the real names
+            csv_file_path = os.path.join(directory, 'mapping.csv')
+            df = pd.read_csv(csv_file_path, header=None)
+
+            # Look up the entry matching this .npy file (column 0 holds the file name)
+            matching_row = df[df[0] == file_name]
+            if matching_row.empty:
+                logging.info("No entry found for %s in mapping.csv.", file_name)
+                continue
+            corresponding_name = matching_row[1].values[0]
+            logging.info("The corresponding name in column 1 is: %s", corresponding_name)
+            name_without_ext = os.path.splitext(corresponding_name)[0]
+        npy_path = os.path.realpath(npy_file)
+        node_scope = name_without_ext.split(".")[1]
+        bind_code = match_codes(trie, node_scope)
+        bind_name = match_names(trie, node_scope)
+        bind_result[npy_path] = (bind_code, bind_name)
+    return bind_result
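+
+
+if __name__ == "__main__":
+    # Minimal self-check of the substring matching, using made-up scope names;
+    # not part of the tool's normal CLI flow, just a sketch of how the trie is used.
+    class _FakeNode:
+        def __init__(self, scope, code_info):
+            self.scope = scope
+            self.code_info = code_info
+
+    demo_trie = Trie()
+    demo_trie.insert("Default_network_Add-op1", _FakeNode("Default/network/Add-op1", ["# demo.py(3)"]))
+    # Dump file stems embed the normalized scope name, so a substring search finds it
+    print(match_names(demo_trie, "Add.Default_network_Add-op1.0.0.123"))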
diff --git a/debug/accuracy_tools/graph_analyzer/graph_analyzer/graph.py b/debug/accuracy_tools/graph_analyzer/graph_analyzer/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..564f56c861f847de5f82f33d2cc91295a0871862
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/graph_analyzer/graph.py
@@ -0,0 +1,117 @@
+from typing import List, Dict, Union
+from collections import defaultdict, deque
+
+
+class GraphNode:
+    def __init__(self, name: str, pos: int = -1, unique_name: str = "", operator_name: str = "",
+                 return_variable: str = "", return_value: str = "",
+                 var_inputs: List[str] = None, has_constant_input: bool = False, unique_id: str = "",
+                 scope: str = "", code_info: List[str] = None,
+                 is_subgraph: bool = False, attrs: Union[Dict[str, str], List[str]] = None):
+        self.name = name
+        self.unique_name = unique_name
+        self.pos = pos
+        self.operator_name = operator_name
+        self.return_variable = return_variable
+        self.return_value = return_value
+        self.var_inputs = var_inputs if var_inputs else []
+        self.has_constant_input = has_constant_input
+        self.unique_id = unique_id
+        self.scope = scope
+        self.code_info = code_info if code_info else []
+        self.attrs = attrs if attrs else ({} if not is_subgraph else [])
+        self.nodes = {}  # Internal nodes if this is a subgraph
+        self.predecessors = []  # Predecessor nodes
+        self.successors = []  # Successor nodes
+        self.is_subgraph = is_subgraph
+
+    def trace_back_ancestors(self, ancestors: List[str], visited: Dict[str, bool], parser) -> None:
+        if visited.get(self.unique_name, False):
+            return
+        visited[self.unique_name] = True
+        ancestors.append(self.unique_name)
+        for predecessor in self.predecessors:
+            predecessor.trace_back_ancestors(ancestors, visited, parser)
+
+
+class Graph:
+    def __init__(self, nodes):
+        self.nodes = set(nodes.values())
+
+    def topological_sort(self):
+        # Build the in-degree table
+        nodes = self.nodes
+        in_degree = {node: len(node.predecessors) for node in nodes}
+
+        # Initialize the queue with all nodes of in-degree 0
+        queue = deque([node for node in nodes if in_degree[node] == 0])
+        topo_order = []
+
+        # Kahn's algorithm for topological sorting
+        while queue:
+            node = queue.popleft()
+            topo_order.append(node)
+
+            for successor in node.successors:
+                in_degree[successor] -= 1
+                if in_degree[successor] == 0:
+                    queue.append(successor)
+
+        return topo_order
+
+    def find_independent_nodes(self, subset_nodes):
+        # Topologically sort the whole graph
+        topo_order = self.topological_sort()
+
+        # Keep the subset as a set for fast lookup
+        subset_set = set(subset_nodes)
+
+        # Track which subset nodes have been reached from another subset node
+        visited = set()
+
+        # Collect the subset nodes that do not depend on any other subset node
+        independent_nodes = []
+
+        # Walk the graph in topological order
+        for node in topo_order:
+            if node in subset_set:
+                # If the node is in the subset, check whether it was already reached
+                if node not in visited:
+                    independent_nodes.append(node)
+                # Mark all successors inside the subset as reached (they depend on this node)
+                for successor in node.successors:
+                    if successor in subset_set:
+                        visited.add(successor)
+        return independent_nodes
+
+
+def find_boundary_nodes(nodes, domain_level):
+    domain_structure = defaultdict(lambda: {'boundary': {'upper': set(), 'lower': set()}, 'nodes': set()})
+
+    for node in nodes:
+        if node.scope.startswith("Gradient"):
+            continue
+        node_new_scope = node.scope.split('/')
+        if domain_level <= len(node_new_scope) - 1:  # make sure the last level is not used
+            current_domain = '/'.join(node_new_scope[:domain_level])
+            domain_structure[current_domain]['nodes'].add(node)
+
+    for domain, data in domain_structure.items():
+        # Walk the nodes in the domain, looking for upper and lower boundaries
+        for node in data['nodes']:
+            if not node.operator_name.startswith("Prim"):
+                continue
+            node_scope = node.scope.split('/')
+            for succ in node.successors:
+                succ_scope = succ.scope.split('/')
+                if succ.scope.startswith("Gradient") or len(succ_scope) == 2:
+                    continue
+                if (succ.operator_name != "Param" and succ.operator_name != "Constant") and node_scope[:domain_level] != succ_scope[:domain_level]:
+                    data['boundary']['lower'].add(node.name)
+            for pred in node.predecessors:
+                pred_scope = pred.scope.split('/')
+                if (pred.operator_name != "Param" and pred.operator_name != "Constant") and node_scope[:domain_level] != pred_scope[:domain_level]:
+                    data['boundary']['upper'].add(node.name)
+
+        # Recurse into sub-domains
+        sub_nodes = [node for node in data['nodes'] if len(node.scope.split('/')) > domain_level]
+        if sub_nodes:
+            domain_structure[domain].update(find_boundary_nodes(sub_nodes, domain_level + 1))
+    return domain_structure
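+
+
+if __name__ == "__main__":
+    # Minimal sketch of Kahn's-algorithm sorting on a hand-built chain a -> b -> c;
+    # illustrative only, the real nodes come from the IR parser.
+    a = GraphNode(name="a", unique_name="a")
+    b = GraphNode(name="b", unique_name="b")
+    c = GraphNode(name="c", unique_name="c")
+    a.successors.append(b)
+    b.predecessors.append(a)
+    b.successors.append(c)
+    c.predecessors.append(b)
+    g = Graph({"a": a, "b": b, "c": c})
+    print([n.name for n in g.topological_sort()])  # ['a', 'b', 'c']
+    # Of the subset {b, c}, only b does not depend on another subset node
+    print([n.name for n in g.find_independent_nodes([b, c])])  # ['b']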
diff --git a/debug/accuracy_tools/graph_analyzer/graph_analyzer/graph_parser.py b/debug/accuracy_tools/graph_analyzer/graph_analyzer/graph_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..376473ca0ea444df978bfa81c5a96ccb7c3ff55a
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/graph_analyzer/graph_parser.py
@@ -0,0 +1,214 @@
+import re
+import logging
+from typing import Tuple, List, Dict
+from graph_analyzer.graph import GraphNode
+
+
+class Parser:
+    def __init__(self):
+        self.nodes = {}
+        self.local_dict = {}
+        self.number_dict = {}
+
+    @staticmethod
+    def parse_subgraph_attributes(text: str, subgraph_node: GraphNode, start_pos: int, end_pos: int) -> None:
+        subgraph_attr_pattern = re.compile(r'subgraph attr:\s*(.*)', re.DOTALL)
+        match = subgraph_attr_pattern.search(text, start_pos, end_pos)
+        if match:
+            attrs = match.group(1).strip().split('\n')
+            if isinstance(subgraph_node.attrs, list):
+                subgraph_node.attrs.extend(attrs)
+
+    @staticmethod
+    def parse_graph_attributes(text: str, graph_node: GraphNode) -> None:
+        attr_pattern = re.compile(r'# Attrs:\s*(.*)', re.DOTALL)
+        match = attr_pattern.search(text, graph_node.pos)
+        if match:
+            attrs = match.group(1).strip().split('\n')
+            for attr in attrs:
+                if not attr:  # stop at the first empty line
+                    break
+                key, value = attr.split(':', 1)
+                if isinstance(graph_node.attrs, dict):
+                    graph_node.attrs[key.strip()] = value.strip()
+
+    @staticmethod
+    def parse_code_info(text: str, start_pos: int, end_pos: int) -> List[str]:
+        code_info = []
+        code_info_pattern = re.compile(r'# .*', re.MULTILINE)
+        final_pos = end_pos if end_pos else len(text) - 1
+        lines = text[start_pos + 1:final_pos].split('\n')
+        for line in lines:
+            match = code_info_pattern.search(line)
+            if not match:
+                break
+            code_info.append(match.group(0).strip('# ').strip('/'))
+        return code_info
+
+    @staticmethod
+    def extract_bracket_content(text: str, start_pos: int) -> Tuple[str, int]:
+        stack = []
+        content = []
+        for i in range(start_pos, len(text)):
+            char = text[i]
+            if char == '(':
+                stack.append('(')
+            elif char == ')':
+                stack.pop()
+                if not stack:
+                    content.append(char)
+                    return ''.join(content), i
+            content.append(char)
+        raise ValueError("Mismatched parentheses")
+
+    @staticmethod
+    def find_matching_brace(text: str, start_pos: int) -> int:
+        stack = []
+        for i in range(start_pos, len(text)):
+            if text[i] == '{':
+                stack.append('{')
+            elif text[i] == '}':
+                stack.pop()
+                if not stack:
+                    return i
+        raise ValueError("Matching closing brace not found")
+
+    @staticmethod
+    def extract_constants(inputs_str: str) -> List[str]:
+        constant_pattern = re.compile(r'\b(\w+\(.*?\))')
+        constants = constant_pattern.findall(inputs_str)
+        return constants
+
+    def parse_func_graph(self, text: str) -> None:
+        func_graph_pattern = re.compile(r'# IR entry: @(\S+)')
+        matches = func_graph_pattern.finditer(text)
+        for match in matches:
+            func_name = match.group(1)
+            func_graph_info = GraphNode(name=func_name, pos=match.start(), is_subgraph=False)
+            self.nodes[func_name] = func_graph_info
+
+    def parse_nodes(self, text: str, subgraph_info: GraphNode) -> None:
+        node_pattern = re.compile(r'(%\d+)\((\S+)\)\s*=\s*(\S+)\(')
+        matches = list(node_pattern.finditer(text))
+        for i, match in enumerate(matches):
+            series_number = match.group(1)
+            variable_name = match.group(2)
+            operator_name = match.group(3)
+            unique_name = "&".join([series_number, variable_name])
+            self.local_dict[series_number] = unique_name
+
+            args_str, end_pos = self.__class__.extract_bracket_content(text, match.end() - 1)
+            inputs = re.findall(r'%\w+', args_str)
+            subgraph_inputs = re.findall(r'@\w+', args_str)
+            inputs += subgraph_inputs
+
+            constants = self.__class__.extract_constants(args_str)
+
+            scope_pattern = re.compile(r'# .*scope.*:\s*\((.*?)\)', re.IGNORECASE | re.MULTILINE)
+            scope_match = scope_pattern.search(text, end_pos)
+            scope = scope_match.group(1) if scope_match else ""
+
+            id_pattern = re.compile(r'.*cnode_primal_attrs:\s*\{.*\b(?:forward_unique_id|unique_id):\s*\"(\d+)\".*', re.IGNORECASE)
+            id_search_end = scope_match.start() if scope_match else len(text)
+            unique_id_match = id_pattern.search(text, end_pos, id_search_end)
+            unique_id = unique_id_match.group(1) if unique_id_match else None
+
+            if scope:
+                next_match = matches[i + 1].start() - 1 if i < len(matches) - 1 else None
+                code_info = self.__class__.parse_code_info(text, scope_match.end(), next_match)
+            else:
+                code_info = None
+
+            node_info = GraphNode(name=variable_name, unique_name=unique_name, operator_name=operator_name,
+                                  var_inputs=inputs + constants, unique_id=unique_id, scope=scope, code_info=code_info)
+
+            if unique_id and scope and not scope.startswith("Gradients"):
+                self.number_dict[unique_id] = node_info
+
+            if subgraph_info:
+                subgraph_info.nodes[variable_name] = node_info  # note: keyed by variable_name, not unique_name
+
+            if not self.nodes.get(unique_name, None):
+                self.nodes[unique_name] = node_info
+
+            for const in constants:
+                if const not in self.nodes:
+                    const_node = GraphNode(name=const, operator_name="Constant", var_inputs=[], has_constant_input=True)
+                    self.nodes[const] = const_node
+                    if subgraph_info:
+                        subgraph_info.nodes[const] = const_node
+                    self.local_dict[const] = const
+
+            for input_var in node_info.var_inputs:
+                if input_var in self.local_dict or input_var in self.nodes:
+                    input_name = self.local_dict.get(input_var, input_var)  # fall back to the original name
+                    input_node = self.nodes.get(input_name, None)
+                    if input_node:
+                        node_info.predecessors.append(input_node)
+                        input_node.successors.append(node_info)
+                else:
+                    param_node = GraphNode(name=input_var, operator_name="Param", var_inputs=[], has_constant_input=False)
+                    if not self.nodes.get(input_var, None):
+                        self.nodes[input_var] = param_node
+                    node_info.predecessors.append(param_node)
+                    param_node.successors.append(node_info)
+
+    def extract_callees(self, text: str) -> None:
+        for node_info in self.nodes.values():
+            func_start_pos = node_info.pos
+            func_end_pos = text.find('}', func_start_pos)
+            func_text = text[func_start_pos:func_end_pos]
+            callee_pattern = re.compile(r'Partial\(@(\S+)\(')
+            callee_matches = callee_pattern.finditer(func_text)
+            for callee_match in callee_matches:
+                callee_name = callee_match.group(1)
+                if callee_name not in node_info.var_inputs:
+                    node_info.var_inputs.append(callee_name)
+
+    def parse_subgraphs(self, text: str) -> None:
+        subgraph_pattern = re.compile(r'subgraph\s+@(\S+)(\([^\)]*\))?\s+.*\{')
+        matches = list(subgraph_pattern.finditer(text))
+        end_pos = 0
+        for match in matches:
+            last_pos = end_pos + 2
+            subgraph_name = match.group(1).split('(')[0]
+            start_pos = match.start()
+            end_pos = self.__class__.find_matching_brace(text, start_pos)
+            subgraph_text = text[start_pos:end_pos + 1]
+            subgraph_info = GraphNode(name=subgraph_name, pos=start_pos, is_subgraph=True)
+            self.nodes[subgraph_name] = subgraph_info
+            self.__class__.parse_subgraph_attributes(text, subgraph_info, last_pos, start_pos)
+            self.parse_nodes(subgraph_text, subgraph_info)
+            subgraph_info.end = end_pos
+            logging.info('Parsed subgraph: %s', subgraph_name)
+
+    def count_nodes(self) -> Tuple[int, int]:
+        total_nodes = len(self.nodes)
+        total_cnodes = sum(1 for node in self.nodes.values() if node.name.startswith('CNode'))
+        return total_nodes, total_cnodes
+
+    # Backward nodes reuse the code info of their related forward node
+    def create_backward_map(self):
+        for node in self.nodes.values():
+            if node.scope and node.scope.startswith("Gradients"):
+                related_forward_node = self.number_dict.get(node.unique_id, None)
+                if related_forward_node:
+                    node.code_info = related_forward_node.code_info
+
+    def parse(self, text: str) -> None:
+        self.parse_func_graph(text)
+        self.parse_subgraphs(text)
+        self.parse_nodes(text, None)
+        self.extract_callees(text)
+        self.create_backward_map()
+
+    def get_nodes(self) -> Dict[str, GraphNode]:
+        return self.nodes
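+
+
+if __name__ == "__main__":
+    # Minimal sketch on a hand-written two-node IR fragment (the format below is
+    # an assumption loosely modeled on an ANF IR dump); real inputs come from --ir files.
+    demo_ir = (
+        '%0(a) = Add(%para1, %para2)\n'
+        '      # Fullname with scope: (Default/Add-op0)\n'
+        '%1(b) = Mul(%0, %para3)\n'
+        '      # Fullname with scope: (Default/Mul-op1)\n'
+    )
+    demo_parser = Parser()
+    demo_parser.parse(demo_ir)
+    # Print each parsed node and its predecessors, e.g. "b <- ['a', '%para3']"
+    for n in demo_parser.get_nodes().values():
+        print(n.name, '<-', [p.name for p in n.predecessors])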
diff --git a/debug/accuracy_tools/graph_analyzer/graph_analyzer/main.py b/debug/accuracy_tools/graph_analyzer/graph_analyzer/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..18df68f01670018fb989e3b35eaaa5658a14461a
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/graph_analyzer/main.py
@@ -0,0 +1,23 @@
+import argparse
+from graph_analyzer.processor import process
+
+
+def main():
+    parser = argparse.ArgumentParser(description="IR Parser")
+    parser.add_argument('--ir', type=str, required=True, help="Path to the graph file")
+    parser.add_argument('--data', type=str, required=False, default=None, help="Path to data dir")
+    parser.add_argument('--node-list', type=str, nargs='*', required=False, default=None, help="Error node list")
+    parser.add_argument('--output', type=str, required=False, default="./", help="Path to output dir")
+    parser.add_argument('--append', action='store_true', help="Whether to append to the CSV file if it exists")
+    args = parser.parse_args()
+
+    process(args)
+
+
+if __name__ == "__main__":
+    main()
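+
+# Example invocations (paths are illustrative):
+#   graph_analyzer --ir ./rank_0/anf_after_graph_build.ir
+#   graph_analyzer --ir ./rank_0/anf_after_graph_build.ir --data ./dump_data --output ./result --append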
diff --git a/debug/accuracy_tools/graph_analyzer/graph_analyzer/processor.py b/debug/accuracy_tools/graph_analyzer/graph_analyzer/processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..370008da591df2566cfc8d37b8bda9dfe5b57bff
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/graph_analyzer/processor.py
@@ -0,0 +1,45 @@
+import os
+import json
+from graph_analyzer.graph import Graph, find_boundary_nodes
+from graph_analyzer.graph_parser import Parser
+from graph_analyzer.bind import bind_code_info_for_data, write_to_csv
+
+
+def serialize_domain_structure(domain_structure):
+    serialized_structure = {}
+    for domain, data in domain_structure.items():
+        serialized_structure[domain] = {
+            'boundary': {'upper': list(data['boundary']['upper']), 'lower': list(data['boundary']['lower'])},
+            'nodes': [node.name for node in data['nodes']]
+        }
+        # Recurse into sub-domains, skipping the boundary and nodes entries
+        for key in data:
+            if key not in ['boundary', 'nodes', 'upper', 'lower']:
+                serialized_structure[domain][key] = serialize_domain_structure({key: data[key]})
+    return serialized_structure
+
+
+def process(args):
+    ir_file_path = args.ir
+    with open(ir_file_path, 'r', encoding='utf-8') as f:
+        input_text = f.read()
+
+    parser = Parser()
+    parser.parse(input_text)
+
+    nodes = parser.get_nodes()
+    graph = Graph(nodes)
+
+    if args.data:
+        bind_result = bind_code_info_for_data(args.data, nodes)
+        if bind_result:
+            # Forward the append flag to write_to_csv
+            write_to_csv(bind_result, args.output, args.append)
+
+    domain_structure = find_boundary_nodes(nodes.values(), 1)
+    output_structure = serialize_domain_structure(domain_structure)
+    output_file = os.path.join(args.output, "struct.json")
+
+    # Use os.open() to create the file with restrictive permissions (0o600: owner read/write)
+    fd = os.open(output_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+    with open(fd, "w") as f:
+        json.dump(output_structure, f, indent=4)
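+
+# Programmatic use (hypothetical, mirrors the CLI defaults):
+#   from types import SimpleNamespace
+#   process(SimpleNamespace(ir="graph.ir", data=None, node_list=None, output="./", append=False))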
diff --git a/debug/accuracy_tools/graph_analyzer/setup.py b/debug/accuracy_tools/graph_analyzer/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..34e6b7e88cc4668a23c1741217faf11f1cf7281f
--- /dev/null
+++ b/debug/accuracy_tools/graph_analyzer/setup.py
@@ -0,0 +1,26 @@
+import os
+from setuptools import setup, find_packages
+
+setup(
+    name='graph_analyzer',
+    version='1.0.0',
+    packages=find_packages(),
+    install_requires=[
+        'pandas',
+    ],
+    entry_points={
+        'console_scripts': [
+            'graph_analyzer=graph_analyzer.main:main',  # allows running `graph_analyzer` from the shell
+        ],
+    },
+    author='Ascend Team',
+    description='Graph Analyzer used for graph analysis and dump data analysis',
+    long_description=open('README.md', encoding='utf-8').read() if os.path.exists('README.md') else '',
+    long_description_content_type='text/markdown',
+    url='https://gitee.com/ascend/mstt/tree/master/debug/accuracy_tools/graph_analyzer/graph_analyzer',
+    classifiers=[
+        'Programming Language :: Python :: 3',
+        'License :: OSI Approved :: MIT License',
+        'Operating System :: OS Independent',
+    ],
+    python_requires='>=3.6',
+)