it = networkInterfaceSet.iterator();
+ String mac = "";
+ while (it.hasNext()) {
+ mac = NetworkUtils.getMacAddress(it.next(), "-");
+ if (StringUtils.hasText(mac)) {
+ break;
+ }
+ }
+ return mac.toUpperCase();
+ }
+}
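The hunk above walks the machine's network interfaces and returns the first non-empty MAC address, upper-cased, via the project's NetworkUtils and StringUtils helpers. A minimal sketch of the same lookup using only JDK classes (the class and method names below are illustrative, not part of this PR):

import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;
import java.util.StringJoiner;

public class MacAddressSketch {

    /** Returns the first resolvable MAC address as "AA-BB-CC-DD-EE-FF", or "" if none is found. */
    public static String firstMacAddress() throws SocketException {
        Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
        while (interfaces.hasMoreElements()) {
            byte[] hardware = interfaces.nextElement().getHardwareAddress();
            if (hardware == null || hardware.length == 0) {
                continue; // loopback and virtual interfaces usually report no MAC
            }
            StringJoiner mac = new StringJoiner("-");
            for (byte b : hardware) {
                mac.add(String.format("%02X", b)); // %X treats byte as unsigned
            }
            return mac.toString();
        }
        return "";
    }
}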
diff --git a/itools-core/itools-common/src/main/java/com/itools/core/snowflake/impl/RandomNodeGenerate.java b/itools-core/itools-common/src/main/java/com/itools/core/snowflake/impl/RandomNodeGenerate.java
new file mode 100644
index 0000000000000000000000000000000000000000..af44815d561a88deca18c633e7d34c01709364d9
--- /dev/null
+++ b/itools-core/itools-common/src/main/java/com/itools/core/snowflake/impl/RandomNodeGenerate.java
@@ -0,0 +1,139 @@
+package com.itools.core.snowflake.impl;
+
+import org.springframework.data.redis.core.RedisTemplate;
+
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * ClassName: RandomNodeGenerate
+ * Description: keeps one Redis set of datacenter ids and, for each datacenter, a Redis set of the worker ids claimed in it.
+ *
+ * Intended for hosts running multiple applications (e.g. containers). A worker id cannot be looked up from a generated id afterwards, because the worker id may change between restarts.
+ */
+public class RandomNodeGenerate implements WorkNodeGenerate {
+
+ /**
+ * Number of bits allocated to the worker id
+ */
+ private final int workerIdBits = 5;
+
+ /**
+ * Maximum node id: 31, i.e. ~(-1 << workerIdBits); in theory this supports 31 * 31 machines
+ */
+ private final Integer MAX_NODE_ID = ~(-1 << workerIdBits);
+
+ /**
+ * Minimum node id
+ */
+ private final Integer MIN_NODE_ID = 0;
+
+ private static String DATA_CENTER_KEY = "DATA_CENTER_KEY";
+
+ private static String WORK_ID_KEY_PREFIXX = "WORK_ID_KEY::";
+
+ private static String SNOW_FLAKE_KEY_LOCK;
+
+ private static String SNOW_FLAKE_KEY_LOCK_VALUE;
+
+ /** Redis lock timeout, in seconds */
+ private static final int LOCK_TIMEOUT = 5;
+
+ private Integer dataCenterId;
+
+ static {
+ SNOW_FLAKE_KEY_LOCK = DATA_CENTER_KEY + "::LOCK";
+ SNOW_FLAKE_KEY_LOCK_VALUE = SNOW_FLAKE_KEY_LOCK + "::VALUE";
+ }
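+
+ /*
+ * Assumed key layout (a sketch inferred from the class description and the fields above):
+ * DATA_CENTER_KEY holds the set of datacenter ids in use,
+ * WORK_ID_KEY_PREFIXX + dataCenterId holds the set of worker ids claimed in that datacenter,
+ * and SNOW_FLAKE_KEY_LOCK guards allocation with a lock that expires after LOCK_TIMEOUT seconds.
+ */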
+
+ /**
+ * 1. Find a usable datacenter (one whose worker list is not larger than the maximum node count).
+ * 2. Fetch that datacenter's worker list and claim a free slot in it.
+ * 3. Return the resulting datacenter id and worker id.
+ *
+ * @param redisTemplate Redis client used to coordinate allocation between nodes
+ * @return the allocated cluster node
+ */
+ @Override
+ public ClusterNode generate(RedisTemplate