Merge code from gitee & github;

master
3y committed 3 weeks ago
commit 1dd0a70e5b

@@ -37,7 +37,7 @@ public class CronAsyncThreadPoolConfig {
return ExecutorBuilder.create()
.setCorePoolSize(ThreadPoolConstant.COMMON_CORE_POOL_SIZE)
.setMaxPoolSize(ThreadPoolConstant.COMMON_MAX_POOL_SIZE)
- .setWorkQueue(new LinkedBlockingQueue(ThreadPoolConstant.BIG_QUEUE_SIZE))
+ .setWorkQueue(new LinkedBlockingQueue<>(ThreadPoolConstant.BIG_QUEUE_SIZE))
.setHandler(new ThreadPoolExecutor.CallerRunsPolicy())
.setAllowCoreThreadTimeOut(true)
.setKeepAliveTime(ThreadPoolConstant.SMALL_KEEP_LIVE_TIME, TimeUnit.SECONDS)
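For context, the Hutool ExecutorBuilder chain above assembles a standard ThreadPoolExecutor. A minimal JDK-only sketch of the same pool shape, with illustrative values standing in for the ThreadPoolConstant fields (not the project's actual numbers):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSketch {
    public static void main(String[] args) {
        // Illustrative stand-ins for the ThreadPoolConstant values.
        int core = 4, max = 8, queueSize = 1024, keepAliveSeconds = 60;
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                core, max, keepAliveSeconds, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(queueSize),        // bounded, generically typed queue
                new ThreadPoolExecutor.CallerRunsPolicy());  // back-pressure: the submitter runs the task
        executor.allowCoreThreadTimeOut(true);               // idle core threads may exit
        executor.execute(() -> System.out.println("task ran"));
        executor.shutdown();
    }
}
```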

@@ -25,7 +25,7 @@ public class CronTaskHandler {
@Autowired
private ThreadPoolUtils threadPoolUtils;
- private DtpExecutor dtpExecutor = CronAsyncThreadPoolConfig.getXxlCronExecutor();
+ private final DtpExecutor dtpExecutor = CronAsyncThreadPoolConfig.getXxlCronExecutor();
/**
* austin

@@ -14,7 +14,7 @@ import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
- import java.util.Arrays;
+ import java.util.Collections;
/**
@@ -48,8 +48,8 @@ public class NightShieldLazyPendingHandler {
String taskInfo = redisUtils.lPop(NIGHT_SHIELD_BUT_NEXT_DAY_SEND_KEY);
if (CharSequenceUtil.isNotBlank(taskInfo)) {
try {
- kafkaTemplate.send(topicName, JSON.toJSONString(Arrays.asList(JSON.parseObject(taskInfo, TaskInfo.class))
- , new SerializerFeature[]{SerializerFeature.WriteClassName}));
+ kafkaTemplate.send(topicName, JSON.toJSONString(Collections.singletonList(JSON.parseObject(taskInfo, TaskInfo.class))
+ , SerializerFeature.WriteClassName));
} catch (Exception e) {
log.error("nightShieldLazyJob send kafka fail! e:{},params:{}", Throwables.getStackTraceAsString(e), taskInfo);
}
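Two cleanups recur throughout this commit and meet in this hunk: Collections.singletonList replaces single-element Arrays.asList (a smaller, immutable wrapper), and fastjson's varargs SerializerFeature... parameter makes the explicit array wrapper redundant. A minimal round-trip sketch (the Task class is a hypothetical stand-in for TaskInfo):

```java
import java.util.Collections;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;

public class WriteClassNameSketch {
    // Hypothetical stand-in for TaskInfo.
    public static class Task {
        private String bizId;
        public Task(String bizId) { this.bizId = bizId; }
        public String getBizId() { return bizId; }
    }

    public static void main(String[] args) {
        // WriteClassName embeds an @type field so the consumer can deserialize
        // polymorphically; the varargs parameter takes the feature directly.
        String json = JSON.toJSONString(Collections.singletonList(new Task("demo")),
                SerializerFeature.WriteClassName);
        System.out.println(json);
    }
}
```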

@@ -42,7 +42,7 @@ public class CrowdBatchTaskPending extends AbstractLazyPending<CrowdInfoVo> {
public CrowdBatchTaskPending() {
PendingParam<CrowdInfoVo> pendingParam = new PendingParam<>();
- pendingParam.setQueue(new LinkedBlockingQueue(PendingConstant.QUEUE_SIZE))
+ pendingParam.setQueue(new LinkedBlockingQueue<>(PendingConstant.QUEUE_SIZE))
.setTimeThreshold(PendingConstant.TIME_THRESHOLD)
.setNumThreshold(AustinConstant.BATCH_RECEIVER_SIZE)
.setExecutorService(CronAsyncThreadPoolConfig.getConsumePendingThreadPool());

@@ -10,7 +10,6 @@ import com.java3y.austin.cron.csv.CountFileRowHandler;
import com.java3y.austin.cron.vo.CrowdInfoVo;
import lombok.extern.slf4j.Slf4j;
- import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Paths;
@@ -60,7 +59,7 @@ public class ReadFileUtils {
// get a reader that treats the first row as the header
try (CsvReader reader = CsvUtil.getReader(
- new InputStreamReader(new FileInputStream(path), CharsetUtil.CHARSET_UTF_8),
+ new InputStreamReader(Files.newInputStream(Paths.get(path)), CharsetUtil.CHARSET_UTF_8),
new CsvReadConfig().setContainsHeader(true))) {
reader.read(countFileRowHandler);
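Files.newInputStream is preferred here over new FileInputStream because it fails with a clearer NoSuchFileException and avoids FileInputStream's finalizer overhead. A self-contained sketch of the same Hutool CSV read, counting data rows (crowd.csv is a placeholder path):

```java
import cn.hutool.core.text.csv.CsvReadConfig;
import cn.hutool.core.text.csv.CsvReader;
import cn.hutool.core.text.csv.CsvUtil;
import cn.hutool.core.util.CharsetUtil;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Paths;

public class CsvCountSketch {
    public static void main(String[] args) throws Exception {
        String path = "crowd.csv"; // placeholder file path
        long[] rows = {0};
        // Files.newInputStream reports a NoSuchFileException with the offending
        // path, where FileInputStream's FileNotFoundException carries less context.
        try (CsvReader reader = CsvUtil.getReader(
                new InputStreamReader(Files.newInputStream(Paths.get(path)), CharsetUtil.CHARSET_UTF_8),
                new CsvReadConfig().setContainsHeader(true))) {
            reader.read(row -> rows[0]++); // CsvRowHandler invoked once per data row
        }
        System.out.println("data rows: " + rows[0]);
    }
}
```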

@@ -6,6 +6,7 @@ import cn.hutool.core.util.StrUtil;
import cn.hutool.http.HttpRequest;
import cn.hutool.http.HttpResponse;
import com.alibaba.fastjson.JSON;
+ import com.alibaba.fastjson.TypeReference;
import com.google.common.base.Throwables;
import com.java3y.austin.common.enums.RespStatusEnum;
import com.java3y.austin.common.vo.BasicResultVO;
@@ -48,7 +49,7 @@ public class CronTaskServiceImpl implements CronTaskService {
@Override
public BasicResultVO saveCronTask(XxlJobInfo xxlJobInfo) {
- Map<String, Object> params = JSON.parseObject(JSON.toJSONString(xxlJobInfo), Map.class);
+ Map<String, Object> params = JSON.parseObject(JSON.toJSONString(xxlJobInfo), new TypeReference<Map<String, Object>>() {});
String path = Objects.isNull(xxlJobInfo.getId()) ? xxlAddresses + XxlJobConstant.INSERT_URL
: xxlAddresses + XxlJobConstant.UPDATE_URL;
@@ -174,7 +175,7 @@ public class CronTaskServiceImpl implements CronTaskService {
@Override
public BasicResultVO createGroup(XxlJobGroup xxlJobGroup) {
- Map<String, Object> params = JSON.parseObject(JSON.toJSONString(xxlJobGroup), Map.class);
+ Map<String, Object> params = JSON.parseObject(JSON.toJSONString(xxlJobGroup), new TypeReference<Map<String, Object>>() {});
String path = xxlAddresses + XxlJobConstant.JOB_GROUP_INSERT_URL;
HttpResponse response;
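parseObject(..., Map.class) yields a raw Map and an unchecked-assignment warning; fastjson's TypeReference pins the full generic type. A minimal sketch:

```java
import java.util.Map;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.TypeReference;

public class TypeReferenceSketch {
    public static void main(String[] args) {
        String json = "{\"id\":1,\"cron\":\"0 0 * * * ?\"}";
        // The anonymous subclass captures Map<String, Object> at compile time,
        // so no unchecked cast is needed on assignment.
        Map<String, Object> params =
                JSON.parseObject(json, new TypeReference<Map<String, Object>>() {});
        System.out.println(params.get("cron"));
    }
}
```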

@@ -74,7 +74,7 @@ public class AustinHiveBootStrap {
// 3. write the kafka_source data into kafka_sink
tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
- tableEnv.executeSql("INSERT INTO " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SINK_TABLE_NAME + " SELECT ids,state,businessId,logTimestamp FROM " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SOURCE_TABLE_NAME + "");
+ tableEnv.executeSql("INSERT INTO " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SINK_TABLE_NAME + " SELECT ids,state,businessId,logTimestamp FROM " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SOURCE_TABLE_NAME);
}
}

@@ -18,15 +18,15 @@ import java.util.Map;
public class AlipayClientSingleton {
- private static Map<String, DefaultAlipayClient> alipayClientMap = new HashMap<>();
+ private static final Map<String, DefaultAlipayClient> ALIPAY_CLIENT_MAP = new HashMap<>();
private AlipayClientSingleton() {
}
public static DefaultAlipayClient getSingleton(AlipayMiniProgramAccount alipayMiniProgramAccount) throws AlipayApiException {
- if (!alipayClientMap.containsKey(alipayMiniProgramAccount.getAppId())) {
+ if (!ALIPAY_CLIENT_MAP.containsKey(alipayMiniProgramAccount.getAppId())) {
synchronized (DefaultAlipayClient.class) {
- if (!alipayClientMap.containsKey(alipayMiniProgramAccount.getAppId())) {
+ if (!ALIPAY_CLIENT_MAP.containsKey(alipayMiniProgramAccount.getAppId())) {
AlipayConfig alipayConfig = new AlipayConfig();
alipayConfig.setServerUrl(SendChanelUrlConstant.ALI_MINI_PROGRAM_GATEWAY_URL);
alipayConfig.setAppId(alipayMiniProgramAccount.getAppId());
@@ -35,10 +35,10 @@ public class AlipayClientSingleton {
alipayConfig.setAlipayPublicKey(alipayMiniProgramAccount.getAlipayPublicKey());
alipayConfig.setCharset("utf-8");
alipayConfig.setSignType("RSA2");
- alipayClientMap.put(alipayMiniProgramAccount.getAppId(), new DefaultAlipayClient(alipayConfig));
+ ALIPAY_CLIENT_MAP.put(alipayMiniProgramAccount.getAppId(), new DefaultAlipayClient(alipayConfig));
}
}
}
- return alipayClientMap.get(alipayMiniProgramAccount.getAppId());
+ return ALIPAY_CLIENT_MAP.get(alipayMiniProgramAccount.getAppId());
}
}
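One caveat the rename does not address: this double-checked locking guards a plain HashMap, so the unsynchronized containsKey can race with a concurrent put. A ConcurrentHashMap with computeIfAbsent gives the same build-once-per-appId caching without that hazard; a sketch with a stand-in Client type rather than the Alipay SDK:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ClientCacheSketch {
    // Stand-in for DefaultAlipayClient; construction is assumed expensive.
    public static class Client {
        Client(String appId) { System.out.println("built client for " + appId); }
    }

    private static final ConcurrentMap<String, Client> CACHE = new ConcurrentHashMap<>();

    public static Client getSingleton(String appId) {
        // computeIfAbsent is atomic: the factory runs at most once per key.
        return CACHE.computeIfAbsent(appId, Client::new);
    }

    public static void main(String[] args) {
        getSingleton("app-1");
        getSingleton("app-1"); // cache hit, no second construction
    }
}
```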

@@ -131,7 +131,7 @@ public class SensitiveWordsConfig {
try {
TimeUnit.SECONDS.sleep(UPDATE_TIME_SECONDS);
log.debug("SensitiveWordConfig#startScheduledUpdate start update...");
- loadSensitiveWords();
+ loadSensWords();
storeSensWords();
} catch (InterruptedException e) {
log.error("SensitiveWordConfig#startScheduledUpdate interrupted: {}", e.getMessage());

@@ -37,7 +37,7 @@ public class SlideWindowLimitService extends AbstractLimitService {
@PostConstruct
public void init() {
- redisScript = new DefaultRedisScript();
+ redisScript = new DefaultRedisScript<>();
redisScript.setResultType(Long.class);
redisScript.setScriptSource(new ResourceScriptSource(new ClassPathResource("limit.lua")));
}

@@ -1,6 +1,5 @@
package com.java3y.austin.handler.handler;
- import cn.hutool.core.date.DateUtil;
import com.java3y.austin.common.domain.AnchorInfo;
import com.java3y.austin.common.domain.TaskInfo;
import com.java3y.austin.common.enums.AnchorState;

@@ -14,7 +14,7 @@ import java.util.Map;
@Component
public class HandlerHolder {
- private Map<Integer, Handler> handlers = new HashMap<>(128);
+ private final Map<Integer, Handler> handlers = new HashMap<>(128);
public void putHandler(Integer channelCode, Handler handler) {
handlers.put(channelCode, handler);

@@ -67,7 +67,7 @@ public class AlipayMiniProgramAccountHandler extends BaseHandler{
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
- Map<String, String> valueMap = new HashMap<>();
+ Map<String, String> valueMap = new HashMap<>(1);
valueMap.put("value", entry.getValue());
return valueMap;
}
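Since the inner map holds exactly one fixed entry, Collections.singletonMap would sidestep capacity questions entirely (note that new HashMap<>(1) still resizes on the first put, because the threshold 1 × 0.75 rounds down to 0). A sketch:

```java
import java.util.Collections;
import java.util.Map;

public class SingletonMapSketch {
    public static void main(String[] args) {
        // One immutable entry, no capacity or resize concerns:
        Map<String, String> valueMap = Collections.singletonMap("value", "hello");
        System.out.println(valueMap);
    }
}
```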

@@ -128,7 +128,7 @@ public class DingDingRobotHandler extends BaseHandler{
Mac mac = Mac.getInstance(CommonConstant.HMAC_SHA256_ENCRYPTION_ALGO);
mac.init(new SecretKeySpec(secret.getBytes(StandardCharsets.UTF_8), CommonConstant.HMAC_SHA256_ENCRYPTION_ALGO));
byte[] signData = mac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8));
- sign = URLEncoder.encode(new String(Base64.encodeBase64(signData), CommonConstant.CHARSET_UTF_8));
+ sign = URLEncoder.encode(new String(Base64.encodeBase64(signData), StandardCharsets.UTF_8), CommonConstant.CHARSET_UTF_8);
} catch (Exception e) {
log.error("DingDingHandler#assembleSign fail!:{}", Throwables.getStackTraceAsString(e));
}
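The surrounding code follows DingTalk's documented robot signature scheme: HmacSHA256 over timestamp + "\n" + secret, Base64-encoded, then URL-encoded. A self-contained JDK-only version (java.util.Base64 in place of commons-codec), using the non-deprecated two-argument URLEncoder.encode that this hunk switches to:

```java
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class DingSignSketch {
    /** HmacSHA256 over "timestamp\nsecret", Base64, then URL-encoded. */
    public static String sign(long timestamp, String secret) throws Exception {
        String stringToSign = timestamp + "\n" + secret;
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(new SecretKeySpec(secret.getBytes(StandardCharsets.UTF_8), "HmacSHA256"));
        byte[] signData = mac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8));
        // The deprecated single-argument encode silently used the platform
        // default charset; the two-argument form pins UTF-8.
        return URLEncoder.encode(Base64.getEncoder().encodeToString(signData), "UTF-8");
    }

    public static void main(String[] args) throws Exception {
        System.out.println(sign(System.currentTimeMillis(), "demo-secret"));
    }
}
```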

@@ -45,7 +45,7 @@ public class EmailHandler extends BaseHandler{
channelCode = ChannelType.EMAIL.getCode();
// rate-limit by request; default 3 qps per node (the actual value can be adjusted dynamically via apollo)
- Double rateInitValue = Double.valueOf(3);
+ double rateInitValue = 3.0;
flowControlParam = FlowControlParam.builder().rateInitValue(rateInitValue)
.rateLimitStrategy(RateLimitStrategy.REQUEST_RATE_LIMIT)
.rateLimiter(RateLimiter.create(rateInitValue)).build();
@@ -61,7 +61,7 @@ public class EmailHandler extends BaseHandler{
if (CollUtil.isEmpty(files)) {
MailUtil.send(account, taskInfo.getReceiver(), emailContentModel.getTitle(), emailContentModel.getContent(), true);
} else {
- MailUtil.send(account, taskInfo.getReceiver(), emailContentModel.getTitle(), emailContentModel.getContent(), true, files.toArray(new File[files.size()]));
+ MailUtil.send(account, taskInfo.getReceiver(), emailContentModel.getTitle(), emailContentModel.getContent(), true, files.toArray(new File[0]));
}
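toArray(new File[0]) is the generally recommended form: the collection allocates an exactly-sized array internally, and on modern JVMs the zero-length-array variant performs at least as well as presizing while avoiding a size()/toArray() race on concurrent lists. A quick illustration:

```java
import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class ToArraySketch {
    public static void main(String[] args) {
        List<File> files = new ArrayList<>();
        files.add(new File("a.txt"));
        files.add(new File("b.txt"));
        // Preferred: pass a zero-length array; the list allocates the right size.
        File[] asArray = files.toArray(new File[0]);
        System.out.println(asArray.length); // 2
    }
}
```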

@@ -27,7 +27,7 @@ import org.springframework.context.ApplicationContext;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
- import java.util.Arrays;
+ import java.util.Collections;
import java.util.List;
/**
@@ -116,7 +116,7 @@ public class SmsHandler extends BaseHandler{
*/
if (!taskInfo.getSendAccount().equals(AUTO_FLOW_RULE)) {
SmsAccount account = accountUtils.getAccountById(taskInfo.getSendAccount(), SmsAccount.class);
- return Arrays.asList(MessageTypeSmsConfig.builder().sendAccount(taskInfo.getSendAccount()).scriptName(account.getScriptName()).weights(100).build());
+ return Collections.singletonList(MessageTypeSmsConfig.builder().sendAccount(taskInfo.getSendAccount()).scriptName(account.getScriptName()).weights(100).build());
}
/**

@@ -42,7 +42,6 @@ public class TaskPendingHolder {
for (String groupId : groupIds) {
DtpExecutor executor = HandlerThreadPoolConfig.getExecutor(groupId);
threadPoolUtils.register(executor);
}
}

@@ -39,7 +39,7 @@ public class ReceiverStart {
/**
* groupId
*/
- private static List<String> groupIds = GroupIdMappingUtils.getAllGroupIds();
+ private static final List<String> GROUP_IDS = GroupIdMappingUtils.getAllGroupIds();
/**
* (groupIds)
*/
@@ -58,7 +58,7 @@ public class ReceiverStart {
if (element instanceof Method) {
String name = ((Method) element).getDeclaringClass().getSimpleName() + StrPool.DOT + ((Method) element).getName();
if (RECEIVER_METHOD_NAME.equals(name)) {
- attrs.put("groupId", groupIds.get(index++));
+ attrs.put("groupId", GROUP_IDS.get(index++));
}
}
return attrs;
@@ -70,7 +70,7 @@ public class ReceiverStart {
*/
@PostConstruct
public void init() {
- for (int i = 0; i < groupIds.size(); i++) {
+ for (int i = 0; i < GROUP_IDS.size(); i++) {
context.getBean(Receiver.class);
}
}

@@ -152,7 +152,7 @@ public class TencentSmsScript implements SmsScript {
*/
private List<SmsRecord> assemblePullSmsRecord(TencentSmsAccount account, PullSmsSendStatusResponse resp) {
List<SmsRecord> smsRecordList = new ArrayList<>();
- if (Objects.nonNull(resp) && Objects.nonNull(resp.getPullSmsSendStatusSet()) && resp.getPullSmsSendStatusSet().length > 0) {
+ if (Objects.nonNull(resp) && Objects.nonNull(resp.getPullSmsSendStatusSet())) {
for (PullSmsSendStatus pullSmsSendStatus : resp.getPullSmsSendStatusSet()) {
SmsRecord smsRecord = SmsRecord.builder()
.sendDate(Integer.valueOf(DateUtil.format(new Date(), DatePattern.PURE_DATE_PATTERN)))

@@ -6,7 +6,6 @@ import cn.hutool.core.net.URLEncodeUtil;
import cn.hutool.core.text.CharSequenceUtil;
import cn.hutool.core.text.StrPool;
import cn.hutool.core.util.ArrayUtil;
- import cn.hutool.core.util.StrUtil;
import cn.hutool.http.Header;
import cn.hutool.http.HttpRequest;
import com.alibaba.fastjson.JSON;
@@ -38,7 +37,7 @@ public class YunPianSmsScript implements SmsScript {
private static final String PARAMS_SPLIT_KEY = "{|}";
private static final String PARAMS_KV_SPLIT_KEY = "{:}";
- private static Logger log = LoggerFactory.getLogger(YunPianSmsScript.class);
+ private static final Logger log = LoggerFactory.getLogger(YunPianSmsScript.class);
@Autowired
private AccountUtils accountUtils;

@@ -37,7 +37,7 @@ public class RecallMqAction implements BusinessProcess<RecallTaskModel> {
public void process(ProcessContext<RecallTaskModel> context) {
RecallTaskInfo recallTaskInfo = context.getProcessModel().getRecallTaskInfo();
try {
- String message = JSON.toJSONString(recallTaskInfo, new SerializerFeature[]{SerializerFeature.WriteClassName});
+ String message = JSON.toJSONString(recallTaskInfo, SerializerFeature.WriteClassName);
sendMqService.send(austinRecall, message, tagId);
} catch (Exception e) {
context.setNeedBreak(true).setResponse(BasicResultVO.fail(RespStatusEnum.SERVICE_ERROR));

@@ -47,7 +47,7 @@ public class SendMqAction implements BusinessProcess<SendTaskModel> {
SendTaskModel sendTaskModel = context.getProcessModel();
List<TaskInfo> taskInfo = sendTaskModel.getTaskInfo();
try {
- String message = JSON.toJSONString(sendTaskModel.getTaskInfo(), new SerializerFeature[]{SerializerFeature.WriteClassName});
+ String message = JSON.toJSONString(sendTaskModel.getTaskInfo(), SerializerFeature.WriteClassName);
sendMqService.send(sendMessageTopic, message, tagId);
context.setResponse(BasicResultVO.success(taskInfo.stream().map(v -> SimpleTaskInfo.builder().businessId(v.getBusinessId()).messageId(v.getMessageId()).bizId(v.getBizId()).build()).collect(Collectors.toList())));

@@ -13,7 +13,7 @@ import org.apache.flink.util.Collector;
public class AustinFlatMapFunction implements FlatMapFunction<String, AnchorInfo> {
@Override
- public void flatMap(String value, Collector<AnchorInfo> collector) throws Exception {
+ public void flatMap(String value, Collector<AnchorInfo> collector){
AnchorInfo anchorInfo = JSON.parseObject(value, AnchorInfo.class);
collector.collect(anchorInfo);
}

@@ -28,7 +28,7 @@ import java.util.List;
public class AustinSink implements SinkFunction<AnchorInfo> {
@Override
- public void invoke(AnchorInfo anchorInfo, Context context) throws Exception {
+ public void invoke(AnchorInfo anchorInfo, Context context){
realTimeData(anchorInfo);
}

@@ -23,14 +23,14 @@ public class LettuceRedisUtils {
/**
* redisClient
*/
- private static RedisClient redisClient;
+ private static final RedisClient REDIS_CLIENT;
static {
RedisURI redisUri = RedisURI.Builder.redis(AustinFlinkConstant.REDIS_IP)
.withPort(Integer.parseInt(AustinFlinkConstant.REDIS_PORT))
.withPassword(AustinFlinkConstant.REDIS_PASSWORD.toCharArray())
.build();
- redisClient = RedisClient.create(redisUri);
+ REDIS_CLIENT = RedisClient.create(redisUri);
}
private LettuceRedisUtils() {
@@ -41,14 +41,14 @@ public class LettuceRedisUtils {
* pipeline
*/
public static void pipeline(RedisPipelineCallBack pipelineCallBack) {
- StatefulRedisConnection<byte[], byte[]> connect = redisClient.connect(new ByteArrayCodec());
+ StatefulRedisConnection<byte[], byte[]> connect = REDIS_CLIENT.connect(new ByteArrayCodec());
RedisAsyncCommands<byte[], byte[]> commands = connect.async();
List<RedisFuture<?>> futures = pipelineCallBack.invoke(commands);
commands.flushCommands();
LettuceFutures.awaitAll(10, TimeUnit.SECONDS,
- futures.toArray(new RedisFuture[futures.size()]));
+ futures.toArray(new RedisFuture[0]));
connect.close();
connect.close();
}
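Worth noting: Lettuce's documented pipelining pattern pairs flushCommands() with setAutoFlushCommands(false); the util above flushes but leaves auto-flush on, so each command may already have been sent eagerly. A self-contained sketch of the full pattern (the localhost URI is a placeholder):

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import io.lettuce.core.LettuceFutures;
import io.lettuce.core.RedisClient;
import io.lettuce.core.RedisFuture;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.async.RedisAsyncCommands;

public class LettucePipelineSketch {
    public static void main(String[] args) {
        RedisClient client = RedisClient.create("redis://localhost:6379"); // placeholder URI
        try (StatefulRedisConnection<String, String> connection = client.connect()) {
            RedisAsyncCommands<String, String> commands = connection.async();
            commands.setAutoFlushCommands(false);            // queue instead of sending eagerly
            List<RedisFuture<?>> futures = Arrays.asList(
                    commands.set("demo-key", "demo-value"),
                    commands.expire("demo-key", 60));
            commands.flushCommands();                        // one network burst
            LettuceFutures.awaitAll(10, TimeUnit.SECONDS, futures.toArray(new RedisFuture[0]));
        } finally {
            client.shutdown();
        }
    }
}
```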

@@ -26,7 +26,7 @@ public class SupportThreadPoolConfig {
return ExecutorBuilder.create()
.setCorePoolSize(ThreadPoolConstant.SINGLE_CORE_POOL_SIZE)
.setMaxPoolSize(ThreadPoolConstant.SINGLE_MAX_POOL_SIZE)
- .setWorkQueue(new LinkedBlockingQueue(ThreadPoolConstant.BIG_QUEUE_SIZE))
+ .setWorkQueue(new LinkedBlockingQueue<>(ThreadPoolConstant.BIG_QUEUE_SIZE))
.setHandler(new ThreadPoolExecutor.CallerRunsPolicy())
.setAllowCoreThreadTimeOut(true)
.setKeepAliveTime(ThreadPoolConstant.SMALL_KEEP_LIVE_TIME, TimeUnit.SECONDS)

@@ -21,7 +21,7 @@ import org.springframework.stereotype.Service;
@Service
@ConditionalOnProperty(name = "austin.mq.pipeline", havingValue = MessageQueuePipeline.EVENT_BUS)
public class EventBusSendMqServiceImpl implements SendMqService {
- private EventBus eventBus = new EventBus();
+ private final EventBus eventBus = new EventBus();
@Autowired
private EventBusListener eventBusListener;

@@ -14,7 +14,7 @@ import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import java.nio.charset.StandardCharsets;
- import java.util.Arrays;
+ import java.util.Collections;
import java.util.List;
@@ -36,7 +36,7 @@ public class KafkaSendMqServiceImpl implements SendMqService {
@Override
public void send(String topic, String jsonValue, String tagId) {
if (CharSequenceUtil.isNotBlank(tagId)) {
- List<Header> headers = Arrays.asList(new RecordHeader(tagIdKey, tagId.getBytes(StandardCharsets.UTF_8)));
+ List<Header> headers = Collections.singletonList(new RecordHeader(tagIdKey, tagId.getBytes(StandardCharsets.UTF_8)));
kafkaTemplate.send(new ProducerRecord(topic, null, null, null, jsonValue, headers));
return;
}
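The raw new ProducerRecord(...) in the context line maps to the (topic, partition, timestamp, key, value, headers) constructor, with nulls letting Kafka pick the partition and timestamp. A typed, self-contained sketch of sending one tagged record (broker address, topic, and header key are placeholders):

```java
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.StringSerializer;

public class TaggedSendSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            List<Header> headers = Collections.singletonList(
                    new RecordHeader("kafka_tag_id", "tag-A".getBytes(StandardCharsets.UTF_8)));
            // (topic, partition, timestamp, key, value, headers); nulls let Kafka choose.
            producer.send(new ProducerRecord<>("austinTopic", null, null, null, "{\"hello\":1}", headers));
        }
    }
}
```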

@@ -12,7 +12,7 @@ import org.springframework.context.ApplicationEvent;
@Getter
public class AustinSpringEventBusEvent extends ApplicationEvent {
- private AustinSpringEventSource austinSpringEventSource;
+ private final AustinSpringEventSource austinSpringEventSource;
public AustinSpringEventBusEvent(Object source, AustinSpringEventSource austinSpringEventSource) {
super(source);

@@ -23,7 +23,7 @@ public class ConfigServiceImpl implements ConfigService {
*
*/
private static final String PROPERTIES_PATH = "local.properties";
- private Props props = new Props(PROPERTIES_PATH, StandardCharsets.UTF_8);
+ private final Props PROPS = new Props(PROPERTIES_PATH, StandardCharsets.UTF_8);
/**
* apollo
@@ -49,7 +49,7 @@ public class ConfigServiceImpl implements ConfigService {
} else if (Boolean.TRUE.equals(enableNacos)) {
return nacosUtils.getProperty(key, defaultValue);
} else {
- return props.getProperty(key, defaultValue);
+ return PROPS.getProperty(key, defaultValue);
}
}
}

@@ -45,8 +45,8 @@ public class AccountUtils {
/**
* /
*/
- private ConcurrentMap<ChannelAccount, WxMpService> officialAccountServiceMap = new ConcurrentHashMap<>();
- private ConcurrentMap<ChannelAccount, WxMaService> miniProgramServiceMap = new ConcurrentHashMap<>();
+ private final ConcurrentMap<ChannelAccount, WxMpService> officialAccountServiceMap = new ConcurrentHashMap<>();
+ private final ConcurrentMap<ChannelAccount, WxMaService> miniProgramServiceMap = new ConcurrentHashMap<>();
@Bean
public RedisTemplateWxRedisOps redisTemplateWxRedisOps() {

@@ -45,7 +45,7 @@ public class ContentHolderUtil {
@Override
public String resolvePlaceholder(String placeholderName) {
if (Objects.isNull(paramMap)) {
- String errorStr = MessageFormat.format("template:{0} require param:{1},but not exist! paramMap:{2}", template, placeholderName, paramMap);
+ String errorStr = MessageFormat.format("template:{0} require param:{1},but not exist! paramMap:{2}", template, placeholderName, null);
throw new IllegalArgumentException(errorStr);
}
String value = paramMap.get(placeholderName);

@@ -31,7 +31,7 @@ public class LogUtils extends CustomLogListener {
* @OperationLog
*/
@Override
- public void createLog(LogDTO logDTO) throws Exception {
+ public void createLog(LogDTO logDTO){
log.info(JSON.toJSONString(logDTO));
}

@@ -177,7 +177,7 @@ public class OkHttpUtils {
private String execute(Request request) {
try (Response response = okHttpClient.newCall(request).execute()) {
if (response.isSuccessful()) {
- return response.body().string();
+ return String.valueOf(response.body());
}
} catch (Exception e) {
log.error(Throwables.getStackTraceAsString(e));
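Worth flagging on this hunk: String.valueOf(response.body()) stringifies the ResponseBody object (its toString), not the HTTP payload, so the change trades an NPE warning for different behavior. Reading the payload null-safely still goes through body.string(); a sketch:

```java
import java.io.IOException;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.ResponseBody;

public class OkHttpBodySketch {
    private static final OkHttpClient CLIENT = new OkHttpClient();

    static String execute(Request request) throws IOException {
        try (Response response = CLIENT.newCall(request).execute()) {
            ResponseBody body = response.body();
            if (response.isSuccessful() && body != null) {
                return body.string(); // reads and closes the payload
            }
            return null;
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(execute(new Request.Builder().url("https://example.com").build()));
    }
}
```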

@@ -171,14 +171,15 @@ public class RedisUtils {
*/
public Boolean execLimitLua(RedisScript<Long> redisScript, List<String> keys, String... args) {
+ // normalize the varargs into an array
+ String[] argsArray = args != null ? args : new String[0];
try {
- Long execute = redisTemplate.execute(redisScript, keys, args);
+ Long execute = redisTemplate.execute(redisScript, keys, (Object[]) argsArray);
if (Objects.isNull(execute)) {
return false;
}
return CommonConstant.TRUE.equals(execute.intValue());
} catch (Exception e) {
log.error("redis execLimitLua fail! e:{}", Throwables.getStackTraceAsString(e));
}
return false;
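Tying this to the SlideWindowLimitService hunk above: the null guard means the Lua script always receives a well-formed argument list, and the (Object[]) cast passes each string as a separate script argument (ARGV) rather than relying on implicit varargs binding. A condensed sketch of script setup plus invocation (assumes limit.lua returns 1 when the caller is rate-limited):

```java
import java.util.Collections;
import org.springframework.core.io.ClassPathResource;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.core.script.DefaultRedisScript;
import org.springframework.scripting.support.ResourceScriptSource;

public class LimitCallSketch {
    private final StringRedisTemplate redisTemplate;
    private final DefaultRedisScript<Long> redisScript = new DefaultRedisScript<>();

    public LimitCallSketch(StringRedisTemplate redisTemplate) {
        this.redisTemplate = redisTemplate;
        redisScript.setResultType(Long.class);
        redisScript.setScriptSource(new ResourceScriptSource(new ClassPathResource("limit.lua")));
    }

    /** Returns true when the script reports the caller as limited. */
    public boolean isLimited(String key, String... args) {
        String[] argsArray = args != null ? args : new String[0]; // normalize null varargs
        // Each string becomes one ARGV entry in the script.
        Long result = redisTemplate.execute(redisScript, Collections.singletonList(key), (Object[]) argsArray);
        return result != null && result.intValue() == 1; // assumed script contract
    }
}
```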

@@ -34,7 +34,7 @@ public class AustinApplication implements CommandLineRunner {
}
@Override
- public void run(String... args) throws Exception {
+ public void run(String... args) {
log.info(AnsiOutput.toString(AustinConstant.PROJECT_BANNER, "\n", AnsiColor.GREEN, AustinConstant.PROJECT_NAME, AnsiColor.DEFAULT, AnsiStyle.FAINT));
log.info("Austin start succeeded, Index >> http://127.0.0.1:{}/", serverPort);
log.info("Austin start succeeded, Swagger Url >> http://127.0.0.1:{}/swagger-ui/index.html", serverPort);

@@ -1,10 +1,12 @@
package com.java3y.austin.web.controller;
import cn.binarywang.wx.miniapp.api.WxMaService;
import cn.hutool.http.HttpUtil;
import com.alipay.api.AlipayClient;
import com.alipay.api.domain.AlipayOpenMiniMessageTemplateBatchqueryModel;
import com.alipay.api.domain.MerchantMsgTemplateVO;
import com.alipay.api.request.AlipayOpenMiniMessageTemplateBatchqueryRequest;
import com.alipay.api.response.AlipayOpenMiniMessageTemplateBatchqueryResponse;
import com.google.common.base.Throwables;
import com.java3y.austin.common.constant.SendChanelUrlConstant;
import com.java3y.austin.common.dto.account.AlipayMiniProgramAccount;
@@ -19,15 +21,11 @@ import com.java3y.austin.web.vo.amis.CommonAmisVo;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import lombok.extern.slf4j.Slf4j;
import me.chanjar.weixin.common.bean.subscribemsg.TemplateInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
- import com.alipay.api.request.AlipayOpenMiniMessageTemplateBatchqueryRequest;
- import com.alipay.api.response.AlipayOpenMiniMessageTemplateBatchqueryResponse;
- import com.alipay.api.domain.AlipayOpenMiniMessageTemplateBatchqueryModel;
import java.util.ArrayList;
import java.util.List;
@@ -62,7 +60,7 @@ public class AlipayMiniProgramController {
AlipayOpenMiniMessageTemplateBatchqueryModel model = new AlipayOpenMiniMessageTemplateBatchqueryModel();
// set the template status list
- List<String> statusList = new ArrayList<String>();
+ List<String> statusList = new ArrayList<>();
statusList.add("STARTED");
model.setStatusList(statusList);
@@ -114,7 +112,7 @@ public class AlipayMiniProgramController {
AlipayOpenMiniMessageTemplateBatchqueryModel model = new AlipayOpenMiniMessageTemplateBatchqueryModel();
// set the template status list
- List<String> statusList = new ArrayList<String>();
+ List<String> statusList = new ArrayList<>();
statusList.add("STARTED");
model.setStatusList(statusList);

@@ -5,6 +5,7 @@ import cn.hutool.core.text.CharSequenceUtil;
import cn.hutool.core.text.StrPool;
import cn.hutool.core.util.IdUtil;
import com.alibaba.fastjson.JSON;
+ import com.alibaba.fastjson.TypeReference;
import com.google.common.base.Throwables;
import com.java3y.austin.common.enums.RespStatusEnum;
import com.java3y.austin.common.vo.BasicResultVO;
@@ -137,7 +138,7 @@ public class MessageTemplateController {
@ApiOperation("/测试发送接口")
public SendResponse test(@RequestBody MessageTemplateParam messageTemplateParam) {
- Map<String, String> variables = JSON.parseObject(messageTemplateParam.getMsgContent(), Map.class);
+ Map<String, String> variables = JSON.parseObject(messageTemplateParam.getMsgContent(), new TypeReference<Map<String, String>>() {});
MessageParam messageParam = MessageParam.builder().receiver(messageTemplateParam.getReceiver()).variables(variables).build();
SendRequest sendRequest = SendRequest.builder().code(BusinessCode.COMMON_SEND.getCode()).messageTemplateId(messageTemplateParam.getId()).messageParam(messageParam).build();
SendResponse response = sendService.send(sendRequest);

@@ -35,7 +35,7 @@ public class ExceptionHandlerAdvice {
@ResponseStatus(HttpStatus.OK)
public BasicResultVO<RespStatusEnum> commonResponse(CommonException ce) {
log.error(Throwables.getStackTrace(ce));
- return new BasicResultVO(ce.getCode(), ce.getMessage(), ce.getRespStatusEnum());
+ return new BasicResultVO<>(ce.getCode(), ce.getMessage(), ce.getRespStatusEnum());
}
}

@@ -101,7 +101,7 @@ public class DataServiceImpl implements DataService {
Integer sendDate = Integer.valueOf(DateUtil.format(new Date(dataParam.getDateTime() * 1000L), DatePattern.PURE_DATE_PATTERN));
List<SmsRecord> smsRecordList = smsRecordDao.findByPhoneAndSendDate(Long.valueOf(dataParam.getReceiver()), sendDate);
if (CollUtil.isEmpty(smsRecordList)) {
- return SmsTimeLineVo.builder().items(Arrays.asList(SmsTimeLineVo.ItemsVO.builder().build())).build();
+ return SmsTimeLineVo.builder().items(Collections.singletonList(SmsTimeLineVo.ItemsVO.builder().build())).build();
}
Map<String, List<SmsRecord>> maps = smsRecordList.stream().collect(Collectors.groupingBy(o -> o.getPhone() + o.getSeriesId()));

@@ -55,7 +55,7 @@ public class MaterialServiceImpl implements MaterialService {
String accessToken = accessTokenUtils.getAccessToken(ChannelType.DING_DING_WORK_NOTICE.getCode(), Integer.valueOf(sendAccount), account, false);
DingTalkClient client = new DefaultDingTalkClient(SendChanelUrlConstant.DING_DING_UPLOAD_URL);
OapiMediaUploadRequest req = new OapiMediaUploadRequest();
- FileItem item = new FileItem(new StringBuilder().append(IdUtil.fastSimpleUUID()).append(file.getOriginalFilename()).toString(),
+ FileItem item = new FileItem(IdUtil.fastSimpleUUID() + file.getOriginalFilename(),
file.getInputStream());
req.setMedia(item);
req.setType(EnumUtil.getDescriptionByCode(Integer.valueOf(fileType), FileType.class));

@@ -57,7 +57,7 @@ public class Convert4Amis {
* (json
* ()
*/
- private static final List<String> FLAT_FIELD_NAME = Arrays.asList("msgContent");
+ private static final List<String> FLAT_FIELD_NAME = Collections.singletonList("msgContent");
/**
* jsonArray
@@ -344,7 +344,7 @@ public class Convert4Amis {
List<CommonAmisVo.ColumnsDTO> columnsDtoS = new ArrayList<>();
// loop with i as the index
for (int i=0;i<data.length;i++) {
- String name ="keyword"+String.valueOf(i+1);
+ String name ="keyword"+ (i + 1);
String label = data[i];
CommonAmisVo.ColumnsDTO columnsDTO = CommonAmisVo.ColumnsDTO.builder()
.name(name).type("input-text").required(true).quickEdit(true).label(label).build();
@@ -437,9 +437,9 @@ public class Convert4Amis {
return EchartsVo.builder()
.title(EchartsVo.TitleVO.builder().text(title).build())
- .legend(EchartsVo.LegendVO.builder().data(Arrays.asList("人数")).build())
+ .legend(EchartsVo.LegendVO.builder().data(Collections.singletonList("人数")).build())
.xAxis(EchartsVo.XaxisVO.builder().data(xAxisList).build())
- .series(Arrays.asList(EchartsVo.SeriesVO.builder().name("人数").type("bar").data(actualData).build()))
+ .series(Collections.singletonList(EchartsVo.SeriesVO.builder().name("人数").type("bar").data(actualData).build()))
.yAxis(EchartsVo.YaxisVO.builder().build())
.tooltip(EchartsVo.TooltipVO.builder().build())
.build();
@@ -465,11 +465,11 @@ public class Convert4Amis {
itemsVO.setBusinessId(String.valueOf(smsRecord.getMessageTemplateId()));
itemsVO.setContent(smsRecord.getMsgContent());
itemsVO.setSendType(EnumUtil.getDescriptionByCode(smsRecord.getStatus(), SmsStatus.class));
- itemsVO.setSendTime(DateUtil.format(new Date(Long.valueOf(smsRecord.getCreated() * 1000L)), DatePattern.NORM_DATETIME_PATTERN));
+ itemsVO.setSendTime(DateUtil.format(new Date(smsRecord.getCreated() * 1000L), DatePattern.NORM_DATETIME_PATTERN));
} else {
itemsVO.setReceiveType(EnumUtil.getDescriptionByCode(smsRecord.getStatus(), SmsStatus.class));
itemsVO.setReceiveContent(smsRecord.getReportContent());
- itemsVO.setReceiveTime(DateUtil.format(new Date(Long.valueOf(smsRecord.getUpdated() * 1000L)), DatePattern.NORM_DATETIME_PATTERN));
+ itemsVO.setReceiveTime(DateUtil.format(new Date(smsRecord.getUpdated() * 1000L), DatePattern.NORM_DATETIME_PATTERN));
}
}
itemsVoS.add(itemsVO);

@@ -4,9 +4,9 @@ import lombok.extern.slf4j.Slf4j;
import org.springframework.web.multipart.MultipartFile;
import java.io.File;
- import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+ import java.nio.file.Files;
/**
@@ -27,13 +27,13 @@ public class SpringFileUtils {
public static File getFile(MultipartFile multipartFile) {
String fileName = multipartFile.getOriginalFilename();
File file = new File(fileName);
- try (OutputStream out = new FileOutputStream(file)){
+ try (OutputStream out = Files.newOutputStream(file.toPath())){
byte[] ss = multipartFile.getBytes();
- for (int i = 0; i < ss.length; i++) {
- out.write(ss[i]);
+ for (byte s : ss) {
+ out.write(s);
}
} catch (IOException e) {
- log.error("SpringFileUtils#getFile multipartFile is converted to File error:{}", e);
+ log.error("SpringFileUtils#getFile multipartFile is converted to File error:{}", e.toString());
return null;
}
return file;
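Even after the cleanup, the loop still writes one byte per call; OutputStream.write(byte[]) writes the whole buffer at once, and Spring's MultipartFile.transferTo copies the upload in a single step. A sketch of the shorter form:

```java
import java.io.File;
import java.io.IOException;
import org.springframework.web.multipart.MultipartFile;

public class TransferToSketch {
    /** Copies an upload to a local file in one call instead of a byte loop. */
    public static File getFile(MultipartFile multipartFile) {
        File file = new File(multipartFile.getOriginalFilename());
        try {
            multipartFile.transferTo(file); // streams the whole payload
            return file;
        } catch (IOException e) {
            return null;
        }
    }
}
```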

@@ -37,11 +37,11 @@ services:
  - mongo
  - elasticsearch
  ports:
-   - 9009:9000
-   - 1514:1514
-   - 1514:1514/udp
-   - 12201:12201
-   - 12201:12201/udp
+   - "9009:9000"
+   - "1514:1514"
+   - "1514:1514/udp"
+   - "12201:12201"
+   - "12201:12201/udp"
networks:
graylog:
driver: bridge
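Quoting the port mappings is more than style: docker-compose reads YAML 1.1, which parses an unquoted xx:yy as a base-60 integer whenever both parts are below 60 (56:56 becomes the integer 3416), silently breaking the mapping. The quoted form is unambiguous:

```yaml
ports:
  # Unquoted HOST:CONTAINER pairs with both numbers below 60 are parsed by
  # YAML 1.1 as base-60 integers, so compose files conventionally quote
  # every port mapping:
  - "9009:9000"
  - "1514:1514/udp"
```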

@@ -6,7 +6,7 @@ services:
  container_name: mysql
  restart: always
  ports:
-   - 3306:3306
+   - "3306:3306"
volumes:
- mysql-data:/var/lib/mysql
environment:

@@ -22,8 +22,8 @@ services:
  - /home/nacos/cluster-logs/nacos-server01:/home/nacos/logs
  - /home/nacos/init.d:/home/nacos/init.d
  ports:
-   - 8846:8848
-   - 9555:9555
+   - "8846:8848"
+   - "9555:9555"
restart: on-failure
nacos2:
@@ -47,7 +47,7 @@ services:
  - /home/nacos/cluster-logs/nacos-server02:/home/nacos/logs
  - /home/nacos/init.d:/home/nacos/init.d
  ports:
-   - 8847:8848
+   - "8847:8848"
restart: on-failure
nacos3:
@@ -71,5 +71,5 @@ services:
  - /home/nacos/cluster-logs/nacos-server03:/home/nacos/logs
  - /home/nacos/init.d:/home/nacos/init.d
  ports:
-   - 8848:8848
+   - "8848:8848"
restart: on-failure

@@ -21,7 +21,7 @@ services:
  - /home/nacos/single-logs/nacos-server:/home/nacos/logs
  - /home/nacos/init.d:/home/nacos/init.d
  ports:
-   - 8848:8848
-   - 9848:9848
-   - 9849:9849
+   - "8848:8848"
+   - "9848:9848"
+   - "9849:9849"
restart: on-failure

@@ -1,925 +0,0 @@
%% -*- mode: erlang -*-
%% ----------------------------------------------------------------------------
%% Classic RabbitMQ configuration format example.
%% This format should be considered DEPRECATED.
%%
%% Users of RabbitMQ 3.7.x
%% or later should prefer the new style format (rabbitmq.conf)
%% in combination with an advanced.config file (as needed).
%%
%% Related doc guide: https://www.rabbitmq.com/configure.html. See
%% https://rabbitmq.com/documentation.html for documentation ToC.
%% ----------------------------------------------------------------------------
[
{rabbit,
[%%
%% Networking
%% ====================
%%
%% Related doc guide: https://www.rabbitmq.com/networking.html.
%% By default, RabbitMQ will listen on all interfaces, using
%% the standard (reserved) AMQP port.
%%
%% {tcp_listeners, [5672]},
%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
%% For example, to listen only on localhost for both IPv4 and IPv6:
%%
%% {tcp_listeners, [{"127.0.0.1", 5672},
%% {"::1", 5672}]},
%% TLS listeners are configured in the same fashion as TCP listeners,
%% including the option to control the choice of interface.
%%
%% {ssl_listeners, [5671]},
%% Number of Erlang processes that will accept connections for the TCP
%% and TLS listeners.
%%
%% {num_tcp_acceptors, 10},
%% {num_ssl_acceptors, 1},
%% Maximum time for AMQP 0-8/0-9/0-9-1 handshake (after socket connection
%% and TLS handshake), in milliseconds.
%%
%% {handshake_timeout, 10000},
%% Set to 'true' to perform reverse DNS lookups when accepting a
%% connection. Hostnames will then be shown instead of IP addresses
%% in rabbitmqctl and the management plugin.
%%
%% {reverse_dns_lookups, false},
%%
%% Security, Access Control
%% ========================
%%
%% Related doc guide: https://www.rabbitmq.com/access-control.html.
%% The default "guest" user is only permitted to access the server
%% via a loopback interface (e.g. localhost).
%% {loopback_users, [<<"guest">>]},
%%
%% Uncomment the following line if you want to allow access to the
%% guest user from anywhere on the network.
%% {loopback_users, []},
%% TLS configuration.
%%
%% Related doc guide: https://www.rabbitmq.com/ssl.html.
%%
%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
%% {certfile, "/path/to/server/cert.pem"},
%% {keyfile, "/path/to/server/key.pem"},
%% {verify, verify_peer},
%% {fail_if_no_peer_cert, false}]},
%% Choose the available SASL mechanism(s) to expose.
%% The two default (built in) mechanisms are 'PLAIN' and
%% 'AMQPLAIN'. Additional mechanisms can be added via
%% plugins.
%%
%% Related doc guide: https://www.rabbitmq.com/authentication.html.
%%
%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
%% Select an authentication database to use. RabbitMQ comes bundled
%% with a built-in auth-database, based on mnesia.
%%
%% {auth_backends, [rabbit_auth_backend_internal]},
%% Configurations supporting the rabbitmq_auth_mechanism_ssl and
%% rabbitmq_auth_backend_ldap plugins.
%%
%% NB: These options require that the relevant plugin is enabled.
%% Related doc guide: https://www.rabbitmq.com/plugins.html for further details.
%% The RabbitMQ-auth-mechanism-ssl plugin makes it possible to
%% authenticate a user based on the client's TLS certificate.
%%
%% To use auth-mechanism-ssl, add to or replace the auth_mechanisms
%% list with the entry 'EXTERNAL'.
%%
%% {auth_mechanisms, ['EXTERNAL']},
%% The rabbitmq_auth_backend_ldap plugin allows the broker to
%% perform authentication and authorisation by deferring to an
%% external LDAP server.
%%
%% For more information about configuring the LDAP backend, see
%% https://www.rabbitmq.com/ldap.html.
%%
%% Enable the LDAP auth backend by adding to or replacing the
%% auth_backends entry:
%%
%% {auth_backends, [rabbit_auth_backend_ldap]},
%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
%% configuration section later in this file and the README in
%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
%% details.
%%
%% To use the TLS cert's CN instead of its DN as the username
%%
%% {ssl_cert_login_from, distinguished_name},
%% TLS handshake timeout, in milliseconds.
%%
%% {ssl_handshake_timeout, 5000},
%% Makes RabbitMQ accept SSLv3 client connections by default.
%% DO NOT DO THIS IF YOU CAN HELP IT.
%%
%% {ssl_allow_poodle_attack, false},
%% Password hashing implementation. Will only affect newly
%% created users. To recalculate hash for an existing user
%% it's necessary to update her password.
%%
%% When importing definitions exported from versions earlier
%% than 3.6.0, it is possible to go back to MD5 (only do this
%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
%%
%% To use SHA-512, set to rabbit_password_hashing_sha512.
%%
%% {password_hashing_module, rabbit_password_hashing_sha256},
%% Configuration entry encryption.
%% Related doc guide: https://www.rabbitmq.com/configure.html#configuration-encryption
%%
%% To specify the passphrase in the configuration file:
%%
%% {config_entry_decoder, [{passphrase, <<"mypassphrase">>}]}
%%
%% To specify the passphrase in an external file:
%%
%% {config_entry_decoder, [{passphrase, {file, "/path/to/passphrase/file"}}]}
%%
%% To make the broker request the passphrase when it starts:
%%
%% {config_entry_decoder, [{passphrase, prompt}]}
%%
%% To change encryption settings:
%%
%% {config_entry_decoder, [{cipher, aes_cbc256},
%% {hash, sha512},
%% {iterations, 1000}]}
%%
%% Default User / VHost
%% ====================
%%
%% On first start RabbitMQ will create a vhost and a user. These
%% config items control what gets created. See
%% https://www.rabbitmq.com/access-control.html for further
%% information about vhosts and access control.
%%
%% {default_vhost, <<"/">>},
%% {default_user, <<"guest">>},
%% {default_pass, <<"guest">>},
%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
%% Tags for default user
%%
%% Related doc guide: https://www.rabbitmq.com/management.html.
%%
%% {default_user_tags, [administrator]},
%%
%% Additional network and protocol related configuration
%% =====================================================
%%
%% Sets the default AMQP 0-9-1 heartbeat timeout in seconds.
%% Values lower than 6 can produce false positives and are not
%% recommended.
%%
%% Related doc guides:
%%
%% * https://www.rabbitmq.com/heartbeats.html
%% * https://www.rabbitmq.com/networking.html
%%
%% {heartbeat, 60},
%% Set the max permissible size of an AMQP frame (in bytes).
%%
%% {frame_max, 131072},
%% Set the max frame size the server will accept before connection
%% tuning occurs
%%
%% {initial_frame_max, 4096},
%% Set the max permissible number of channels per connection.
%% 0 means "no limit".
%%
%% {channel_max, 0},
%% Set the max permissible number of client connections to the node.
%% `infinity` means "no limit".
%%
%% This limit applies to client connections to all listeners (regardless of
%% the protocol, whether TLS is used and so on). CLI tools and inter-node
%% connections are exempt.
%%
%% When client connections are rapidly opened in succession, it is possible
%% for the total connection count to go slightly higher than the configured limit.
%% The limit works well as a general safety measure.
%%
%% Clients that are hitting the limit will see their TCP connections fail or time out.
%%
%% Introduced in 3.6.13.
%%
%% Related doc guide: https://www.rabbitmq.com/networking.html.
%%
%% {connection_max, infinity},
%% TCP socket options.
%%
%% Related doc guide: https://www.rabbitmq.com/networking.html.
%%
%% {tcp_listen_options, [{backlog, 128},
%% {nodelay, true},
%% {exit_on_close, false}]},
%%
%% Resource Limits & Flow Control
%% ==============================
%%
%% Related doc guide: https://www.rabbitmq.com/memory.html, https://www.rabbitmq.com/memory-use.html.
%% Memory-based Flow Control threshold.
%%
%% {vm_memory_high_watermark, 0.4},
%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
%%
%% {vm_memory_high_watermark, {absolute, 1073741824}},
%%
%% Or you can set absolute value using memory units (with RabbitMQ 3.6.0+).
%%
%% {vm_memory_high_watermark, {absolute, "1024M"}},
%%
%% Supported unit symbols:
%%
%% k, kiB: kibibytes (2^10 - 1,024 bytes)
%% M, MiB: mebibytes (2^20 - 1,048,576 bytes)
%% G, GiB: gibibytes (2^30 - 1,073,741,824 bytes)
%% kB: kilobytes (10^3 - 1,000 bytes)
%% MB: megabytes (10^6 - 1,000,000 bytes)
%% GB: gigabytes (10^9 - 1,000,000,000 bytes)
%% Fraction of the high watermark limit at which queues start to
%% page message out to disc in order to free up memory.
%% For example, when vm_memory_high_watermark is set to 0.4 and this value is set to 0.5,
%% paging can begin as early as when 20% of total available RAM is used by the node.
%%
%% Values greater than 1.0 can be dangerous and should be used carefully.
%%
%% One alternative to this is to use durable queues and publish messages
%% as persistent (delivery mode = 2). With this combination queues will
%% move messages to disk much more rapidly.
%%
%% Another alternative is to configure queues to page all messages (both
%% persistent and transient) to disk as quickly
%% as possible, see https://www.rabbitmq.com/lazy-queues.html.
%%
%% {vm_memory_high_watermark_paging_ratio, 0.5},
%% Selects Erlang VM memory consumption calculation strategy. Can be `allocated`, `rss` or `legacy` (aliased as `erlang`),
%% Introduced in 3.6.11. `rss` is the default as of 3.6.12.
%% See https://github.com/rabbitmq/rabbitmq-server/issues/1223 and rabbitmq/rabbitmq-common#224 for background.
%% {vm_memory_calculation_strategy, rss},
%% Interval (in milliseconds) at which we perform the check of the memory
%% levels against the watermarks.
%%
%% {memory_monitor_interval, 2500},
%% The total memory available can be calculated from the OS resources
%% - default option - or provided as a configuration parameter:
%% {total_memory_available_override_value, "5000MB"},
%% Set disk free limit (in bytes). Once free disk space reaches this
%% lower bound, a disk alarm will be set - see the documentation
%% listed above for more details.
%%
%% {disk_free_limit, 50000000},
%%
%% Or you can set it using memory units (same as in vm_memory_high_watermark)
%% with RabbitMQ 3.6.0+.
%% {disk_free_limit, "50MB"},
%% {disk_free_limit, "50000kB"},
%% {disk_free_limit, "2GB"},
%% Alternatively, we can set a limit relative to total available RAM.
%%
%% Values lower than 1.0 can be dangerous and should be used carefully.
%% {disk_free_limit, {mem_relative, 2.0}},
%%
%% Clustering
%% =====================
%%
%% Queue master location strategy:
%% * <<"min-masters">>
%% * <<"client-local">>
%% * <<"random">>
%%
%% Related doc guide: https://www.rabbitmq.com/ha.html#queue-master-location
%%
%% {queue_master_locator, <<"client-local">>},
%% Batch size (number of messages) used during eager queue mirror synchronisation.
%% Related doc guide: https://www.rabbitmq.com/ha.html#batch-sync. When average message size is relatively large
%% (say, 10s of kilobytes or greater), reducing this value will decrease peak amount
%% of RAM used by newly joining nodes that need eager synchronisation.
%%
%% {mirroring_sync_batch_size, 4096},
%% Enables flow control between queue mirrors.
%% Disabling this can be dangerous and is not recommended.
%% When flow control is disabled, queue masters can outpace mirrors and not allow mirrors to catch up.
%% Mirrors will end up using increasingly more RAM, eventually triggering a memory alarm.
%%
%% {mirroring_flow_control, true},
%% Additional server properties to announce to connecting clients.
%%
%% {server_properties, []},
%% How to respond to cluster partitions.
%% Related doc guide: https://www.rabbitmq.com/partitions.html
%%
%% {cluster_partition_handling, ignore},
%% Mirror sync batch size, in messages. Increasing this will speed
%% up syncing but total batch size in bytes must not exceed 2 GiB.
%% Available in RabbitMQ 3.6.0 or later.
%%
%% {mirroring_sync_batch_size, 4096},
%% Make clustering happen *automatically* at startup - only applied
%% to nodes that have just been reset or started for the first time.
%% Related doc guide: https://www.rabbitmq.com/clustering.html#auto-config
%%
%% {cluster_nodes, {['rabbit@my.host.com'], disc}},
%% Interval (in milliseconds) at which we send keepalive messages
%% to other cluster members. Note that this is not the same thing
%% as net_ticktime; missed keepalive messages will not cause nodes
%% to be considered down.
%%
%% {cluster_keepalive_interval, 10000},
%%
%% Statistics Collection
%% =====================
%%
%% Set (internal) statistics collection granularity.
%%
%% {collect_statistics, none},
%% Statistics collection interval (in milliseconds). Increasing
%% this will reduce the load on management database.
%%
%% {collect_statistics_interval, 5000},
%% Enables vhosts tracing.
%%
%% {trace_vhosts, []},
%% Explicitly enable/disable HiPE compilation.
%%
%% {hipe_compile, false},
%% Number of delegate processes to use for intra-cluster communication.
%% On a node which is part of cluster, has more than 16 cores and plenty of network bandwidth,
%% it may make sense to increase this value.
%%
%% {delegate_count, 16},
%% Number of times to retry while waiting for internal database tables (Mnesia tables) to sync
%% from a peer. In deployments where nodes can take a long time to boot, this value
%% may need increasing.
%%
%% {mnesia_table_loading_retry_limit, 10},
%% Amount of time in milliseconds which this node will wait for internal database tables (Mnesia tables) to sync
%% from a peer. In deployments where nodes can take a long time to boot, this value
%% may need increasing.
%%
%% {mnesia_table_loading_retry_timeout, 30000},
%% Size in bytes below which to embed messages in the queue index.
%% Related doc guide: https://www.rabbitmq.com/persistence-conf.html
%%
%% {queue_index_embed_msgs_below, 4096},
%% Maximum number of queue index entries to keep in journal
%% Related doc guide: https://www.rabbitmq.com/persistence-conf.html.
%%
%% {queue_index_max_journal_entries, 32768},
%% Number of credits that a queue process is given by the message store
%% By default, a queue process is given 4000 message store credits,
%% and then 800 for every 800 messages that it processes.
%%
%% {msg_store_credit_disc_bound, {4000, 800}},
%% Minimum number of messages with their queue position held in RAM required
%% to trigger writing their queue position to disk.
%%
%% This value MUST be higher than the initial msg_store_credit_disc_bound value,
%% otherwise paging performance may worsen.
%%
%% {msg_store_io_batch_size, 4096},
%% Number of credits that a connection, channel or queue are given.
%%
%% By default, every connection, channel or queue is given 400 credits,
%% and then 200 for every 200 messages that it sends to a peer process.
%% Increasing these values may help with throughput but also can be dangerous:
%% high credit flow values are no different from not having flow control at all.
%%
%% Related doc guide: https://www.rabbitmq.com/blog/2015/10/06/new-credit-flow-settings-on-rabbitmq-3-5-5/
%% and http://alvaro-videla.com/2013/09/rabbitmq-internals-credit-flow-for-erlang-processes.html.
%%
%% {credit_flow_default_credit, {400, 200}},
%% Number of milliseconds before a channel operation times out.
%%
%% {channel_operation_timeout, 15000},
%% Number of queue operations required to trigger an explicit garbage collection.
%% Increasing this value may reduce CPU load and increase peak RAM consumption of queues.
%%
%% {queue_explicit_gc_run_operation_threshold, 1000},
%% Number of lazy queue operations required to trigger an explicit garbage collection.
%% Increasing this value may reduce CPU load and increase peak RAM consumption of lazy queues.
%%
%% {lazy_queue_explicit_gc_run_operation_threshold, 1000},
%% Number of times disk monitor will retry free disk space queries before
%% giving up.
%%
%% {disk_monitor_failure_retries, 10},
%% Milliseconds to wait between disk monitor retries on failures.
%%
%% {disk_monitor_failure_retry_interval, 120000},
%% Whether or not to enable background periodic forced GC runs for all
%% Erlang processes on the node in "waiting" state.
%%
%% Disabling background GC may reduce latency for client operations,
%% keeping it enabled may reduce median RAM usage by the binary heap
%% (see https://www.erlang-solutions.com/blog/erlang-garbage-collector.html).
%%
%% Before enabling this option, please take a look at the memory
%% breakdown (https://www.rabbitmq.com/memory-use.html).
%%
%% {background_gc_enabled, false},
%% Interval (in milliseconds) at which we run background GC.
%%
%% {background_gc_target_interval, 60000},
%% Message store operations are stored in a sequence of files called segments.
%% This controls max size of a segment file.
%% Increasing this value may speed up (sequential) disk writes but will slow down segment GC process.
%% DO NOT CHANGE THIS for existing installations.
%%
%% {msg_store_file_size_limit, 16777216},
%% Whether or not to enable file write buffering.
%%
%% {fhc_write_buffering, true},
%% Whether or not to enable file read buffering. Enabling
%% this may slightly speed up reads but will also increase
%% node's memory consumption, in particular on boot.
%%
%% {fhc_read_buffering, false}
]},
%% ----------------------------------------------------------------------------
%% Advanced Erlang Networking/Clustering Options.
%%
%% Related doc guide: https://www.rabbitmq.com/clustering.html
%% ----------------------------------------------------------------------------
{kernel,
[%% Sets the net_kernel tick time.
%% Please see http://erlang.org/doc/man/kernel_app.html and
%% https://www.rabbitmq.com/nettick.html for further details.
%%
%% {net_ticktime, 60}
]},
%% ----------------------------------------------------------------------------
%% RabbitMQ Management Plugin
%%
%% Related doc guide: https://www.rabbitmq.com/management.html
%% ----------------------------------------------------------------------------
{rabbitmq_management,
[%% Preload schema definitions from a previously exported definitions file. See
%% https://www.rabbitmq.com/management.html#load-definitions
%%
%% {load_definitions, "/path/to/exported/definitions.json"},
%% Log all requests to the management HTTP API to a directory.
%%
%% {http_log_dir, "/path/to/rabbitmq/logs/http"},
%% Change the port on which the HTTP listener listens,
%% specifying an interface for the web server to bind to.
%% Also set the listener to use TLS and provide TLS options.
%%
%% {listener, [{port, 12345},
%% {ip, "127.0.0.1"},
%% {ssl, true},
%% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"},
%% {certfile, "/path/to/cert.pem"},
%% {keyfile, "/path/to/key.pem"}]}]},
%% One of 'basic', 'detailed' or 'none'. See
%% https://www.rabbitmq.com/management.html#fine-stats for more details.
%% {rates_mode, basic},
%% Configure how long aggregated data (such as message rates and queue
%% lengths) is retained. Please read the plugin's documentation in
%% https://www.rabbitmq.com/management.html#configuration for more
%% details.
%%
%% {sample_retention_policies,
%% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]},
%% {basic, [{60, 5}, {3600, 60}]},
%% {detailed, [{10, 5}]}]}
]},
%% ----------------------------------------------------------------------------
%% RabbitMQ Shovel Plugin
%%
%% Related doc guide: https://www.rabbitmq.com/shovel.html
%% ----------------------------------------------------------------------------
{rabbitmq_shovel,
[{shovels,
[%% A named shovel worker.
%% {my_first_shovel,
%% [
%% List the source broker(s) from which to consume.
%%
%% {sources,
%% [%% URI(s) and pre-declarations for all source broker(s).
%% {brokers, ["amqp://user:password@host.domain/my_vhost"]},
%% {declarations, []}
%% ]},
%% List the destination broker(s) to publish to.
%% {destinations,
%% [%% A singular version of the 'brokers' element.
%% {broker, "amqp://"},
%% {declarations, []}
%% ]},
%% Name of the queue to shovel messages from.
%%
%% {queue, <<"your-queue-name-goes-here">>},
%% Optional prefetch count.
%%
%% {prefetch_count, 10},
%% when to acknowledge messages:
%% - no_ack: never (auto)
%% - on_publish: after each message is republished
%% - on_confirm: when the destination broker confirms receipt
%%
%% {ack_mode, on_confirm},
%% Overwrite fields of the outbound basic.publish.
%%
%% {publish_fields, [{exchange, <<"my_exchange">>},
%% {routing_key, <<"from_shovel">>}]},
%% Static list of basic.properties to set on re-publication.
%%
%% {publish_properties, [{delivery_mode, 2}]},
%% The number of seconds to wait before attempting to
%% reconnect in the event of a connection failure.
%%
%% {reconnect_delay, 2.5}
%% ]} %% End of my_first_shovel
]}
%% Rather than specifying some values per-shovel, you can specify
%% them for all shovels here.
%%
%% {defaults, [{prefetch_count, 0},
%% {ack_mode, on_confirm},
%% {publish_fields, []},
%% {publish_properties, [{delivery_mode, 2}]},
%% {reconnect_delay, 2.5}]}
]},
%% ----------------------------------------------------------------------------
%% RabbitMQ STOMP Plugin
%%
%% Related doc guide: https://www.rabbitmq.com/stomp.html
%% ----------------------------------------------------------------------------
{rabbitmq_stomp,
[%% Network Configuration - the format is generally the same as for the broker
%% Listen only on localhost (ipv4 & ipv6) on a specific port.
%% {tcp_listeners, [{"127.0.0.1", 61613},
%% {"::1", 61613}]},
%% Listen for TLS connections on a specific port.
%% {ssl_listeners, [61614]},
%% Number of Erlang processes that will accept connections for the TCP
%% and TLS listeners.
%%
%% {num_tcp_acceptors, 10},
%% {num_ssl_acceptors, 1},
%% Additional TLS options
%% Extract a name from the client's certificate when using TLS.
%%
%% {ssl_cert_login, true},
%% Set a default user name and password. This is used as the default login
%% whenever a CONNECT frame omits the login and passcode headers.
%%
%% Please note that setting this will allow clients to connect without
%% authenticating!
%%
%% {default_user, [{login, "guest"},
%% {passcode, "guest"}]},
%% If a default user is configured, or you have configured use TLS client
%% certificate based authentication, you can choose to allow clients to
%% omit the CONNECT frame entirely. If set to true, the client is
%% automatically connected as the default user or user supplied in the
%% TLS certificate whenever the first frame sent on a session is not a
%% CONNECT frame.
%%
%% {implicit_connect, true},
%% Whether or not to enable proxy protocol support.
%% Once enabled, clients cannot directly connect to the broker
%% anymore. They must connect through a load balancer that sends the
%% proxy protocol header to the broker at connection time.
%% This setting applies only to STOMP clients, other protocols
%% like MQTT or AMQP have their own setting to enable proxy protocol.
%% See the plugins or broker documentation for more information.
%%
%% {proxy_protocol, false}
]},
%% ----------------------------------------------------------------------------
%% RabbitMQ MQTT Plugin
%%
%% Related doc guide: https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md
%%
%% ----------------------------------------------------------------------------
{rabbitmq_mqtt,
[%% Set the default user name and password. Will be used as the default login
%% if a connecting client provides no other login details.
%%
%% Please note that setting this will allow clients to connect without
%% authenticating!
%%
%% {default_user, <<"guest">>},
%% {default_pass, <<"guest">>},
%% Enable anonymous access. If this is set to false, clients MUST provide
%% login information in order to connect. See the default_user/default_pass
%% configuration elements for managing logins without authentication.
%%
%% {allow_anonymous, true},
%% If you have multiple vhosts, specify the one to which the
%% adapter connects.
%%
%% {vhost, <<"/">>},
%% Specify the exchange to which messages from MQTT clients are published.
%%
%% {exchange, <<"amq.topic">>},
%% Specify TTL (time to live) to control the lifetime of non-clean sessions.
%%
%% {subscription_ttl, 1800000},
%% Set the prefetch count (governing the maximum number of unacknowledged
%% messages that will be delivered).
%%
%% {prefetch, 10},
%% TLS listeners.
%% See https://www.rabbitmq.com/networking.html
%%
%% {tcp_listeners, [1883]},
%% {ssl_listeners, []},
%% Number of Erlang processes that will accept connections for the TCP
%% and TLS listeners.
%% See https://www.rabbitmq.com/networking.html
%%
%% {num_tcp_acceptors, 10},
%% {num_ssl_acceptors, 1},
%% TCP socket options.
%% See https://www.rabbitmq.com/networking.html
%%
%% {tcp_listen_options, [
%% {backlog, 128},
%% {linger, {true, 0}},
%% {exit_on_close, false}
%% ]},
%% Whether or not to enable proxy protocol support.
%% Once enabled, clients cannot directly connect to the broker
%% anymore. They must connect through a load balancer that sends the
%% proxy protocol header to the broker at connection time.
%% This setting applies only to MQTT clients, other protocols
%% like STOMP or AMQP have their own setting to enable proxy protocol.
%% See the plugins or broker documentation for more information.
%%
%% {proxy_protocol, false}
]},
%% ----------------------------------------------------------------------------
%% RabbitMQ AMQP 1.0 Support
%%
%% Related doc guide: https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md
%%
%% ----------------------------------------------------------------------------
{rabbitmq_amqp1_0,
[%% Connections that are not authenticated with SASL will connect as this
%% account. See the README for more information.
%%
%% Please note that setting this will allow clients to connect without
%% authenticating!
%%
%% {default_user, "guest"},
%% Enable protocol strict mode. See the README for more information.
%%
%% {protocol_strict_mode, false}
]},
%% ----------------------------------------------------------------------------
%% RabbitMQ LDAP Plugin
%%
%% Related doc guide: https://www.rabbitmq.com/ldap.html.
%%
%% ----------------------------------------------------------------------------
{rabbitmq_auth_backend_ldap,
[%%
%% Connecting to the LDAP server(s)
%% ================================
%%
%% Specify servers to bind to. You *must* set this in order for the plugin
%% to work properly.
%%
%% {servers, ["your-server-name-goes-here"]},
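%% (Several hosts may be listed, e.g.
%%  {servers, ["ldap1.example.com", "ldap2.example.com"]};
%%  they are attempted in order, which gives simple failover.
%%  The host names here are placeholders.)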
%% Connect to the LDAP server using TLS
%%
%% {use_ssl, false},
%% Specify the LDAP port to connect to
%%
%% {port, 389},
%% LDAP connection timeout, in milliseconds or 'infinity'
%%
%% {timeout, infinity},
%% Enable logging of LDAP queries.
%% One of
%% - false (no logging is performed)
%% - true (verbose logging of the logic used by the plugin)
%% - network (as true, but additionally logs LDAP network traffic)
%%
%% Defaults to false.
%%
%% {log, false},
%%
%% Authentication
%% ==============
%%
%% Pattern to convert the username given through AMQP to a DN before
%% binding
%%
%% {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
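%% (With the pattern above, a client authenticating as "alice" binds as
%%  "cn=alice,ou=People,dc=example,dc=com"; ${username} is substituted
%%  verbatim.)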
%% Alternatively, you can convert a username to a Distinguished
%% Name via an LDAP lookup after binding. See the documentation for
%% full details.
%% When converting a username to a DN via a lookup, set these to
%% the name of the attribute that represents the user name, and the
%% base DN for the lookup query.
%%
%% {dn_lookup_attribute, "userPrincipalName"},
%% {dn_lookup_base, "DC=gopivotal,DC=com"},
%% Controls how to bind for authorisation queries and also to
%% retrieve the details of users logging in without presenting a
%% password (e.g., SASL EXTERNAL).
%% One of
%% - as_user (to bind as the authenticated user - requires a password)
%% - anon (to bind anonymously)
%% - {UserDN, Password} (to bind with a specified user name and password)
%%
%% Defaults to 'as_user'.
%%
%% {other_bind, as_user},
%%
%% Authorisation
%% =============
%%
%% The LDAP plugin can perform a variety of queries against your
%% LDAP server to determine questions of authorisation. See
%% https://www.rabbitmq.com/ldap.html#authorisation for more
%% information.
%% Set the query to use when determining vhost access
%%
%% {vhost_access_query, {in_group,
%% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
%% Set the query to use when determining resource (e.g., queue) access
%%
%% {resource_access_query, {constant, true}},
%% Set queries to determine which tags a user has
%%
%% {tag_queries, []}
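%% An empty list means no user receives any tags. As a sketch, granting
%% the "administrator" tag to members of a (hypothetical) LDAP group
%% would look like:
%%
%% {tag_queries, [{administrator,
%%                 {in_group, "ou=admins,dc=example,dc=com"}}]}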
]},
%% Lager controls logging.
%% See https://github.com/basho/lager for more documentation
{lager, [
%%
%% Log directory, taken from the RABBITMQ_LOG_BASE env variable by default.
%% {log_root, "/var/log/rabbitmq"},
%%
%% All log messages go to the default "sink" configured with
%% the `handlers` parameter. By default, it has a single
%% lager_file_backend handler writing messages to "$nodename.log"
%% (i.e. the value of $RABBIT_LOGS).
%% {handlers, [
%% {lager_file_backend, [{file, "rabbit.log"},
%% {level, info},
%% {date, ""},
%% {size, 0}]}
%% ]},
%%
%% Extra sinks are used in RabbitMQ to categorize messages. By
%% default, those extra sinks are configured to forward messages
%% to the default sink (see above). "rabbit_log_lager_event"
%% is the default category where all RabbitMQ messages without
%% a category go. Messages in the "channel" category go to the
%% "rabbit_channel_lager_event" Lager extra sink, and so on.
%% {extra_sinks, [
%% {rabbit_log_lager_event, [{handlers, [
%% {lager_forwarder_backend,
%% [lager_event, info]}]}]},
%% {rabbit_channel_lager_event, [{handlers, [
%% {lager_forwarder_backend,
%% [lager_event, info]}]}]},
%% {rabbit_connection_lager_event, [{handlers, [
%% {lager_forwarder_backend,
%% [lager_event, info]}]}]},
%% {rabbit_mirroring_lager_event, [{handlers, [
%% {lager_forwarder_backend,
%% [lager_event, info]}]}]}
%% ]}
]}
].

@ -6,7 +6,7 @@ services:
container_name: redis
restart: always
ports:
- 6379:6379
- "6379:6379"
volumes:
- ./redis.conf:/usr/local/etc/redis/redis.conf:rw
- ./data:/data:rw
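    # Quoting "host:container" pairs is the Compose-recommended habit:
    # with container ports below 60, an unquoted value such as 22:22 can
    # be parsed by YAML as a base-60 integer rather than a string.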

@ -5,7 +5,7 @@ services:
image: foxiswho/rocketmq:server
container_name: rocketmq_server
ports:
- 9876:9876
- "9876:9876"
volumes:
- ./rocketmq/rocketmq_server/logs:/opt/logs
- ./rocketmq/rocketmq_server/store:/opt/store
@ -19,8 +19,8 @@ services:
image: foxiswho/rocketmq:broker
container_name: rocketmq_broker
ports:
- 10909:10909
- 10911:10911
- "10909:10909"
- "10911:10911"
volumes:
- ./rocketmq/rocketmq_broker/logs:/opt/logs
- ./rocketmq/rocketmq_broker/store:/opt/store
@ -42,7 +42,7 @@ services:
image: styletang/rocketmq-console-ng
container_name: rocketmq_console_ng
ports:
- 9002:8080
- "9002:8080"
environment:
JAVA_OPTS: "-Drocketmq.namesrv.addr=rocketmq_server:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
depends_on:

@ -1,84 +1,70 @@
drop database if exists austin;
DROP DATABASE IF EXISTS `austin`;
create database austin;
CREATE DATABASE `austin`;
use austin;
USE `austin`;
drop table IF EXISTS message_template;
CREATE TABLE `message_template`
CREATE TABLE IF NOT EXISTS `message_template`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`name` varchar(100) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '标题',
`audit_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '当前消息审核状态: 10.待审核 20.审核成功 30.被拒绝',
`flow_id` varchar(50) COLLATE utf8mb4_unicode_ci COMMENT '工单ID',
`msg_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '当前消息状态10.新建 20.停用 30.启用 40.等待发送 50.发送中 60.发送成功 70.发送失败',
`cron_task_id` bigint(20) COMMENT '定时任务Id (xxl-job-admin返回)',
`cron_crowd_path` varchar(500) COMMENT '定时发送人群的文件路径',
`expect_push_time` varchar(100) COLLATE utf8mb4_unicode_ci COMMENT '期望发送时间0:立即发送 定时任务以及周期任务:cron表达式',
`id_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '消息的发送ID类型10. userId 20.did 30.手机号 40.openId 50.email 60.企业微信userId',
`send_channel` int(10) NOT NULL DEFAULT '0' COMMENT '消息发送渠道10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 飞书应用消息 ',
`template_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '10.运营类 20.技术类接口调用',
`msg_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '10.通知类消息 20.营销类消息 30.验证码类消息',
`shield_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '10.夜间不屏蔽 20.夜间屏蔽 30.夜间屏蔽(次日早上9点发送)',
`msg_content` varchar(4096) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '消息内容 占位符用{$var}表示',
`send_account` int(10) NOT NULL DEFAULT '0' COMMENT '发送账号 一个渠道下可存在多个账号',
`creator` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建者',
`updator` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '更新者',
`auditor` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '审核人',
`team` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '业务方团队',
`proposer` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '业务方',
`is_deleted` tinyint(4) NOT NULL DEFAULT '0' COMMENT '是否删除0.不删除 1.删除',
`created` int(11) NOT NULL DEFAULT '0' COMMENT '创建时间',
`updated` int(11) NOT NULL DEFAULT '0' COMMENT '更新时间',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT,
`name` VARCHAR(100) NOT NULL DEFAULT '' COMMENT '标题',
`audit_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '当前消息审核状态: 10.待审核 20.审核成功 30.被拒绝',
`flow_id` VARCHAR(50) COMMENT '工单ID',
`msg_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '当前消息状态10.新建 20.停用 30.启用 40.等待发送 50.发送中 60.发送成功 70.发送失败',
`cron_task_id` BIGINT(20) COMMENT '定时任务Id (xxl-job-admin返回)',
`cron_crowd_path` VARCHAR(500) COMMENT '定时发送人群的文件路径',
`expect_push_time` VARCHAR(100) COMMENT '期望发送时间0:立即发送 定时任务以及周期任务:cron表达式',
`id_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '消息的发送ID类型10. userId 20.did 30.手机号 40.openId 50.email 60.企业微信userId',
`send_channel` INT(10) NOT NULL DEFAULT '0' COMMENT '消息发送渠道10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 飞书应用消息 ',
`template_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '10.运营类 20.技术类接口调用',
`msg_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '10.通知类消息 20.营销类消息 30.验证码类消息',
`shield_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '10.夜间不屏蔽 20.夜间屏蔽 30.夜间屏蔽(次日早上9点发送)',
`msg_content` VARCHAR(4096) NOT NULL DEFAULT '' COMMENT '消息内容 占位符用{$var}表示',
`send_account` INT(10) NOT NULL DEFAULT '0' COMMENT '发送账号 一个渠道下可存在多个账号',
`creator` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '创建者',
`updator` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '更新者',
`auditor` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '审核人',
`team` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '业务方团队',
`proposer` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '业务方',
`is_deleted` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '是否删除0.不删除 1.删除',
`created` INT(11) NOT NULL DEFAULT '0' COMMENT '创建时间',
`updated` INT(11) NOT NULL DEFAULT '0' COMMENT '更新时间',
PRIMARY KEY (`id`),
KEY `idx_channel` (`send_channel`)
KEY `idx_channel` (`send_channel`)
) ENGINE = InnoDB
AUTO_INCREMENT = 1
DEFAULT CHARSET = utf8mb4
COLLATE = utf8mb4_unicode_ci COMMENT ='消息模板信息';
DEFAULT CHARSET = utf8mb4 COMMENT ='消息模板信息';
CREATE TABLE `sms_record`
CREATE TABLE IF NOT EXISTS `sms_record`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`message_template_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '消息模板ID',
`phone` bigint(20) NOT NULL DEFAULT '0' COMMENT '手机号',
`supplier_id` tinyint(4) NOT NULL DEFAULT '0' COMMENT '发送短信渠道商的ID',
`supplier_name` varchar(40) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '发送短信渠道商的名称',
`msg_content` varchar(600) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '短信发送的内容',
`series_id` varchar(100) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '下发批次的ID',
`charging_num` tinyint(4) NOT NULL DEFAULT '0' COMMENT '计费条数',
`report_content` varchar(50) NOT NULL DEFAULT '' COMMENT '回执内容',
`status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '短信状态: 10.发送 20.成功 30.失败',
`send_date` int(11) NOT NULL DEFAULT '0' COMMENT '发送日期20211112',
`created` int(11) NOT NULL DEFAULT '0' COMMENT '创建时间',
`updated` int(11) NOT NULL DEFAULT '0' COMMENT '更新时间',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT,
`message_template_id` BIGINT(20) NOT NULL DEFAULT '0' COMMENT '消息模板ID',
`phone` BIGINT(20) NOT NULL DEFAULT '0' COMMENT '手机号',
`supplier_id` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '发送短信渠道商的ID',
`supplier_name` VARCHAR(40) NOT NULL DEFAULT '' COMMENT '发送短信渠道商的名称',
`msg_content` VARCHAR(600) NOT NULL DEFAULT '' COMMENT '短信发送的内容',
`series_id` VARCHAR(100) NOT NULL DEFAULT '' COMMENT '下发批次的ID',
`charging_num` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '计费条数',
`report_content` VARCHAR(50) NOT NULL DEFAULT '' COMMENT '回执内容',
`status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '短信状态: 10.发送 20.成功 30.失败',
`send_date` INT(11) NOT NULL DEFAULT '0' COMMENT '发送日期20211112',
`created` INT(11) NOT NULL DEFAULT '0' COMMENT '创建时间',
`updated` INT(11) NOT NULL DEFAULT '0' COMMENT '更新时间',
PRIMARY KEY (`id`),
KEY `idx_send_date` (`send_date`)
KEY `idx_send_date` (`send_date`)
) ENGINE = InnoDB
AUTO_INCREMENT = 1
DEFAULT CHARSET = utf8mb4
COLLATE = utf8mb4_unicode_ci COMMENT ='短信记录信息';
DEFAULT CHARSET = utf8mb4 COMMENT ='短信记录信息';
drop table IF EXISTS channel_account;
CREATE TABLE `channel_account`
CREATE TABLE IF NOT EXISTS `channel_account`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`name` varchar(100) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账号名称',
`send_channel` tinyint(4) NOT NULL DEFAULT '0' COMMENT '消息发送渠道10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 飞书应用消息 ',
`account_config` varchar(1024) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账号配置',
`creator` varchar(128) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT 'Java3y' COMMENT '拥有者',
`created` int(11) NOT NULL DEFAULT '0' COMMENT '创建时间',
`updated` int(11) NOT NULL DEFAULT '0' COMMENT '更新时间',
`is_deleted` tinyint(4) NOT NULL DEFAULT '0' COMMENT '是否删除0.不删除 1.删除',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT,
`name` VARCHAR(100) NOT NULL DEFAULT '' COMMENT '账号名称',
`send_channel` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '消息发送渠道10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 飞书应用消息 ',
`account_config` VARCHAR(1024) NOT NULL DEFAULT '' COMMENT '账号配置',
`creator` VARCHAR(128) NOT NULL DEFAULT 'Java3y' COMMENT '拥有者',
`created` INT(11) NOT NULL DEFAULT '0' COMMENT '创建时间',
`updated` INT(11) NOT NULL DEFAULT '0' COMMENT '更新时间',
`is_deleted` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '是否删除0.不删除 1.删除',
PRIMARY KEY (`id`),
KEY `idx_send_channel` (`send_channel`)
KEY `idx_send_channel` (`send_channel`)
) ENGINE = InnoDB
AUTO_INCREMENT = 1
DEFAULT CHARSET = utf8mb4
COLLATE = utf8mb4_unicode_ci COMMENT ='渠道账号信息';
DEFAULT CHARSET = utf8mb4 COMMENT ='渠道账号信息';
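-- A minimal smoke-test row for reference (hypothetical values; every
-- column not listed falls back to its DEFAULT):
-- INSERT INTO message_template (name, msg_content)
-- VALUES ('demo', '验证码:{$code}');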

@ -1,192 +1,202 @@
drop database if exists nacos_config;
create database nacos_config;
use nacos_config;
DROP DATABASE IF EXISTS `nacos_config`;
CREATE TABLE `config_info`
CREATE DATABASE `nacos_config`;
USE `nacos_config`;
CREATE TABLE IF NOT EXISTS `config_info`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(255) DEFAULT NULL,
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`app_name` varchar(128) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
`c_desc` varchar(256) DEFAULT NULL,
`c_use` varchar(64) DEFAULT NULL,
`effect` varchar(64) DEFAULT NULL,
`type` varchar(64) DEFAULT NULL,
`encrypted_data_key` varchar(255) DEFAULT NULL,
`c_schema` text,
`id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` VARCHAR(255) NOT NULL COMMENT 'data_id',
`group_id` VARCHAR(255) DEFAULT NULL COMMENT 'group_id',
`content` LONGTEXT NOT NULL COMMENT 'content',
`md5` VARCHAR(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` TEXT COMMENT 'source user',
`src_ip` VARCHAR(50) DEFAULT NULL COMMENT 'source ip',
`app_name` VARCHAR(128) DEFAULT NULL,
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段',
`c_desc` VARCHAR(256) DEFAULT NULL,
`c_use` VARCHAR(64) DEFAULT NULL,
`effect` VARCHAR(64) DEFAULT NULL,
`type` VARCHAR(64) DEFAULT NULL,
`encrypted_data_key` VARCHAR(255) DEFAULT NULL,
`c_schema` TEXT,
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`, `group_id`, `tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='config_info';
CREATE TABLE `config_info_aggr`
CREATE TABLE IF NOT EXISTS `config_info_aggr`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(255) NOT NULL COMMENT 'group_id',
`datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
`content` longtext NOT NULL COMMENT '内容',
`gmt_modified` datetime NOT NULL COMMENT '修改时间',
`app_name` varchar(128) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` VARCHAR(255) NOT NULL COMMENT 'data_id',
`group_id` VARCHAR(255) NOT NULL COMMENT 'group_id',
`datum_id` VARCHAR(255) NOT NULL COMMENT 'datum_id',
`content` LONGTEXT NOT NULL COMMENT '内容',
`gmt_modified` DATETIME NOT NULL COMMENT '修改时间',
`app_name` VARCHAR(128) DEFAULT NULL,
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';
UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`, `group_id`, `tenant_id`, `datum_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='增加租户字段';
CREATE TABLE `config_info_beta`
CREATE TABLE IF NOT EXISTS `config_info_beta`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` VARCHAR(255) NOT NULL COMMENT 'data_id',
`group_id` VARCHAR(128) NOT NULL COMMENT 'group_id',
`app_name` VARCHAR(128) DEFAULT NULL COMMENT 'app_name',
`content` LONGTEXT NOT NULL COMMENT 'content',
`beta_ips` VARCHAR(1024) DEFAULT NULL COMMENT 'betaIps',
`md5` VARCHAR(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` TEXT COMMENT 'source user',
`src_ip` VARCHAR(50) DEFAULT NULL COMMENT 'source ip',
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';
UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`, `group_id`, `tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='config_info_beta';
CREATE TABLE `config_info_tag`
CREATE TABLE IF NOT EXISTS `config_info_tag`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` VARCHAR(255) NOT NULL COMMENT 'data_id',
`group_id` VARCHAR(128) NOT NULL COMMENT 'group_id',
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT 'tenant_id',
`tag_id` VARCHAR(128) NOT NULL COMMENT 'tag_id',
`app_name` VARCHAR(128) DEFAULT NULL COMMENT 'app_name',
`content` LONGTEXT NOT NULL COMMENT 'content',
`md5` VARCHAR(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` TEXT COMMENT 'source user',
`src_ip` VARCHAR(50) DEFAULT NULL COMMENT 'source ip',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';
UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`, `group_id`, `tenant_id`, `tag_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='config_info_tag';
CREATE TABLE `config_tags_relation`
CREATE TABLE IF NOT EXISTS `config_tags_relation`
(
`id` bigint(20) NOT NULL COMMENT 'id',
`tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
`tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`nid` bigint(20) NOT NULL AUTO_INCREMENT,
`id` BIGINT(20) NOT NULL COMMENT 'id',
`tag_name` VARCHAR(128) NOT NULL COMMENT 'tag_name',
`tag_type` VARCHAR(64) DEFAULT NULL COMMENT 'tag_type',
`data_id` VARCHAR(255) NOT NULL COMMENT 'data_id',
`group_id` VARCHAR(128) NOT NULL COMMENT 'group_id',
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT 'tenant_id',
`nid` BIGINT(20) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`nid`),
UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';
UNIQUE KEY `uk_configtagrelation_configidtag` (`id`, `tag_name`, `tag_type`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='config_tag_relation';
CREATE TABLE `group_capacity`
CREATE TABLE IF NOT EXISTS `group_capacity`
(
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID空字符表示整个集群',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额0表示使用默认值',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限单位为字节0表示使用默认值',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数0表示使用默认值',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限单位为字节0表示使用默认值',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`group_id` VARCHAR(128) NOT NULL DEFAULT '' COMMENT 'Group ID空字符表示整个集群',
`quota` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '配额0表示使用默认值',
`usage` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '使用量',
`max_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个配置大小上限单位为字节0表示使用默认值',
`max_aggr_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数0表示使用默认值',
`max_aggr_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限单位为字节0表示使用默认值',
`max_history_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
`gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_group_id` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表';
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='集群、各Group容量信息表';
CREATE TABLE `his_config_info`
CREATE TABLE IF NOT EXISTS `his_config_info`
(
`id` bigint(64) unsigned NOT NULL,
`nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`data_id` varchar(255) NOT NULL,
`group_id` varchar(128) NOT NULL,
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL,
`md5` varchar(32) DEFAULT NULL,
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
`src_user` text,
`src_ip` varchar(50) DEFAULT NULL,
`op_type` char(10) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
`encrypted_data_key` varchar(255) DEFAULT NULL,
`id` BIGINT(64) UNSIGNED NOT NULL,
`nid` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT,
`data_id` VARCHAR(255) NOT NULL,
`group_id` VARCHAR(128) NOT NULL,
`app_name` VARCHAR(128) DEFAULT NULL COMMENT 'app_name',
`content` LONGTEXT NOT NULL,
`md5` VARCHAR(32) DEFAULT NULL,
`gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
`gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
`src_user` TEXT,
`src_ip` VARCHAR(50) DEFAULT NULL,
`op_type` CHAR(10) DEFAULT NULL,
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段',
`encrypted_data_key` VARCHAR(255) DEFAULT NULL,
PRIMARY KEY (`nid`),
KEY `idx_gmt_create` (`gmt_create`),
KEY `idx_gmt_modified` (`gmt_modified`),
KEY `idx_did` (`data_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造';
KEY `idx_gmt_create` (`gmt_create`),
KEY `idx_gmt_modified` (`gmt_modified`),
KEY `idx_did` (`data_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='多租户改造';
CREATE TABLE `tenant_capacity`
CREATE TABLE IF NOT EXISTS `tenant_capacity`
(
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额0表示使用默认值',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限单位为字节0表示使用默认值',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限单位为字节0表示使用默认值',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`tenant_id` VARCHAR(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
`quota` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '配额0表示使用默认值',
`usage` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '使用量',
`max_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个配置大小上限单位为字节0表示使用默认值',
`max_aggr_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
`max_aggr_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限单位为字节0表示使用默认值',
`max_history_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
`gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表';
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='租户容量信息表';
CREATE TABLE `tenant_info`
CREATE TABLE IF NOT EXISTS `tenant_info`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`kp` varchar(128) NOT NULL COMMENT 'kp',
`tenant_id` varchar(128) default '' COMMENT 'tenant_id',
`tenant_name` varchar(128) default '' COMMENT 'tenant_name',
`tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
`create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
`gmt_create` bigint(20) NOT NULL COMMENT '创建时间',
`gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',
`id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`kp` VARCHAR(128) NOT NULL COMMENT 'kp',
`tenant_id` VARCHAR(128) DEFAULT '' COMMENT 'tenant_id',
`tenant_name` VARCHAR(128) DEFAULT '' COMMENT 'tenant_name',
`tenant_desc` VARCHAR(256) DEFAULT NULL COMMENT 'tenant_desc',
`create_source` VARCHAR(32) DEFAULT NULL COMMENT 'create_source',
`gmt_create` BIGINT(20) NOT NULL COMMENT '创建时间',
`gmt_modified` BIGINT(20) NOT NULL COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';
UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`, `tenant_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='tenant_info';
CREATE TABLE `users`
CREATE TABLE IF NOT EXISTS `users`
(
`username` varchar(50) NOT NULL PRIMARY KEY,
`password` varchar(500) NOT NULL,
`enabled` boolean NOT NULL
);
`username` VARCHAR(50) NOT NULL PRIMARY KEY,
`password` VARCHAR(500) NOT NULL,
`enabled` BOOLEAN NOT NULL
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='users';
CREATE TABLE `roles`
CREATE TABLE IF NOT EXISTS `roles`
(
`username` varchar(50) NOT NULL,
`role` varchar(50) NOT NULL,
UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
);
`username` VARCHAR(50) NOT NULL,
`role` VARCHAR(50) NOT NULL,
UNIQUE INDEX `idx_user_role` (`username`, `role`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='roles';
CREATE TABLE `permissions`
CREATE TABLE IF NOT EXISTS `permissions`
(
`role` varchar(50) NOT NULL,
`resource` varchar(255) NOT NULL,
`action` varchar(8) NOT NULL,
UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE
);
INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
`role` VARCHAR(50) NOT NULL,
`resource` VARCHAR(255) NOT NULL,
`action` VARCHAR(8) NOT NULL,
UNIQUE INDEX `uk_role_permission` (`role`, `resource`, `action`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='permissions';
INSERT INTO users (username, password, enabled)
VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO roles (username, role)
VALUES ('nacos', 'ROLE_ADMIN');
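-- The bcrypt hash above corresponds to Nacos's stock credential pair
-- (nacos / nacos); change the password before exposing the console.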

@ -1,125 +1,145 @@
CREATE database if NOT EXISTS `xxl_job` default character set utf8mb4 collate utf8mb4_unicode_ci;
use `xxl_job`;
DROP DATABASE IF EXISTS `xxl_job`;
SET NAMES utf8mb4;
CREATE DATABASE `xxl_job`;
drop table IF EXISTS xxl_job_info;
USE `xxl_job`;
CREATE TABLE `xxl_job_info` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT '执行器主键ID',
`job_desc` varchar(255) NOT NULL,
`add_time` datetime DEFAULT NULL,
`update_time` datetime DEFAULT NULL,
`author` varchar(64) DEFAULT NULL COMMENT '作者',
`alarm_email` varchar(255) DEFAULT NULL COMMENT '报警邮件',
`schedule_type` varchar(50) NOT NULL DEFAULT 'NONE' COMMENT '调度类型',
`schedule_conf` varchar(128) DEFAULT NULL COMMENT '调度配置,值含义取决于调度类型',
`misfire_strategy` varchar(50) NOT NULL DEFAULT 'DO_NOTHING' COMMENT '调度过期策略',
`executor_route_strategy` varchar(50) DEFAULT NULL COMMENT '执行器路由策略',
`executor_handler` varchar(255) DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` varchar(512) DEFAULT NULL COMMENT '执行器任务参数',
`executor_block_strategy` varchar(50) DEFAULT NULL COMMENT '阻塞处理策略',
`executor_timeout` int(11) NOT NULL DEFAULT '0' COMMENT '任务执行超时时间,单位秒',
`executor_fail_retry_count` int(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数',
`glue_type` varchar(50) NOT NULL COMMENT 'GLUE类型',
`glue_source` mediumtext COMMENT 'GLUE源代码',
`glue_remark` varchar(128) DEFAULT NULL COMMENT 'GLUE备注',
`glue_updatetime` datetime DEFAULT NULL COMMENT 'GLUE更新时间',
`child_jobid` varchar(255) DEFAULT NULL COMMENT '子任务ID多个逗号分隔',
`trigger_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '调度状态0-停止1-运行',
`trigger_last_time` bigint(13) NOT NULL DEFAULT '0' COMMENT '上次调度时间',
`trigger_next_time` bigint(13) NOT NULL DEFAULT '0' COMMENT '下次调度时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_info`
(
`id` INT(11) NOT NULL AUTO_INCREMENT,
`job_group` INT(11) NOT NULL COMMENT '执行器主键ID',
`job_desc` VARCHAR(255) NOT NULL,
`add_time` DATETIME DEFAULT NULL,
`update_time` DATETIME DEFAULT NULL,
`author` VARCHAR(64) DEFAULT NULL COMMENT '作者',
`alarm_email` VARCHAR(255) DEFAULT NULL COMMENT '报警邮件',
`schedule_type` VARCHAR(50) NOT NULL DEFAULT 'NONE' COMMENT '调度类型',
`schedule_conf` VARCHAR(128) DEFAULT NULL COMMENT '调度配置,值含义取决于调度类型',
`misfire_strategy` VARCHAR(50) NOT NULL DEFAULT 'DO_NOTHING' COMMENT '调度过期策略',
`executor_route_strategy` VARCHAR(50) DEFAULT NULL COMMENT '执行器路由策略',
`executor_handler` VARCHAR(255) DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` VARCHAR(512) DEFAULT NULL COMMENT '执行器任务参数',
`executor_block_strategy` VARCHAR(50) DEFAULT NULL COMMENT '阻塞处理策略',
`executor_timeout` INT(11) NOT NULL DEFAULT '0' COMMENT '任务执行超时时间,单位秒',
`executor_fail_retry_count` INT(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数',
`glue_type` VARCHAR(50) NOT NULL COMMENT 'GLUE类型',
`glue_source` MEDIUMTEXT COMMENT 'GLUE源代码',
`glue_remark` VARCHAR(128) DEFAULT NULL COMMENT 'GLUE备注',
`glue_updatetime` DATETIME DEFAULT NULL COMMENT 'GLUE更新时间',
`child_jobid` VARCHAR(255) DEFAULT NULL COMMENT '子任务ID多个逗号分隔',
`trigger_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '调度状态0-停止1-运行',
`trigger_last_time` BIGINT(13) NOT NULL DEFAULT '0' COMMENT '上次调度时间',
`trigger_next_time` BIGINT(13) NOT NULL DEFAULT '0' COMMENT '下次调度时间',
PRIMARY KEY (`id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_info';
drop table IF EXISTS xxl_job_log;
CREATE TABLE `xxl_job_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT '执行器主键ID',
`job_id` int(11) NOT NULL COMMENT '任务主键ID',
`executor_address` varchar(255) DEFAULT NULL COMMENT '执行器地址,本次执行的地址',
`executor_handler` varchar(255) DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` varchar(512) DEFAULT NULL COMMENT '执行器任务参数',
`executor_sharding_param` varchar(20) DEFAULT NULL COMMENT '执行器任务分片参数,格式如 1/2',
`executor_fail_retry_count` int(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数',
`trigger_time` datetime DEFAULT NULL COMMENT '调度-时间',
`trigger_code` int(11) NOT NULL COMMENT '调度-结果',
`trigger_msg` text COMMENT '调度-日志',
`handle_time` datetime DEFAULT NULL COMMENT '执行-时间',
`handle_code` int(11) NOT NULL COMMENT '执行-状态',
`handle_msg` text COMMENT '执行-日志',
`alarm_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '告警状态0-默认、1-无需告警、2-告警成功、3-告警失败',
PRIMARY KEY (`id`),
KEY `I_trigger_time` (`trigger_time`),
KEY `I_handle_code` (`handle_code`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_log`
(
`id` BIGINT(20) NOT NULL AUTO_INCREMENT,
`job_group` INT(11) NOT NULL COMMENT '执行器主键ID',
`job_id` INT(11) NOT NULL COMMENT '任务主键ID',
`executor_address` VARCHAR(255) DEFAULT NULL COMMENT '执行器地址,本次执行的地址',
`executor_handler` VARCHAR(255) DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` VARCHAR(512) DEFAULT NULL COMMENT '执行器任务参数',
`executor_sharding_param` VARCHAR(20) DEFAULT NULL COMMENT '执行器任务分片参数,格式如 1/2',
`executor_fail_retry_count` INT(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数',
`trigger_time` DATETIME DEFAULT NULL COMMENT '调度-时间',
`trigger_code` INT(11) NOT NULL COMMENT '调度-结果',
`trigger_msg` TEXT COMMENT '调度-日志',
`handle_time` DATETIME DEFAULT NULL COMMENT '执行-时间',
`handle_code` INT(11) NOT NULL COMMENT '执行-状态',
`handle_msg` TEXT COMMENT '执行-日志',
`alarm_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '告警状态0-默认、1-无需告警、2-告警成功、3-告警失败',
PRIMARY KEY (`id`),
KEY `I_trigger_time` (`trigger_time`),
KEY `I_handle_code` (`handle_code`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_log';
drop table IF EXISTS xxl_job_log_report;
CREATE TABLE `xxl_job_log_report` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`trigger_day` datetime DEFAULT NULL COMMENT '调度-时间',
`running_count` int(11) NOT NULL DEFAULT '0' COMMENT '运行中-日志数量',
`suc_count` int(11) NOT NULL DEFAULT '0' COMMENT '执行成功-日志数量',
`fail_count` int(11) NOT NULL DEFAULT '0' COMMENT '执行失败-日志数量',
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `i_trigger_day` (`trigger_day`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_log_report`
(
`id` INT(11) NOT NULL AUTO_INCREMENT,
`trigger_day` DATETIME DEFAULT NULL COMMENT '调度-时间',
`running_count` INT(11) NOT NULL DEFAULT '0' COMMENT '运行中-日志数量',
`suc_count` INT(11) NOT NULL DEFAULT '0' COMMENT '执行成功-日志数量',
`fail_count` INT(11) NOT NULL DEFAULT '0' COMMENT '执行失败-日志数量',
`update_time` DATETIME DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `i_trigger_day` (`trigger_day`) USING BTREE
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_log_report';
drop table IF EXISTS xxl_job_logglue;
CREATE TABLE `xxl_job_logglue` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_id` int(11) NOT NULL COMMENT '任务主键ID',
`glue_type` varchar(50) DEFAULT NULL COMMENT 'GLUE类型',
`glue_source` mediumtext COMMENT 'GLUE源代码',
`glue_remark` varchar(128) NOT NULL COMMENT 'GLUE备注',
`add_time` datetime DEFAULT NULL,
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_logglue`
(
`id` INT(11) NOT NULL AUTO_INCREMENT,
`job_id` INT(11) NOT NULL COMMENT '任务主键ID',
`glue_type` VARCHAR(50) DEFAULT NULL COMMENT 'GLUE类型',
`glue_source` MEDIUMTEXT COMMENT 'GLUE源代码',
`glue_remark` VARCHAR(128) NOT NULL COMMENT 'GLUE备注',
`add_time` DATETIME DEFAULT NULL,
`update_time` DATETIME DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_logglue';
drop table IF EXISTS xxl_job_registry;
CREATE TABLE `xxl_job_registry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`registry_group` varchar(50) NOT NULL,
`registry_key` varchar(255) NOT NULL,
`registry_value` varchar(255) NOT NULL,
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `i_g_k_v` (`registry_group`,`registry_key`,`registry_value`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_registry`
(
`id` INT(11) NOT NULL AUTO_INCREMENT,
`registry_group` VARCHAR(50) NOT NULL,
`registry_key` VARCHAR(255) NOT NULL,
`registry_value` VARCHAR(255) NOT NULL,
`update_time` DATETIME DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `i_g_k_v` (`registry_group`, `registry_key`, `registry_value`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_registry';
drop table IF EXISTS xxl_job_group;
CREATE TABLE `xxl_job_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`app_name` varchar(64) NOT NULL COMMENT '执行器AppName',
`title` varchar(12) NOT NULL COMMENT '执行器名称',
`address_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '执行器地址类型0=自动注册、1=手动录入',
`address_list` text COMMENT '执行器地址列表,多地址逗号分隔',
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_group`
(
`id` INT(11) NOT NULL AUTO_INCREMENT,
`app_name` VARCHAR(64) NOT NULL COMMENT '执行器AppName',
`title` VARCHAR(12) NOT NULL COMMENT '执行器名称',
`address_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '执行器地址类型0=自动注册、1=手动录入',
`address_list` TEXT COMMENT '执行器地址列表,多地址逗号分隔',
`update_time` DATETIME DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_group';
drop table IF EXISTS xxl_job_user;
CREATE TABLE `xxl_job_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(50) NOT NULL COMMENT '账号',
`password` varchar(50) NOT NULL COMMENT '密码',
`role` tinyint(4) NOT NULL COMMENT '角色0-普通用户、1-管理员',
`permission` varchar(255) DEFAULT NULL COMMENT '权限执行器ID列表多个逗号分割',
PRIMARY KEY (`id`),
UNIQUE KEY `i_username` (`username`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_user`
(
`id` INT(11) NOT NULL AUTO_INCREMENT,
`username` VARCHAR(50) NOT NULL COMMENT '账号',
`password` VARCHAR(50) NOT NULL COMMENT '密码',
`role` TINYINT(4) NOT NULL COMMENT '角色0-普通用户、1-管理员',
`permission` VARCHAR(255) DEFAULT NULL COMMENT '权限执行器ID列表多个逗号分割',
PRIMARY KEY (`id`),
UNIQUE KEY `i_username` (`username`) USING BTREE
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_user';
drop table IF EXISTS xxl_job_lock;
CREATE TABLE `xxl_job_lock` (
`lock_name` varchar(50) NOT NULL COMMENT '锁名称',
PRIMARY KEY (`lock_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE IF NOT EXISTS `xxl_job_lock`
(
`lock_name` VARCHAR(50) NOT NULL COMMENT '锁名称',
PRIMARY KEY (`lock_name`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_lock';
INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`, `update_time`) VALUES (1, 'xxl-job-executor-sample', '示例执行器', 0, NULL, '2018-11-03 22:21:31' );
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `schedule_type`, `schedule_conf`, `misfire_strategy`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`) VALUES (1, 1, '测试任务1', '2018-11-03 22:21:31', '2018-11-03 22:21:31', 'XXL', '', 'CRON', '0 0 0 * * ? *', 'DO_NOTHING', 'FIRST', 'demoJobHandler', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2018-11-03 22:21:31', '');
INSERT INTO `xxl_job_user`(`id`, `username`, `password`, `role`, `permission`) VALUES (1, 'admin', 'e10adc3949ba59abbe56e057f20f883e', 1, NULL);
INSERT INTO `xxl_job_lock` ( `lock_name`) VALUES ( 'schedule_lock');
INSERT INTO `xxl_job_group` (`id`, `app_name`, `title`, `address_type`, `address_list`, `update_time`)
VALUES (1, 'xxl-job-executor-sample', '示例执行器', 0, NULL, '2018-11-03 22:21:31');
INSERT INTO `xxl_job_info` (`id`, `job_group`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`,
`schedule_type`, `schedule_conf`, `misfire_strategy`, `executor_route_strategy`,
`executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`,
`executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`,
`child_jobid`)
VALUES (1, 1, '测试任务1', '2018-11-03 22:21:31', '2018-11-03 22:21:31', 'XXL', '', 'CRON', '0 0 0 * * ? *',
'DO_NOTHING', 'FIRST', 'demoJobHandler', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化',
'2018-11-03 22:21:31', '');
INSERT INTO xxl_job_user (id, username, password, role, permission)
VALUES (1, 'admin', 'e10adc3949ba59abbe56e057f20f883e', 1, NULL);
INSERT INTO xxl_job_lock (lock_name)
VALUES ('schedule_lock');
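-- 'e10adc3949ba59abbe56e057f20f883e' is MD5('123456'), i.e. the stock
-- xxl-job-admin login is admin / 123456; change it after first start.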

@ -123,9 +123,9 @@ services:
- /home/nacos/single-logs/nacos-server:/home/nacos/logs
- /home/nacos/init.d:/home/nacos/init.d
ports:
- 8848:8848
- 9848:9848
- 9849:9849
- "8848:8848"
- "9848:9848"
- "9849:9849"
depends_on:
- austin-mysql
restart: on-failure
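    # In Nacos 2.x, 9848 (main port + 1000) carries client gRPC traffic
    # and 9849 (+1001) carries server-to-server sync, hence the two
    # extra port mappings alongside 8848.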
