From 4a73ea84391c5c22cd146d4550b24105c33de059 Mon Sep 17 00:00:00 2001 From: xiaoxiamo <82970607@qq.com> Date: Sun, 14 Jul 2024 21:17:32 +0800 Subject: [PATCH 1/5] =?UTF-8?q?style:=20=E8=A7=84=E8=8C=83=E5=8C=96?= =?UTF-8?q?=E9=A1=B9=E7=9B=AE=EF=BC=88=E4=B8=80=EF=BC=89=EF=BC=9A=E6=95=B4?= =?UTF-8?q?=E7=90=86SQL=E8=84=9A=E6=9C=AC=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/sql/austin.sql | 126 ++++++++--------- doc/sql/nacos.sql | 322 +++++++++++++++++++++++--------------------- doc/sql/xxl-job.sql | 246 +++++++++++++++++---------------- 3 files changed, 355 insertions(+), 339 deletions(-) diff --git a/doc/sql/austin.sql b/doc/sql/austin.sql index 10f96be..15d0fe7 100644 --- a/doc/sql/austin.sql +++ b/doc/sql/austin.sql @@ -1,84 +1,70 @@ -drop database if exists austin; +DROP DATABASE IF EXISTS `austin`; -create database austin; +CREATE DATABASE `austin`; -use austin; +USE `austin`; -drop table IF EXISTS message_template; - -CREATE TABLE `message_template` +CREATE TABLE IF NOT EXISTS `message_template` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `name` varchar(100) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '标题', - `audit_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '当前消息审核状态: 10.待审核 20.审核成功 30.被拒绝', - `flow_id` varchar(50) COLLATE utf8mb4_unicode_ci COMMENT '工单ID', - `msg_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '当前消息状态:10.新建 20.停用 30.启用 40.等待发送 50.发送中 60.发送成功 70.发送失败', - `cron_task_id` bigint(20) COMMENT '定时任务Id (xxl-job-admin返回)', - `cron_crowd_path` varchar(500) COMMENT '定时发送人群的文件路径', - `expect_push_time` varchar(100) COLLATE utf8mb4_unicode_ci COMMENT '期望发送时间:0:立即发送 定时任务以及周期任务:cron表达式', - `id_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '消息的发送ID类型:10. userId 20.did 30.手机号 40.openId 50.email 60.企业微信userId', - `send_channel` int(10) NOT NULL DEFAULT '0' COMMENT '消息发送渠道:10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 
飞书应用消息 ', - `template_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '10.运营类 20.技术类接口调用', - `msg_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '10.通知类消息 20.营销类消息 30.验证码类消息', - `shield_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '10.夜间不屏蔽 20.夜间屏蔽 30.夜间屏蔽(次日早上9点发送)', - `msg_content` varchar(4096) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '消息内容 占位符用{$var}表示', - `send_account` int(10) NOT NULL DEFAULT '0' COMMENT '发送账号 一个渠道下可存在多个账号', - `creator` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '创建者', - `updator` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '更新者', - `auditor` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '审核人', - `team` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '业务方团队', - `proposer` varchar(45) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '业务方', - `is_deleted` tinyint(4) NOT NULL DEFAULT '0' COMMENT '是否删除:0.不删除 1.删除', - `created` int(11) NOT NULL DEFAULT '0' COMMENT '创建时间', - `updated` int(11) NOT NULL DEFAULT '0' COMMENT '更新时间', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT, + `name` VARCHAR(100) NOT NULL DEFAULT '' COMMENT '标题', + `audit_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '当前消息审核状态: 10.待审核 20.审核成功 30.被拒绝', + `flow_id` VARCHAR(50) COMMENT '工单ID', + `msg_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '当前消息状态:10.新建 20.停用 30.启用 40.等待发送 50.发送中 60.发送成功 70.发送失败', + `cron_task_id` BIGINT(20) COMMENT '定时任务Id (xxl-job-admin返回)', + `cron_crowd_path` VARCHAR(500) COMMENT '定时发送人群的文件路径', + `expect_push_time` VARCHAR(100) COMMENT '期望发送时间:0:立即发送 定时任务以及周期任务:cron表达式', + `id_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '消息的发送ID类型:10. userId 20.did 30.手机号 40.openId 50.email 60.企业微信userId', + `send_channel` INT(10) NOT NULL DEFAULT '0' COMMENT '消息发送渠道:10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 
飞书应用消息 ', + `template_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '10.运营类 20.技术类接口调用', + `msg_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '10.通知类消息 20.营销类消息 30.验证码类消息', + `shield_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '10.夜间不屏蔽 20.夜间屏蔽 30.夜间屏蔽(次日早上9点发送)', + `msg_content` VARCHAR(4096) NOT NULL DEFAULT '' COMMENT '消息内容 占位符用{$var}表示', + `send_account` INT(10) NOT NULL DEFAULT '0' COMMENT '发送账号 一个渠道下可存在多个账号', + `creator` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '创建者', + `updator` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '更新者', + `auditor` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '审核人', + `team` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '业务方团队', + `proposer` VARCHAR(45) NOT NULL DEFAULT '' COMMENT '业务方', + `is_deleted` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '是否删除:0.不删除 1.删除', + `created` INT(11) NOT NULL DEFAULT '0' COMMENT '创建时间', + `updated` INT(11) NOT NULL DEFAULT '0' COMMENT '更新时间', PRIMARY KEY (`id`), - KEY `idx_channel` (`send_channel`) + KEY `idx_channel` (`send_channel`) ) ENGINE = InnoDB - AUTO_INCREMENT = 1 - DEFAULT CHARSET = utf8mb4 - COLLATE = utf8mb4_unicode_ci COMMENT ='消息模板信息'; - - + DEFAULT CHARSET = utf8mb4 COMMENT ='消息模板信息'; -CREATE TABLE `sms_record` +CREATE TABLE IF NOT EXISTS `sms_record` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `message_template_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '消息模板ID', - `phone` bigint(20) NOT NULL DEFAULT '0' COMMENT '手机号', - `supplier_id` tinyint(4) NOT NULL DEFAULT '0' COMMENT '发送短信渠道商的ID', - `supplier_name` varchar(40) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '发送短信渠道商的名称', - `msg_content` varchar(600) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '短信发送的内容', - `series_id` varchar(100) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '下发批次的ID', - `charging_num` tinyint(4) NOT NULL DEFAULT '0' COMMENT '计费条数', - `report_content` varchar(50) NOT NULL DEFAULT '' COMMENT '回执内容', - `status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '短信状态: 10.发送 20.成功 30.失败', - `send_date` int(11) NOT NULL DEFAULT '0' COMMENT '发送日期:20211112', - `created` int(11) NOT NULL DEFAULT '0' COMMENT '创建时间', - `updated` int(11) NOT NULL DEFAULT '0' COMMENT '更新时间', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT, + `message_template_id` BIGINT(20) NOT NULL DEFAULT '0' COMMENT '消息模板ID', + `phone` BIGINT(20) NOT NULL DEFAULT '0' COMMENT '手机号', + `supplier_id` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '发送短信渠道商的ID', + `supplier_name` VARCHAR(40) NOT NULL DEFAULT '' COMMENT '发送短信渠道商的名称', + `msg_content` VARCHAR(600) NOT NULL DEFAULT '' COMMENT '短信发送的内容', + `series_id` VARCHAR(100) NOT NULL DEFAULT '' COMMENT '下发批次的ID', + `charging_num` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '计费条数', + `report_content` VARCHAR(50) NOT NULL DEFAULT '' COMMENT '回执内容', + `status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '短信状态: 10.发送 20.成功 30.失败', + `send_date` INT(11) NOT NULL DEFAULT '0' COMMENT '发送日期:20211112', + `created` INT(11) NOT NULL DEFAULT '0' COMMENT '创建时间', + `updated` INT(11) NOT NULL DEFAULT '0' COMMENT '更新时间', PRIMARY KEY (`id`), - KEY `idx_send_date` (`send_date`) + KEY `idx_send_date` (`send_date`) ) ENGINE = InnoDB - AUTO_INCREMENT = 1 - DEFAULT CHARSET = utf8mb4 - COLLATE = utf8mb4_unicode_ci COMMENT ='短信记录信息'; + DEFAULT CHARSET = utf8mb4 COMMENT ='短信记录信息'; -drop table IF EXISTS channel_account; - - -CREATE TABLE `channel_account` +CREATE TABLE IF NOT EXISTS `channel_account` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `name` varchar(100) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账号名称', - `send_channel` tinyint(4) NOT NULL DEFAULT 
'0' COMMENT '消息发送渠道:10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 飞书应用消息 ', - `account_config` varchar(1024) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT '' COMMENT '账号配置', - `creator` varchar(128) COLLATE utf8mb4_unicode_ci NOT NULL DEFAULT 'Java3y' COMMENT '拥有者', - `created` int(11) NOT NULL DEFAULT '0' COMMENT '创建时间', - `updated` int(11) NOT NULL DEFAULT '0' COMMENT '更新时间', - `is_deleted` tinyint(4) NOT NULL DEFAULT '0' COMMENT '是否删除:0.不删除 1.删除', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT, + `name` VARCHAR(100) NOT NULL DEFAULT '' COMMENT '账号名称', + `send_channel` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '消息发送渠道:10.IM 20.Push 30.短信 40.Email 50.公众号 60.小程序 70.企业微信 80.钉钉机器人 90.钉钉工作通知 100.企业微信机器人 110.飞书机器人 110. 飞书应用消息 ', + `account_config` VARCHAR(1024) NOT NULL DEFAULT '' COMMENT '账号配置', + `creator` VARCHAR(128) NOT NULL DEFAULT 'Java3y' COMMENT '拥有者', + `created` INT(11) NOT NULL DEFAULT '0' COMMENT '创建时间', + `updated` INT(11) NOT NULL DEFAULT '0' COMMENT '更新时间', + `is_deleted` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '是否删除:0.不删除 1.删除', PRIMARY KEY (`id`), - KEY `idx_send_channel` (`send_channel`) + KEY `idx_send_channel` (`send_channel`) ) ENGINE = InnoDB - AUTO_INCREMENT = 1 - DEFAULT CHARSET = utf8mb4 - COLLATE = utf8mb4_unicode_ci COMMENT ='渠道账号信息'; - + DEFAULT CHARSET = utf8mb4 COMMENT ='渠道账号信息'; \ No newline at end of file diff --git a/doc/sql/nacos.sql b/doc/sql/nacos.sql index 2e68956..5343b95 100644 --- a/doc/sql/nacos.sql +++ b/doc/sql/nacos.sql @@ -1,192 +1,202 @@ -drop database if exists nacos_config; -create database nacos_config; -use nacos_config; +DROP DATABASE IF EXISTS `nacos_config`; -CREATE TABLE `config_info` +CREATE DATABASE `nacos_config`; + +USE `nacos_config`; + +CREATE TABLE IF NOT EXISTS `config_info` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', - `data_id` varchar(255) NOT NULL COMMENT 'data_id', - `group_id` varchar(255) DEFAULT NULL, - `content` longtext NOT NULL COMMENT 'content', - `md5` varchar(32) DEFAULT NULL COMMENT 'md5', - `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', - `src_user` text COMMENT 'source user', - `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', - `app_name` varchar(128) DEFAULT NULL, - `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', - `c_desc` varchar(256) DEFAULT NULL, - `c_use` varchar(64) DEFAULT NULL, - `effect` varchar(64) DEFAULT NULL, - `type` varchar(64) DEFAULT NULL, - `encrypted_data_key` varchar(255) DEFAULT NULL, - `c_schema` text, + `id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` VARCHAR(255) NOT NULL COMMENT 'data_id', + `group_id` VARCHAR(255) DEFAULT NULL COMMENT 'group_id', + `content` LONGTEXT NOT NULL COMMENT 'content', + `md5` VARCHAR(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` TEXT COMMENT 'source user', + `src_ip` VARCHAR(50) DEFAULT NULL COMMENT 'source ip', + `app_name` VARCHAR(128) DEFAULT NULL, + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段', + `c_desc` VARCHAR(256) DEFAULT NULL, + `c_use` VARCHAR(64) DEFAULT NULL, + `effect` VARCHAR(64) DEFAULT NULL, + `type` VARCHAR(64) DEFAULT NULL, + `encrypted_data_key` VARCHAR(255) DEFAULT NULL, + `c_schema` TEXT, PRIMARY KEY (`id`), - UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`) -) ENGINE=InnoDB 
DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info'; + UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`, `group_id`, `tenant_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='config_info'; -CREATE TABLE `config_info_aggr` +CREATE TABLE IF NOT EXISTS `config_info_aggr` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', - `data_id` varchar(255) NOT NULL COMMENT 'data_id', - `group_id` varchar(255) NOT NULL COMMENT 'group_id', - `datum_id` varchar(255) NOT NULL COMMENT 'datum_id', - `content` longtext NOT NULL COMMENT '内容', - `gmt_modified` datetime NOT NULL COMMENT '修改时间', - `app_name` varchar(128) DEFAULT NULL, - `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` VARCHAR(255) NOT NULL COMMENT 'data_id', + `group_id` VARCHAR(255) NOT NULL COMMENT 'group_id', + `datum_id` VARCHAR(255) NOT NULL COMMENT 'datum_id', + `content` LONGTEXT NOT NULL COMMENT '内容', + `gmt_modified` DATETIME NOT NULL COMMENT '修改时间', + `app_name` VARCHAR(128) DEFAULT NULL, + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段', PRIMARY KEY (`id`), - UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段'; - + UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`, `group_id`, `tenant_id`, `datum_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='增加租户字段'; - -CREATE TABLE `config_info_beta` +CREATE TABLE IF NOT EXISTS `config_info_beta` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', - `data_id` varchar(255) NOT NULL COMMENT 'data_id', - `group_id` varchar(128) NOT NULL COMMENT 'group_id', - `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', - `content` longtext NOT NULL COMMENT 'content', - `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps', - `md5` varchar(32) DEFAULT NULL COMMENT 'md5', - `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', - `src_user` text COMMENT 'source user', - `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', - `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` VARCHAR(255) NOT NULL COMMENT 'data_id', + `group_id` VARCHAR(128) NOT NULL COMMENT 'group_id', + `app_name` VARCHAR(128) DEFAULT NULL COMMENT 'app_name', + `content` LONGTEXT NOT NULL COMMENT 'content', + `beta_ips` VARCHAR(1024) DEFAULT NULL COMMENT 'betaIps', + `md5` VARCHAR(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` TEXT COMMENT 'source user', + `src_ip` VARCHAR(50) DEFAULT NULL COMMENT 'source ip', + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段', PRIMARY KEY (`id`), - UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta'; - + UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`, `group_id`, `tenant_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='config_info_beta'; -CREATE TABLE `config_info_tag` +CREATE TABLE IF NOT EXISTS `config_info_tag` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', - `data_id` varchar(255) NOT NULL COMMENT 'data_id', - `group_id` varchar(128) NOT NULL COMMENT 'group_id', - `tenant_id` varchar(128) 
DEFAULT '' COMMENT 'tenant_id', - `tag_id` varchar(128) NOT NULL COMMENT 'tag_id', - `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', - `content` longtext NOT NULL COMMENT 'content', - `md5` varchar(32) DEFAULT NULL COMMENT 'md5', - `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', - `src_user` text COMMENT 'source user', - `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `data_id` VARCHAR(255) NOT NULL COMMENT 'data_id', + `group_id` VARCHAR(128) NOT NULL COMMENT 'group_id', + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT 'tenant_id', + `tag_id` VARCHAR(128) NOT NULL COMMENT 'tag_id', + `app_name` VARCHAR(128) DEFAULT NULL COMMENT 'app_name', + `content` LONGTEXT NOT NULL COMMENT 'content', + `md5` VARCHAR(32) DEFAULT NULL COMMENT 'md5', + `gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `src_user` TEXT COMMENT 'source user', + `src_ip` VARCHAR(50) DEFAULT NULL COMMENT 'source ip', PRIMARY KEY (`id`), - UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag'; + UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`, `group_id`, `tenant_id`, `tag_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='config_info_tag'; - -CREATE TABLE `config_tags_relation` +CREATE TABLE IF NOT EXISTS `config_tags_relation` ( - `id` bigint(20) NOT NULL COMMENT 'id', - `tag_name` varchar(128) NOT NULL COMMENT 'tag_name', - `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type', - `data_id` varchar(255) NOT NULL COMMENT 'data_id', - `group_id` varchar(128) NOT NULL COMMENT 'group_id', - `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id', - `nid` bigint(20) NOT NULL AUTO_INCREMENT, + `id` BIGINT(20) NOT NULL COMMENT 'id', + `tag_name` VARCHAR(128) NOT NULL COMMENT 'tag_name', + `tag_type` VARCHAR(64) DEFAULT NULL COMMENT 'tag_type', + `data_id` VARCHAR(255) NOT NULL COMMENT 'data_id', + `group_id` VARCHAR(128) NOT NULL COMMENT 'group_id', + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT 'tenant_id', + `nid` BIGINT(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`nid`), - UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`), - KEY `idx_tenant_id` (`tenant_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation'; + UNIQUE KEY `uk_configtagrelation_configidtag` (`id`, `tag_name`, `tag_type`), + KEY `idx_tenant_id` (`tenant_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='config_tag_relation'; -CREATE TABLE `group_capacity` +CREATE TABLE IF NOT EXISTS `group_capacity` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID', - `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群', - `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值', - `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量', - `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值', - `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值', - `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值', - `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量', - `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 
'创建时间', - `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键ID', + `group_id` VARCHAR(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群', + `quota` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值', + `usage` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '使用量', + `max_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值', + `max_aggr_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值', + `max_aggr_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值', + `max_history_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '最大变更历史数量', + `gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', PRIMARY KEY (`id`), UNIQUE KEY `uk_group_id` (`group_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表'; - +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='集群、各Group容量信息表'; -CREATE TABLE `his_config_info` +CREATE TABLE IF NOT EXISTS `his_config_info` ( - `id` bigint(64) unsigned NOT NULL, - `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT, - `data_id` varchar(255) NOT NULL, - `group_id` varchar(128) NOT NULL, - `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name', - `content` longtext NOT NULL, - `md5` varchar(32) DEFAULT NULL, - `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, - `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, - `src_user` text, - `src_ip` varchar(50) DEFAULT NULL, - `op_type` char(10) DEFAULT NULL, - `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段', - `encrypted_data_key` varchar(255) DEFAULT NULL, + `id` BIGINT(64) UNSIGNED NOT NULL, + `nid` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT, + `data_id` VARCHAR(255) NOT NULL, + `group_id` VARCHAR(128) NOT NULL, + `app_name` VARCHAR(128) DEFAULT NULL COMMENT 'app_name', + `content` LONGTEXT NOT NULL, + `md5` VARCHAR(32) DEFAULT NULL, + `gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `src_user` TEXT, + `src_ip` VARCHAR(50) DEFAULT NULL, + `op_type` CHAR(10) DEFAULT NULL, + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT '租户字段', + `encrypted_data_key` VARCHAR(255) DEFAULT NULL, PRIMARY KEY (`nid`), - KEY `idx_gmt_create` (`gmt_create`), - KEY `idx_gmt_modified` (`gmt_modified`), - KEY `idx_did` (`data_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造'; + KEY `idx_gmt_create` (`gmt_create`), + KEY `idx_gmt_modified` (`gmt_modified`), + KEY `idx_did` (`data_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='多租户改造'; - -CREATE TABLE `tenant_capacity` +CREATE TABLE IF NOT EXISTS `tenant_capacity` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID', - `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID', - `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值', - `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量', - `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值', - `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数', - `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值', - `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量', - `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modified` datetime NOT NULL DEFAULT 
CURRENT_TIMESTAMP COMMENT '修改时间', + `id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键ID', + `tenant_id` VARCHAR(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID', + `quota` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值', + `usage` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '使用量', + `max_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值', + `max_aggr_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数', + `max_aggr_size` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值', + `max_history_count` INT(10) UNSIGNED NOT NULL DEFAULT '0' COMMENT '最大变更历史数量', + `gmt_create` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `gmt_modified` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间', PRIMARY KEY (`id`), UNIQUE KEY `uk_tenant_id` (`tenant_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表'; - +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='租户容量信息表'; -CREATE TABLE `tenant_info` +CREATE TABLE IF NOT EXISTS `tenant_info` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id', - `kp` varchar(128) NOT NULL COMMENT 'kp', - `tenant_id` varchar(128) default '' COMMENT 'tenant_id', - `tenant_name` varchar(128) default '' COMMENT 'tenant_name', - `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc', - `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source', - `gmt_create` bigint(20) NOT NULL COMMENT '创建时间', - `gmt_modified` bigint(20) NOT NULL COMMENT '修改时间', + `id` BIGINT(20) NOT NULL AUTO_INCREMENT COMMENT 'id', + `kp` VARCHAR(128) NOT NULL COMMENT 'kp', + `tenant_id` VARCHAR(128) DEFAULT '' COMMENT 'tenant_id', + `tenant_name` VARCHAR(128) DEFAULT '' COMMENT 'tenant_name', + `tenant_desc` VARCHAR(256) DEFAULT NULL COMMENT 'tenant_desc', + `create_source` VARCHAR(32) DEFAULT NULL COMMENT 'create_source', + `gmt_create` BIGINT(20) NOT NULL COMMENT '创建时间', + `gmt_modified` BIGINT(20) NOT NULL COMMENT '修改时间', PRIMARY KEY (`id`), - UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`), - KEY `idx_tenant_id` (`tenant_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info'; + UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`, `tenant_id`), + KEY `idx_tenant_id` (`tenant_id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='tenant_info'; -CREATE TABLE `users` +CREATE TABLE IF NOT EXISTS `users` ( - `username` varchar(50) NOT NULL PRIMARY KEY, - `password` varchar(500) NOT NULL, - `enabled` boolean NOT NULL -); + `username` VARCHAR(50) NOT NULL PRIMARY KEY, + `password` VARCHAR(500) NOT NULL, + `enabled` BOOLEAN NOT NULL +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='users'; -CREATE TABLE `roles` +CREATE TABLE IF NOT EXISTS `roles` ( - `username` varchar(50) NOT NULL, - `role` varchar(50) NOT NULL, - UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE -); + `username` VARCHAR(50) NOT NULL, + `role` VARCHAR(50) NOT NULL, + UNIQUE INDEX `idx_user_role` (`username`, `role`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='roles'; -CREATE TABLE `permissions` +CREATE TABLE IF NOT EXISTS `permissions` ( - `role` varchar(50) NOT NULL, - `resource` varchar(255) NOT NULL, - `action` varchar(8) NOT NULL, - UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE -); - -INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE); -INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN'); \ No 
newline at end of file + `role` VARCHAR(50) NOT NULL, + `resource` VARCHAR(255) NOT NULL, + `action` VARCHAR(8) NOT NULL, + UNIQUE INDEX `uk_role_permission` (`role`, `resource`, `action`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='permissions'; + + +INSERT INTO users (username, password, enabled) +VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE); +INSERT INTO roles (username, role) +VALUES ('nacos', 'ROLE_ADMIN'); \ No newline at end of file diff --git a/doc/sql/xxl-job.sql b/doc/sql/xxl-job.sql index e0d45b0..6402237 100644 --- a/doc/sql/xxl-job.sql +++ b/doc/sql/xxl-job.sql @@ -1,125 +1,145 @@ -CREATE database if NOT EXISTS `xxl_job` default character set utf8mb4 collate utf8mb4_unicode_ci; -use `xxl_job`; +DROP DATABASE IF EXISTS `xxl_job`; -SET NAMES utf8mb4; +CREATE DATABASE `xxl_job`; -drop table IF EXISTS xxl_job_info; +USE `xxl_job`; -CREATE TABLE `xxl_job_info` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `job_group` int(11) NOT NULL COMMENT '执行器主键ID', - `job_desc` varchar(255) NOT NULL, - `add_time` datetime DEFAULT NULL, - `update_time` datetime DEFAULT NULL, - `author` varchar(64) DEFAULT NULL COMMENT '作者', - `alarm_email` varchar(255) DEFAULT NULL COMMENT '报警邮件', - `schedule_type` varchar(50) NOT NULL DEFAULT 'NONE' COMMENT '调度类型', - `schedule_conf` varchar(128) DEFAULT NULL COMMENT '调度配置,值含义取决于调度类型', - `misfire_strategy` varchar(50) NOT NULL DEFAULT 'DO_NOTHING' COMMENT '调度过期策略', - `executor_route_strategy` varchar(50) DEFAULT NULL COMMENT '执行器路由策略', - `executor_handler` varchar(255) DEFAULT NULL COMMENT '执行器任务handler', - `executor_param` varchar(512) DEFAULT NULL COMMENT '执行器任务参数', - `executor_block_strategy` varchar(50) DEFAULT NULL COMMENT '阻塞处理策略', - `executor_timeout` int(11) NOT NULL DEFAULT '0' COMMENT '任务执行超时时间,单位秒', - `executor_fail_retry_count` int(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数', - `glue_type` varchar(50) NOT NULL COMMENT 'GLUE类型', - `glue_source` mediumtext COMMENT 'GLUE源代码', - `glue_remark` varchar(128) DEFAULT NULL COMMENT 'GLUE备注', - `glue_updatetime` datetime DEFAULT NULL COMMENT 'GLUE更新时间', - `child_jobid` varchar(255) DEFAULT NULL COMMENT '子任务ID,多个逗号分隔', - `trigger_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '调度状态:0-停止,1-运行', - `trigger_last_time` bigint(13) NOT NULL DEFAULT '0' COMMENT '上次调度时间', - `trigger_next_time` bigint(13) NOT NULL DEFAULT '0' COMMENT '下次调度时间', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_info` +( + `id` INT(11) NOT NULL AUTO_INCREMENT, + `job_group` INT(11) NOT NULL COMMENT '执行器主键ID', + `job_desc` VARCHAR(255) NOT NULL, + `add_time` DATETIME DEFAULT NULL, + `update_time` DATETIME DEFAULT NULL, + `author` VARCHAR(64) DEFAULT NULL COMMENT '作者', + `alarm_email` VARCHAR(255) DEFAULT NULL COMMENT '报警邮件', + `schedule_type` VARCHAR(50) NOT NULL DEFAULT 'NONE' COMMENT '调度类型', + `schedule_conf` VARCHAR(128) DEFAULT NULL COMMENT '调度配置,值含义取决于调度类型', + `misfire_strategy` VARCHAR(50) NOT NULL DEFAULT 'DO_NOTHING' COMMENT '调度过期策略', + `executor_route_strategy` VARCHAR(50) DEFAULT NULL COMMENT '执行器路由策略', + `executor_handler` VARCHAR(255) DEFAULT NULL COMMENT '执行器任务handler', + `executor_param` VARCHAR(512) DEFAULT NULL COMMENT '执行器任务参数', + `executor_block_strategy` VARCHAR(50) DEFAULT NULL COMMENT '阻塞处理策略', + `executor_timeout` INT(11) NOT NULL DEFAULT '0' COMMENT '任务执行超时时间,单位秒', + `executor_fail_retry_count` INT(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数', + `glue_type` VARCHAR(50) NOT NULL COMMENT 'GLUE类型', + `glue_source` MEDIUMTEXT 
COMMENT 'GLUE源代码', + `glue_remark` VARCHAR(128) DEFAULT NULL COMMENT 'GLUE备注', + `glue_updatetime` DATETIME DEFAULT NULL COMMENT 'GLUE更新时间', + `child_jobid` VARCHAR(255) DEFAULT NULL COMMENT '子任务ID,多个逗号分隔', + `trigger_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '调度状态:0-停止,1-运行', + `trigger_last_time` BIGINT(13) NOT NULL DEFAULT '0' COMMENT '上次调度时间', + `trigger_next_time` BIGINT(13) NOT NULL DEFAULT '0' COMMENT '下次调度时间', + PRIMARY KEY (`id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_info'; -drop table IF EXISTS xxl_job_log; -CREATE TABLE `xxl_job_log` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `job_group` int(11) NOT NULL COMMENT '执行器主键ID', - `job_id` int(11) NOT NULL COMMENT '任务,主键ID', - `executor_address` varchar(255) DEFAULT NULL COMMENT '执行器地址,本次执行的地址', - `executor_handler` varchar(255) DEFAULT NULL COMMENT '执行器任务handler', - `executor_param` varchar(512) DEFAULT NULL COMMENT '执行器任务参数', - `executor_sharding_param` varchar(20) DEFAULT NULL COMMENT '执行器任务分片参数,格式如 1/2', - `executor_fail_retry_count` int(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数', - `trigger_time` datetime DEFAULT NULL COMMENT '调度-时间', - `trigger_code` int(11) NOT NULL COMMENT '调度-结果', - `trigger_msg` text COMMENT '调度-日志', - `handle_time` datetime DEFAULT NULL COMMENT '执行-时间', - `handle_code` int(11) NOT NULL COMMENT '执行-状态', - `handle_msg` text COMMENT '执行-日志', - `alarm_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '告警状态:0-默认、1-无需告警、2-告警成功、3-告警失败', - PRIMARY KEY (`id`), - KEY `I_trigger_time` (`trigger_time`), - KEY `I_handle_code` (`handle_code`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_log` +( + `id` BIGINT(20) NOT NULL AUTO_INCREMENT, + `job_group` INT(11) NOT NULL COMMENT '执行器主键ID', + `job_id` INT(11) NOT NULL COMMENT '任务,主键ID', + `executor_address` VARCHAR(255) DEFAULT NULL COMMENT '执行器地址,本次执行的地址', + `executor_handler` VARCHAR(255) DEFAULT NULL COMMENT '执行器任务handler', + `executor_param` VARCHAR(512) DEFAULT NULL COMMENT '执行器任务参数', + `executor_sharding_param` VARCHAR(20) DEFAULT NULL COMMENT '执行器任务分片参数,格式如 1/2', + `executor_fail_retry_count` INT(11) NOT NULL DEFAULT '0' COMMENT '失败重试次数', + `trigger_time` DATETIME DEFAULT NULL COMMENT '调度-时间', + `trigger_code` INT(11) NOT NULL COMMENT '调度-结果', + `trigger_msg` TEXT COMMENT '调度-日志', + `handle_time` DATETIME DEFAULT NULL COMMENT '执行-时间', + `handle_code` INT(11) NOT NULL COMMENT '执行-状态', + `handle_msg` TEXT COMMENT '执行-日志', + `alarm_status` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '告警状态:0-默认、1-无需告警、2-告警成功、3-告警失败', + PRIMARY KEY (`id`), + KEY `I_trigger_time` (`trigger_time`), + KEY `I_handle_code` (`handle_code`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_log'; -drop table IF EXISTS xxl_job_log_report; -CREATE TABLE `xxl_job_log_report` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `trigger_day` datetime DEFAULT NULL COMMENT '调度-时间', - `running_count` int(11) NOT NULL DEFAULT '0' COMMENT '运行中-日志数量', - `suc_count` int(11) NOT NULL DEFAULT '0' COMMENT '执行成功-日志数量', - `fail_count` int(11) NOT NULL DEFAULT '0' COMMENT '执行失败-日志数量', - `update_time` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `i_trigger_day` (`trigger_day`) USING BTREE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_log_report` +( + `id` INT(11) NOT NULL AUTO_INCREMENT, + `trigger_day` DATETIME DEFAULT NULL COMMENT '调度-时间', + `running_count` INT(11) NOT NULL DEFAULT '0' COMMENT '运行中-日志数量', + `suc_count` INT(11) NOT NULL DEFAULT '0' COMMENT '执行成功-日志数量', + `fail_count` INT(11) NOT NULL 
DEFAULT '0' COMMENT '执行失败-日志数量', + `update_time` DATETIME DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `i_trigger_day` (`trigger_day`) USING BTREE +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_log_report'; -drop table IF EXISTS xxl_job_logglue; -CREATE TABLE `xxl_job_logglue` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `job_id` int(11) NOT NULL COMMENT '任务,主键ID', - `glue_type` varchar(50) DEFAULT NULL COMMENT 'GLUE类型', - `glue_source` mediumtext COMMENT 'GLUE源代码', - `glue_remark` varchar(128) NOT NULL COMMENT 'GLUE备注', - `add_time` datetime DEFAULT NULL, - `update_time` datetime DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_logglue` +( + `id` INT(11) NOT NULL AUTO_INCREMENT, + `job_id` INT(11) NOT NULL COMMENT '任务,主键ID', + `glue_type` VARCHAR(50) DEFAULT NULL COMMENT 'GLUE类型', + `glue_source` MEDIUMTEXT COMMENT 'GLUE源代码', + `glue_remark` VARCHAR(128) NOT NULL COMMENT 'GLUE备注', + `add_time` DATETIME DEFAULT NULL, + `update_time` DATETIME DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_logglue'; -drop table IF EXISTS xxl_job_registry; -CREATE TABLE `xxl_job_registry` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `registry_group` varchar(50) NOT NULL, - `registry_key` varchar(255) NOT NULL, - `registry_value` varchar(255) NOT NULL, - `update_time` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `i_g_k_v` (`registry_group`,`registry_key`,`registry_value`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_registry` +( + `id` INT(11) NOT NULL AUTO_INCREMENT, + `registry_group` VARCHAR(50) NOT NULL, + `registry_key` VARCHAR(255) NOT NULL, + `registry_value` VARCHAR(255) NOT NULL, + `update_time` DATETIME DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `i_g_k_v` (`registry_group`, `registry_key`, `registry_value`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_registry'; -drop table IF EXISTS xxl_job_group; -CREATE TABLE `xxl_job_group` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `app_name` varchar(64) NOT NULL COMMENT '执行器AppName', - `title` varchar(12) NOT NULL COMMENT '执行器名称', - `address_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT '执行器地址类型:0=自动注册、1=手动录入', - `address_list` text COMMENT '执行器地址列表,多地址逗号分隔', - `update_time` datetime DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_group` +( + `id` INT(11) NOT NULL AUTO_INCREMENT, + `app_name` VARCHAR(64) NOT NULL COMMENT '执行器AppName', + `title` VARCHAR(12) NOT NULL COMMENT '执行器名称', + `address_type` TINYINT(4) NOT NULL DEFAULT '0' COMMENT '执行器地址类型:0=自动注册、1=手动录入', + `address_list` TEXT COMMENT '执行器地址列表,多地址逗号分隔', + `update_time` DATETIME DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_group'; -drop table IF EXISTS xxl_job_user; -CREATE TABLE `xxl_job_user` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `username` varchar(50) NOT NULL COMMENT '账号', - `password` varchar(50) NOT NULL COMMENT '密码', - `role` tinyint(4) NOT NULL COMMENT '角色:0-普通用户、1-管理员', - `permission` varchar(255) DEFAULT NULL COMMENT '权限:执行器ID列表,多个逗号分割', - PRIMARY KEY (`id`), - UNIQUE KEY `i_username` (`username`) USING BTREE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_user` +( + `id` INT(11) NOT NULL AUTO_INCREMENT, + `username` VARCHAR(50) NOT NULL COMMENT '账号', + `password` VARCHAR(50) NOT NULL COMMENT '密码', + `role` TINYINT(4) NOT NULL COMMENT 
'角色:0-普通用户、1-管理员', + `permission` VARCHAR(255) DEFAULT NULL COMMENT '权限:执行器ID列表,多个逗号分割', + PRIMARY KEY (`id`), + UNIQUE KEY `i_username` (`username`) USING BTREE +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_user'; -drop table IF EXISTS xxl_job_lock; -CREATE TABLE `xxl_job_lock` ( - `lock_name` varchar(50) NOT NULL COMMENT '锁名称', - PRIMARY KEY (`lock_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +CREATE TABLE IF NOT EXISTS `xxl_job_lock` +( + `lock_name` VARCHAR(50) NOT NULL COMMENT '锁名称', + PRIMARY KEY (`lock_name`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4 COMMENT ='xxl_job_lock'; -INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`, `update_time`) VALUES (1, 'xxl-job-executor-sample', '示例执行器', 0, NULL, '2018-11-03 22:21:31' ); -INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `schedule_type`, `schedule_conf`, `misfire_strategy`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`) VALUES (1, 1, '测试任务1', '2018-11-03 22:21:31', '2018-11-03 22:21:31', 'XXL', '', 'CRON', '0 0 0 * * ? *', 'DO_NOTHING', 'FIRST', 'demoJobHandler', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2018-11-03 22:21:31', ''); -INSERT INTO `xxl_job_user`(`id`, `username`, `password`, `role`, `permission`) VALUES (1, 'admin', 'e10adc3949ba59abbe56e057f20f883e', 1, NULL); -INSERT INTO `xxl_job_lock` ( `lock_name`) VALUES ( 'schedule_lock'); +INSERT INTO `xxl_job_group` (`id`, `app_name`, `title`, `address_type`, `address_list`, `update_time`) +VALUES (1, 'xxl-job-executor-sample', '示例执行器', 0, NULL, '2018-11-03 22:21:31'); +INSERT INTO `xxl_job_info` (`id`, `job_group`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, + `schedule_type`, `schedule_conf`, `misfire_strategy`, `executor_route_strategy`, + `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, + `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, + `child_jobid`) +VALUES (1, 1, '测试任务1', '2018-11-03 22:21:31', '2018-11-03 22:21:31', 'XXL', '', 'CRON', '0 0 0 * * ? 
*', + 'DO_NOTHING', 'FIRST', 'demoJobHandler', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', + '2018-11-03 22:21:31', ''); + +INSERT INTO xxl_job_user (id, username, password, role, permission) +VALUES (1, 'admin', 'e10adc3949ba59abbe56e057f20f883e', 1, NULL); + +INSERT INTO xxl_job_lock (lock_name) +VALUES ('schedule_lock'); \ No newline at end of file From c51257e621c2b5426285be6153ae32f6a432c3ec Mon Sep 17 00:00:00 2001 From: xiaoxiamo <82970607@qq.com> Date: Wed, 17 Jul 2024 22:47:53 +0800 Subject: [PATCH 2/5] =?UTF-8?q?style:=20=E8=A7=84=E8=8C=83=E5=8C=96?= =?UTF-8?q?=E9=A1=B9=E7=9B=AE=EF=BC=88=E4=BA=8C=EF=BC=89=EF=BC=9A=E8=A7=A3?= =?UTF-8?q?=E5=86=B3=E4=B8=89=E5=8D=81=E4=BD=99=E5=A4=84=E4=BB=A3=E7=A0=81?= =?UTF-8?q?=E7=BC=96=E8=AF=91=E8=AD=A6=E5=91=8A?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../com/java3y/austin/cron/handler/CronTaskHandler.java | 2 +- .../cron/handler/NightShieldLazyPendingHandler.java | 6 +++--- .../com/java3y/austin/datahouse/AustinHiveBootStrap.java | 2 +- .../austin/handler/config/AlipayClientSingleton.java | 2 +- .../com/java3y/austin/handler/handler/HandlerHolder.java | 2 +- .../java3y/austin/handler/handler/impl/SmsHandler.java | 7 ++----- .../java3y/austin/handler/pending/TaskPendingHolder.java | 4 ++-- .../austin/handler/receiver/kafka/ReceiverStart.java | 2 +- .../austin/handler/script/impl/TencentSmsScript.java | 2 +- .../austin/handler/script/impl/YunPianSmsScript.java | 3 +-- .../service/api/impl/action/recall/RecallMqAction.java | 2 +- .../austin/service/api/impl/action/send/SendMqAction.java | 2 +- .../com/java3y/austin/stream/utils/LettuceRedisUtils.java | 2 +- .../support/mq/eventbus/EventBusSendMqServiceImpl.java | 2 +- .../austin/support/mq/kafka/KafkaSendMqServiceImpl.java | 4 ++-- .../mq/springeventbus/AustinSpringEventBusEvent.java | 2 +- .../austin/support/service/impl/ConfigServiceImpl.java | 2 +- .../com/java3y/austin/support/utils/AccountUtils.java | 4 ++-- .../java3y/austin/web/service/impl/DataServiceImpl.java | 2 +- .../austin/web/service/impl/MaterialServiceImpl.java | 2 +- .../java/com/java3y/austin/web/utils/Convert4Amis.java | 8 ++++---- 21 files changed, 30 insertions(+), 34 deletions(-) diff --git a/austin-cron/src/main/java/com/java3y/austin/cron/handler/CronTaskHandler.java b/austin-cron/src/main/java/com/java3y/austin/cron/handler/CronTaskHandler.java index 7587e3c..0c8efbd 100644 --- a/austin-cron/src/main/java/com/java3y/austin/cron/handler/CronTaskHandler.java +++ b/austin-cron/src/main/java/com/java3y/austin/cron/handler/CronTaskHandler.java @@ -25,7 +25,7 @@ public class CronTaskHandler { @Autowired private ThreadPoolUtils threadPoolUtils; - private DtpExecutor dtpExecutor = CronAsyncThreadPoolConfig.getXxlCronExecutor(); + private final DtpExecutor dtpExecutor = CronAsyncThreadPoolConfig.getXxlCronExecutor(); /** * 处理后台的 austin 定时任务消息 diff --git a/austin-cron/src/main/java/com/java3y/austin/cron/handler/NightShieldLazyPendingHandler.java b/austin-cron/src/main/java/com/java3y/austin/cron/handler/NightShieldLazyPendingHandler.java index ed29faa..d70953f 100644 --- a/austin-cron/src/main/java/com/java3y/austin/cron/handler/NightShieldLazyPendingHandler.java +++ b/austin-cron/src/main/java/com/java3y/austin/cron/handler/NightShieldLazyPendingHandler.java @@ -14,7 +14,7 @@ import org.springframework.beans.factory.annotation.Value; import org.springframework.kafka.core.KafkaTemplate; import org.springframework.stereotype.Service; -import java.util.Arrays; +import 
java.util.Collections; /** @@ -48,8 +48,8 @@ public class NightShieldLazyPendingHandler { String taskInfo = redisUtils.lPop(NIGHT_SHIELD_BUT_NEXT_DAY_SEND_KEY); if (CharSequenceUtil.isNotBlank(taskInfo)) { try { - kafkaTemplate.send(topicName, JSON.toJSONString(Arrays.asList(JSON.parseObject(taskInfo, TaskInfo.class)) - , new SerializerFeature[]{SerializerFeature.WriteClassName})); + kafkaTemplate.send(topicName, JSON.toJSONString(Collections.singletonList(JSON.parseObject(taskInfo, TaskInfo.class)) + , SerializerFeature.WriteClassName)); } catch (Exception e) { log.error("nightShieldLazyJob send kafka fail! e:{},params:{}", Throwables.getStackTraceAsString(e), taskInfo); } diff --git a/austin-data-house/src/main/java/com/java3y/austin/datahouse/AustinHiveBootStrap.java b/austin-data-house/src/main/java/com/java3y/austin/datahouse/AustinHiveBootStrap.java index 656e264..b87d2f5 100644 --- a/austin-data-house/src/main/java/com/java3y/austin/datahouse/AustinHiveBootStrap.java +++ b/austin-data-house/src/main/java/com/java3y/austin/datahouse/AustinHiveBootStrap.java @@ -74,7 +74,7 @@ public class AustinHiveBootStrap { // 3. 将kafka_source 数据写入到kafka_sink 完成 tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT); - tableEnv.executeSql("INSERT INTO " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SINK_TABLE_NAME + " SELECT ids,state,businessId,logTimestamp FROM " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SOURCE_TABLE_NAME + ""); + tableEnv.executeSql("INSERT INTO " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SINK_TABLE_NAME + " SELECT ids,state,businessId,logTimestamp FROM " + DataHouseConstant.CATALOG_DEFAULT_DATABASE + "." + DataHouseConstant.KAFKA_SOURCE_TABLE_NAME); } } diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java b/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java index 96dfae3..a221bce 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java @@ -18,7 +18,7 @@ import java.util.Map; public class AlipayClientSingleton { - private static Map alipayClientMap = new HashMap<>(); + private static final Map alipayClientMap = new HashMap<>(); private AlipayClientSingleton() { } diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/HandlerHolder.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/HandlerHolder.java index 645ba89..9303d4d 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/HandlerHolder.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/HandlerHolder.java @@ -14,7 +14,7 @@ import java.util.Map; @Component public class HandlerHolder { - private Map handlers = new HashMap<>(128); + private final Map handlers = new HashMap<>(128); public void putHandler(Integer channelCode, Handler handler) { handlers.put(channelCode, handler); diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java index 00b7b2f..68f140c 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java @@ -24,10 +24,7 @@ import 
org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.ApplicationContext; import org.springframework.stereotype.Component; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Random; +import java.util.*; /** * 短信发送处理 @@ -142,7 +139,7 @@ public class SmsHandler extends BaseHandler{ */ if (!taskInfo.getSendAccount().equals(AUTO_FLOW_RULE)) { SmsAccount account = accountUtils.getAccountById(taskInfo.getSendAccount(), SmsAccount.class); - return Arrays.asList(MessageTypeSmsConfig.builder().sendAccount(taskInfo.getSendAccount()).scriptName(account.getScriptName()).weights(100).build()); + return Collections.singletonList(MessageTypeSmsConfig.builder().sendAccount(taskInfo.getSendAccount()).scriptName(account.getScriptName()).weights(100).build()); } /** diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java b/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java index 19aec62..8997cdb 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java @@ -24,10 +24,10 @@ public class TaskPendingHolder { /** * 获取得到所有的groupId */ - private static List groupIds = GroupIdMappingUtils.getAllGroupIds(); + private static final List groupIds = GroupIdMappingUtils.getAllGroupIds(); @Autowired private ThreadPoolUtils threadPoolUtils; - private Map holder = new HashMap<>(32); + private final Map holder = new HashMap<>(32); /** * 给每个渠道,每种消息类型初始化一个线程池 diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java b/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java index 4ba8cc0..f0252cb 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java @@ -39,7 +39,7 @@ public class ReceiverStart { /** * 获取得到所有的groupId */ - private static List groupIds = GroupIdMappingUtils.getAllGroupIds(); + private static final List groupIds = GroupIdMappingUtils.getAllGroupIds(); /** * 下标(用于迭代groupIds位置) */ diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/TencentSmsScript.java b/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/TencentSmsScript.java index 520ad94..b08b9b7 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/TencentSmsScript.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/TencentSmsScript.java @@ -152,7 +152,7 @@ public class TencentSmsScript implements SmsScript { */ private List assemblePullSmsRecord(TencentSmsAccount account, PullSmsSendStatusResponse resp) { List smsRecordList = new ArrayList<>(); - if (Objects.nonNull(resp) && Objects.nonNull(resp.getPullSmsSendStatusSet()) && resp.getPullSmsSendStatusSet().length > 0) { + if (Objects.nonNull(resp) && Objects.nonNull(resp.getPullSmsSendStatusSet())) { for (PullSmsSendStatus pullSmsSendStatus : resp.getPullSmsSendStatusSet()) { SmsRecord smsRecord = SmsRecord.builder() .sendDate(Integer.valueOf(DateUtil.format(new Date(), DatePattern.PURE_DATE_PATTERN))) diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/YunPianSmsScript.java b/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/YunPianSmsScript.java index 01c6a61..b411de4 
100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/YunPianSmsScript.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/script/impl/YunPianSmsScript.java @@ -6,7 +6,6 @@ import cn.hutool.core.net.URLEncodeUtil; import cn.hutool.core.text.CharSequenceUtil; import cn.hutool.core.text.StrPool; import cn.hutool.core.util.ArrayUtil; -import cn.hutool.core.util.StrUtil; import cn.hutool.http.Header; import cn.hutool.http.HttpRequest; import com.alibaba.fastjson.JSON; @@ -38,7 +37,7 @@ public class YunPianSmsScript implements SmsScript { private static final String PARAMS_SPLIT_KEY = "{|}"; private static final String PARAMS_KV_SPLIT_KEY = "{:}"; - private static Logger log = LoggerFactory.getLogger(YunPianSmsScript.class); + private static final Logger log = LoggerFactory.getLogger(YunPianSmsScript.class); @Autowired private AccountUtils accountUtils; diff --git a/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/recall/RecallMqAction.java b/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/recall/RecallMqAction.java index 1afbdb0..4d91fcd 100644 --- a/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/recall/RecallMqAction.java +++ b/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/recall/RecallMqAction.java @@ -37,7 +37,7 @@ public class RecallMqAction implements BusinessProcess { public void process(ProcessContext context) { RecallTaskInfo recallTaskInfo = context.getProcessModel().getRecallTaskInfo(); try { - String message = JSON.toJSONString(recallTaskInfo, new SerializerFeature[]{SerializerFeature.WriteClassName}); + String message = JSON.toJSONString(recallTaskInfo, SerializerFeature.WriteClassName); sendMqService.send(austinRecall, message, tagId); } catch (Exception e) { context.setNeedBreak(true).setResponse(BasicResultVO.fail(RespStatusEnum.SERVICE_ERROR)); diff --git a/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/send/SendMqAction.java b/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/send/SendMqAction.java index 0fee9da..2e07b6d 100644 --- a/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/send/SendMqAction.java +++ b/austin-service-api-impl/src/main/java/com/java3y/austin/service/api/impl/action/send/SendMqAction.java @@ -47,7 +47,7 @@ public class SendMqAction implements BusinessProcess { SendTaskModel sendTaskModel = context.getProcessModel(); List taskInfo = sendTaskModel.getTaskInfo(); try { - String message = JSON.toJSONString(sendTaskModel.getTaskInfo(), new SerializerFeature[]{SerializerFeature.WriteClassName}); + String message = JSON.toJSONString(sendTaskModel.getTaskInfo(), SerializerFeature.WriteClassName); sendMqService.send(sendMessageTopic, message, tagId); context.setResponse(BasicResultVO.success(taskInfo.stream().map(v -> SimpleTaskInfo.builder().businessId(v.getBusinessId()).messageId(v.getMessageId()).bizId(v.getBizId()).build()).collect(Collectors.toList()))); diff --git a/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java b/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java index 0639839..af41f52 100644 --- a/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java +++ b/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java @@ -23,7 +23,7 @@ public class LettuceRedisUtils { /** * 初始化 
redisClient */ - private static RedisClient redisClient; + private static final RedisClient redisClient; static { RedisURI redisUri = RedisURI.Builder.redis(AustinFlinkConstant.REDIS_IP) diff --git a/austin-support/src/main/java/com/java3y/austin/support/mq/eventbus/EventBusSendMqServiceImpl.java b/austin-support/src/main/java/com/java3y/austin/support/mq/eventbus/EventBusSendMqServiceImpl.java index e5d3a28..4bcfedc 100644 --- a/austin-support/src/main/java/com/java3y/austin/support/mq/eventbus/EventBusSendMqServiceImpl.java +++ b/austin-support/src/main/java/com/java3y/austin/support/mq/eventbus/EventBusSendMqServiceImpl.java @@ -21,7 +21,7 @@ import org.springframework.stereotype.Service; @Service @ConditionalOnProperty(name = "austin.mq.pipeline", havingValue = MessageQueuePipeline.EVENT_BUS) public class EventBusSendMqServiceImpl implements SendMqService { - private EventBus eventBus = new EventBus(); + private final EventBus eventBus = new EventBus(); @Autowired private EventBusListener eventBusListener; diff --git a/austin-support/src/main/java/com/java3y/austin/support/mq/kafka/KafkaSendMqServiceImpl.java b/austin-support/src/main/java/com/java3y/austin/support/mq/kafka/KafkaSendMqServiceImpl.java index 16d8166..bd19329 100644 --- a/austin-support/src/main/java/com/java3y/austin/support/mq/kafka/KafkaSendMqServiceImpl.java +++ b/austin-support/src/main/java/com/java3y/austin/support/mq/kafka/KafkaSendMqServiceImpl.java @@ -14,7 +14,7 @@ import org.springframework.kafka.core.KafkaTemplate; import org.springframework.stereotype.Service; import java.nio.charset.StandardCharsets; -import java.util.Arrays; +import java.util.Collections; import java.util.List; @@ -36,7 +36,7 @@ public class KafkaSendMqServiceImpl implements SendMqService { @Override public void send(String topic, String jsonValue, String tagId) { if (CharSequenceUtil.isNotBlank(tagId)) { - List
headers = Arrays.asList(new RecordHeader(tagIdKey, tagId.getBytes(StandardCharsets.UTF_8))); + List
headers = Collections.singletonList(new RecordHeader(tagIdKey, tagId.getBytes(StandardCharsets.UTF_8))); kafkaTemplate.send(new ProducerRecord(topic, null, null, null, jsonValue, headers)); return; } diff --git a/austin-support/src/main/java/com/java3y/austin/support/mq/springeventbus/AustinSpringEventBusEvent.java b/austin-support/src/main/java/com/java3y/austin/support/mq/springeventbus/AustinSpringEventBusEvent.java index 961a6ce..cb118d6 100644 --- a/austin-support/src/main/java/com/java3y/austin/support/mq/springeventbus/AustinSpringEventBusEvent.java +++ b/austin-support/src/main/java/com/java3y/austin/support/mq/springeventbus/AustinSpringEventBusEvent.java @@ -12,7 +12,7 @@ import org.springframework.context.ApplicationEvent; @Getter public class AustinSpringEventBusEvent extends ApplicationEvent { - private AustinSpringEventSource austinSpringEventSource; + private final AustinSpringEventSource austinSpringEventSource; public AustinSpringEventBusEvent(Object source, AustinSpringEventSource austinSpringEventSource) { super(source); diff --git a/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java b/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java index b38351c..dd8086e 100644 --- a/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java +++ b/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java @@ -23,7 +23,7 @@ public class ConfigServiceImpl implements ConfigService { * 本地配置 */ private static final String PROPERTIES_PATH = "local.properties"; - private Props props = new Props(PROPERTIES_PATH, StandardCharsets.UTF_8); + private final Props props = new Props(PROPERTIES_PATH, StandardCharsets.UTF_8); /** * apollo配置 diff --git a/austin-support/src/main/java/com/java3y/austin/support/utils/AccountUtils.java b/austin-support/src/main/java/com/java3y/austin/support/utils/AccountUtils.java index 91817aa..e1ece79 100644 --- a/austin-support/src/main/java/com/java3y/austin/support/utils/AccountUtils.java +++ b/austin-support/src/main/java/com/java3y/austin/support/utils/AccountUtils.java @@ -45,8 +45,8 @@ public class AccountUtils { /** * 消息的小程序/微信服务号账号 */ - private ConcurrentMap officialAccountServiceMap = new ConcurrentHashMap<>(); - private ConcurrentMap miniProgramServiceMap = new ConcurrentHashMap<>(); + private final ConcurrentMap officialAccountServiceMap = new ConcurrentHashMap<>(); + private final ConcurrentMap miniProgramServiceMap = new ConcurrentHashMap<>(); @Bean public RedisTemplateWxRedisOps redisTemplateWxRedisOps() { diff --git a/austin-web/src/main/java/com/java3y/austin/web/service/impl/DataServiceImpl.java b/austin-web/src/main/java/com/java3y/austin/web/service/impl/DataServiceImpl.java index 8057203..3c9eeb4 100644 --- a/austin-web/src/main/java/com/java3y/austin/web/service/impl/DataServiceImpl.java +++ b/austin-web/src/main/java/com/java3y/austin/web/service/impl/DataServiceImpl.java @@ -101,7 +101,7 @@ public class DataServiceImpl implements DataService { Integer sendDate = Integer.valueOf(DateUtil.format(new Date(dataParam.getDateTime() * 1000L), DatePattern.PURE_DATE_PATTERN)); List smsRecordList = smsRecordDao.findByPhoneAndSendDate(Long.valueOf(dataParam.getReceiver()), sendDate); if (CollUtil.isEmpty(smsRecordList)) { - return SmsTimeLineVo.builder().items(Arrays.asList(SmsTimeLineVo.ItemsVO.builder().build())).build(); + return 
SmsTimeLineVo.builder().items(Collections.singletonList(SmsTimeLineVo.ItemsVO.builder().build())).build(); } Map> maps = smsRecordList.stream().collect(Collectors.groupingBy(o -> o.getPhone() + o.getSeriesId())); diff --git a/austin-web/src/main/java/com/java3y/austin/web/service/impl/MaterialServiceImpl.java b/austin-web/src/main/java/com/java3y/austin/web/service/impl/MaterialServiceImpl.java index dfe1c87..ead5443 100644 --- a/austin-web/src/main/java/com/java3y/austin/web/service/impl/MaterialServiceImpl.java +++ b/austin-web/src/main/java/com/java3y/austin/web/service/impl/MaterialServiceImpl.java @@ -55,7 +55,7 @@ public class MaterialServiceImpl implements MaterialService { String accessToken = accessTokenUtils.getAccessToken(ChannelType.DING_DING_WORK_NOTICE.getCode(), Integer.valueOf(sendAccount), account, false); DingTalkClient client = new DefaultDingTalkClient(SendChanelUrlConstant.DING_DING_UPLOAD_URL); OapiMediaUploadRequest req = new OapiMediaUploadRequest(); - FileItem item = new FileItem(new StringBuilder().append(IdUtil.fastSimpleUUID()).append(file.getOriginalFilename()).toString(), + FileItem item = new FileItem(IdUtil.fastSimpleUUID() + file.getOriginalFilename(), file.getInputStream()); req.setMedia(item); req.setType(EnumUtil.getDescriptionByCode(Integer.valueOf(fileType), FileType.class)); diff --git a/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java b/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java index 729ae87..2563a4a 100644 --- a/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java +++ b/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java @@ -58,7 +58,7 @@ public class Convert4Amis { * 需要打散的字段(将json字符串打散为一个一个字段返回) * (主要是用于回显数据) */ - private static final List FLAT_FIELD_NAME = Arrays.asList("msgContent"); + private static final List FLAT_FIELD_NAME = Collections.singletonList("msgContent"); /** * 需要格式化为jsonArray返回的字段 @@ -350,7 +350,7 @@ public class Convert4Amis { List columnsDtoS = new ArrayList<>(); //使用i作为变量循环 for (int i=0;i Date: Fri, 19 Jul 2024 17:29:34 +0800 Subject: [PATCH 3/5] =?UTF-8?q?style:=20=E8=A7=84=E8=8C=83=E5=8C=96?= =?UTF-8?q?=E9=A1=B9=E7=9B=AE=EF=BC=88=E4=B8=89=EF=BC=89=EF=BC=9A=E5=91=BD?= =?UTF-8?q?=E5=90=8D=E8=A7=84=E8=8C=83=E4=BF=AE=E6=94=B9=E3=80=81docker-co?= =?UTF-8?q?mpose=E6=96=87=E4=BB=B6=E8=A7=84=E8=8C=83=E3=80=81=E5=88=A0?= =?UTF-8?q?=E9=99=A4=E6=97=A0=E7=94=A8=E6=8A=A5=E9=94=99=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../handler/config/AlipayClientSingleton.java | 10 +- .../impl/AlipayMiniProgramAccountHandler.java | 2 +- .../handler/pending/TaskPendingHolder.java | 4 +- .../handler/receiver/kafka/ReceiverStart.java | 6 +- .../stream/utils/LettuceRedisUtils.java | 6 +- doc/docker/graylog/docker-compose.yml | 10 +- doc/docker/mysql/docker-compose.yml | 2 +- doc/docker/nacos/cluster/docker-compose.yml | 8 +- doc/docker/nacos/single/docker-compose.yml | 6 +- .../rabbitmq/config/rabbitmq_bak.config | 925 ------------------ doc/docker/redis/docker-compose.yaml | 2 +- .../rocketmq/docker-compose-rocketmq.yml | 8 +- docker-compose.yml | 6 +- 13 files changed, 35 insertions(+), 960 deletions(-) delete mode 100644 doc/docker/rabbitmq/rabbitmq/config/rabbitmq_bak.config diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java b/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java index a221bce..1c7752b 100644 
--- a/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/config/AlipayClientSingleton.java @@ -18,15 +18,15 @@ import java.util.Map; public class AlipayClientSingleton { - private static final Map alipayClientMap = new HashMap<>(); + private static final Map ALIPAY_CLIENT_MAP = new HashMap<>(); private AlipayClientSingleton() { } public static DefaultAlipayClient getSingleton(AlipayMiniProgramAccount alipayMiniProgramAccount) throws AlipayApiException { - if (!alipayClientMap.containsKey(alipayMiniProgramAccount.getAppId())) { + if (!ALIPAY_CLIENT_MAP.containsKey(alipayMiniProgramAccount.getAppId())) { synchronized (DefaultAlipayClient.class) { - if (!alipayClientMap.containsKey(alipayMiniProgramAccount.getAppId())) { + if (!ALIPAY_CLIENT_MAP.containsKey(alipayMiniProgramAccount.getAppId())) { AlipayConfig alipayConfig = new AlipayConfig(); alipayConfig.setServerUrl(SendChanelUrlConstant.ALI_MINI_PROGRAM_GATEWAY_URL); alipayConfig.setAppId(alipayMiniProgramAccount.getAppId()); @@ -35,10 +35,10 @@ public class AlipayClientSingleton { alipayConfig.setAlipayPublicKey(alipayMiniProgramAccount.getAlipayPublicKey()); alipayConfig.setCharset("utf-8"); alipayConfig.setSignType("RSA2"); - alipayClientMap.put(alipayMiniProgramAccount.getAppId(), new DefaultAlipayClient(alipayConfig)); + ALIPAY_CLIENT_MAP.put(alipayMiniProgramAccount.getAppId(), new DefaultAlipayClient(alipayConfig)); } } } - return alipayClientMap.get(alipayMiniProgramAccount.getAppId()); + return ALIPAY_CLIENT_MAP.get(alipayMiniProgramAccount.getAppId()); } } diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/AlipayMiniProgramAccountHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/AlipayMiniProgramAccountHandler.java index 73e8e95..ecdbf60 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/AlipayMiniProgramAccountHandler.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/AlipayMiniProgramAccountHandler.java @@ -67,7 +67,7 @@ public class AlipayMiniProgramAccountHandler extends BaseHandler{ .collect(Collectors.toMap( Map.Entry::getKey, entry -> { - Map valueMap = new HashMap<>(); + Map valueMap = new HashMap<>(1); valueMap.put("value", entry.getValue()); return valueMap; } diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java b/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java index 8997cdb..277db78 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/pending/TaskPendingHolder.java @@ -24,7 +24,7 @@ public class TaskPendingHolder { /** * 获取得到所有的groupId */ - private static final List groupIds = GroupIdMappingUtils.getAllGroupIds(); + private static final List GROUP_IDS = GroupIdMappingUtils.getAllGroupIds(); @Autowired private ThreadPoolUtils threadPoolUtils; private final Map holder = new HashMap<>(32); @@ -39,7 +39,7 @@ public class TaskPendingHolder { * * 可以通过apollo配置:dynamic-tp-apollo-dtp.yml 动态修改线程池的信息 */ - for (String groupId : groupIds) { + for (String groupId : GROUP_IDS) { DtpExecutor executor = HandlerThreadPoolConfig.getExecutor(groupId); threadPoolUtils.register(executor); diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java 
b/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java index f0252cb..2e4d0d6 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/receiver/kafka/ReceiverStart.java @@ -39,7 +39,7 @@ public class ReceiverStart { /** * 获取得到所有的groupId */ - private static final List groupIds = GroupIdMappingUtils.getAllGroupIds(); + private static final List GROUP_IDS = GroupIdMappingUtils.getAllGroupIds(); /** * 下标(用于迭代groupIds位置) */ @@ -58,7 +58,7 @@ public class ReceiverStart { if (element instanceof Method) { String name = ((Method) element).getDeclaringClass().getSimpleName() + StrPool.DOT + ((Method) element).getName(); if (RECEIVER_METHOD_NAME.equals(name)) { - attrs.put("groupId", groupIds.get(index++)); + attrs.put("groupId", GROUP_IDS.get(index++)); } } return attrs; @@ -70,7 +70,7 @@ public class ReceiverStart { */ @PostConstruct public void init() { - for (int i = 0; i < groupIds.size(); i++) { + for (int i = 0; i < GROUP_IDS.size(); i++) { context.getBean(Receiver.class); } } diff --git a/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java b/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java index af41f52..84e7e15 100644 --- a/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java +++ b/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java @@ -23,14 +23,14 @@ public class LettuceRedisUtils { /** * 初始化 redisClient */ - private static final RedisClient redisClient; + private static final RedisClient REDIS_CLIENT; static { RedisURI redisUri = RedisURI.Builder.redis(AustinFlinkConstant.REDIS_IP) .withPort(Integer.valueOf(AustinFlinkConstant.REDIS_PORT)) .withPassword(AustinFlinkConstant.REDIS_PASSWORD.toCharArray()) .build(); - redisClient = RedisClient.create(redisUri); + REDIS_CLIENT = RedisClient.create(redisUri); } private LettuceRedisUtils() { @@ -41,7 +41,7 @@ public class LettuceRedisUtils { * 封装pipeline操作 */ public static void pipeline(RedisPipelineCallBack pipelineCallBack) { - StatefulRedisConnection connect = redisClient.connect(new ByteArrayCodec()); + StatefulRedisConnection connect = REDIS_CLIENT.connect(new ByteArrayCodec()); RedisAsyncCommands commands = connect.async(); List> futures = pipelineCallBack.invoke(commands); diff --git a/doc/docker/graylog/docker-compose.yml b/doc/docker/graylog/docker-compose.yml index 141f826..b7b6329 100644 --- a/doc/docker/graylog/docker-compose.yml +++ b/doc/docker/graylog/docker-compose.yml @@ -37,11 +37,11 @@ services: - mongo - elasticsearch ports: - - 9009:9000 - - 1514:1514 - - 1514:1514/udp - - 12201:12201 - - 12201:12201/udp + - "9009:9000" + - "1514:1514" + - "1514:1514/udp" + - "12201:12201" + - "12201:12201/udp" networks: graylog: driver: bridge \ No newline at end of file diff --git a/doc/docker/mysql/docker-compose.yml b/doc/docker/mysql/docker-compose.yml index 408e83b..52337d9 100644 --- a/doc/docker/mysql/docker-compose.yml +++ b/doc/docker/mysql/docker-compose.yml @@ -6,7 +6,7 @@ services: container_name: mysql restart: always ports: - - 3306:3306 + - "3306:3306" volumes: - mysql-data:/var/lib/mysql environment: diff --git a/doc/docker/nacos/cluster/docker-compose.yml b/doc/docker/nacos/cluster/docker-compose.yml index 475088e..389d8d0 100644 --- a/doc/docker/nacos/cluster/docker-compose.yml +++ b/doc/docker/nacos/cluster/docker-compose.yml @@ -22,8 +22,8 @@ services: - 
/home/nacos/cluster-logs/nacos-server01:/home/nacos/logs - /home/nacos/init.d:/home/nacos/init.d ports: - - 8846:8848 - - 9555:9555 + - "8846:8848" + - "9555:9555" restart: on-failure nacos2: @@ -47,7 +47,7 @@ services: - /home/nacos/cluster-logs/nacos-server02:/home/nacos/logs - /home/nacos/init.d:/home/nacos/init.d ports: - - 8847:8848 + - "8847:8848" restart: on-failure nacos3: @@ -71,5 +71,5 @@ services: - /home/nacos/cluster-logs/nacos-server03:/home/nacos/logs - /home/nacos/init.d:/home/nacos/init.d ports: - - 8848:8848 + - "8848:8848" restart: on-failure \ No newline at end of file diff --git a/doc/docker/nacos/single/docker-compose.yml b/doc/docker/nacos/single/docker-compose.yml index d930f29..8889e38 100644 --- a/doc/docker/nacos/single/docker-compose.yml +++ b/doc/docker/nacos/single/docker-compose.yml @@ -21,7 +21,7 @@ services: - /home/nacos/single-logs/nacos-server:/home/nacos/logs - /home/nacos/init.d:/home/nacos/init.d ports: - - 8848:8848 - - 9848:9848 - - 9849:9849 + - "8848:8848" + - "9848:9848" + - "9849:9849" restart: on-failure \ No newline at end of file diff --git a/doc/docker/rabbitmq/rabbitmq/config/rabbitmq_bak.config b/doc/docker/rabbitmq/rabbitmq/config/rabbitmq_bak.config deleted file mode 100644 index 0169efa..0000000 --- a/doc/docker/rabbitmq/rabbitmq/config/rabbitmq_bak.config +++ /dev/null @@ -1,925 +0,0 @@ -%% -*- mode: erlang -*- -%% ---------------------------------------------------------------------------- -%% Classic RabbitMQ configuration format example. -%% This format should be considered DEPRECATED. -%% -%% Users of RabbitMQ 3.7.x -%% or later should prefer the new style format (rabbitmq.conf) -%% in combination with an advanced.config file (as needed). -%% -%% Related doc guide: https://www.rabbitmq.com/configure.html. See -%% https://rabbitmq.com/documentation.html for documentation ToC. -%% ---------------------------------------------------------------------------- -[ - {rabbit, - [%% - %% Networking - %% ==================== - %% - %% Related doc guide: https://www.rabbitmq.com/networking.html. - - %% By default, RabbitMQ will listen on all interfaces, using - %% the standard (reserved) AMQP port. - %% - %% {tcp_listeners, [5672]}, - - %% To listen on a specific interface, provide a tuple of {IpAddress, Port}. - %% For example, to listen only on localhost for both IPv4 and IPv6: - %% - %% {tcp_listeners, [{"127.0.0.1", 5672}, - %% {"::1", 5672}]}, - - %% TLS listeners are configured in the same fashion as TCP listeners, - %% including the option to control the choice of interface. - %% - %% {ssl_listeners, [5671]}, - - %% Number of Erlang processes that will accept connections for the TCP - %% and TLS listeners. - %% - %% {num_tcp_acceptors, 10}, - %% {num_ssl_acceptors, 1}, - - %% Maximum time for AMQP 0-8/0-9/0-9-1 handshake (after socket connection - %% and TLS handshake), in milliseconds. - %% - %% {handshake_timeout, 10000}, - - %% Set to 'true' to perform reverse DNS lookups when accepting a - %% connection. Hostnames will then be shown instead of IP addresses - %% in rabbitmqctl and the management plugin. - %% - %% {reverse_dns_lookups, false}, - - %% - %% Security, Access Control - %% ======================== - %% - %% Related doc guide: https://www.rabbitmq.com/access-control.html. - - %% The default "guest" user is only permitted to access the server - %% via a loopback interface (e.g. localhost). 
- %% {loopback_users, [<<"guest">>]}, - %% - %% Uncomment the following line if you want to allow access to the - %% guest user from anywhere on the network. - %% {loopback_users, []}, - - - %% TLS configuration. - %% - %% Related doc guide: https://www.rabbitmq.com/ssl.html. - %% - %% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"}, - %% {certfile, "/path/to/server/cert.pem"}, - %% {keyfile, "/path/to/server/key.pem"}, - %% {verify, verify_peer}, - %% {fail_if_no_peer_cert, false}]}, - - %% Choose the available SASL mechanism(s) to expose. - %% The two default (built in) mechanisms are 'PLAIN' and - %% 'AMQPLAIN'. Additional mechanisms can be added via - %% plugins. - %% - %% Related doc guide: https://www.rabbitmq.com/authentication.html. - %% - %% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, - - %% Select an authentication database to use. RabbitMQ comes bundled - %% with a built-in auth-database, based on mnesia. - %% - %% {auth_backends, [rabbit_auth_backend_internal]}, - - %% Configurations supporting the rabbitmq_auth_mechanism_ssl and - %% rabbitmq_auth_backend_ldap plugins. - %% - %% NB: These options require that the relevant plugin is enabled. - %% Related doc guide: https://www.rabbitmq.com/plugins.html for further details. - - %% The RabbitMQ-auth-mechanism-ssl plugin makes it possible to - %% authenticate a user based on the client's TLS certificate. - %% - %% To use auth-mechanism-ssl, add to or replace the auth_mechanisms - %% list with the entry 'EXTERNAL'. - %% - %% {auth_mechanisms, ['EXTERNAL']}, - - %% The rabbitmq_auth_backend_ldap plugin allows the broker to - %% perform authentication and authorisation by deferring to an - %% external LDAP server. - %% - %% For more information about configuring the LDAP backend, see - %% https://www.rabbitmq.com/ldap.html. - %% - %% Enable the LDAP auth backend by adding to or replacing the - %% auth_backends entry: - %% - %% {auth_backends, [rabbit_auth_backend_ldap]}, - - %% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and - %% STOMP ssl_cert_login configurations. See the rabbitmq_stomp - %% configuration section later in this file and the README in - %% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further - %% details. - %% - %% To use the TLS cert's CN instead of its DN as the username - %% - %% {ssl_cert_login_from, distinguished_name}, - - %% TLS handshake timeout, in milliseconds. - %% - %% {ssl_handshake_timeout, 5000}, - - %% Makes RabbitMQ accept SSLv3 client connections by default. - %% DO NOT DO THIS IF YOU CAN HELP IT. - %% - %% {ssl_allow_poodle_attack, false}, - - %% Password hashing implementation. Will only affect newly - %% created users. To recalculate hash for an existing user - %% it's necessary to update her password. - %% - %% When importing definitions exported from versions earlier - %% than 3.6.0, it is possible to go back to MD5 (only do this - %% as a temporary measure!) by setting this to rabbit_password_hashing_md5. - %% - %% To use SHA-512, set to rabbit_password_hashing_sha512. - %% - %% {password_hashing_module, rabbit_password_hashing_sha256}, - - %% Configuration entry encryption. 
- %% Related doc guide: https://www.rabbitmq.com/configure.html#configuration-encryption - %% - %% To specify the passphrase in the configuration file: - %% - %% {config_entry_decoder, [{passphrase, <<"mypassphrase">>}]} - %% - %% To specify the passphrase in an external file: - %% - %% {config_entry_decoder, [{passphrase, {file, "/path/to/passphrase/file"}}]} - %% - %% To make the broker request the passphrase when it starts: - %% - %% {config_entry_decoder, [{passphrase, prompt}]} - %% - %% To change encryption settings: - %% - %% {config_entry_decoder, [{cipher, aes_cbc256}, - %% {hash, sha512}, - %% {iterations, 1000}]} - - %% - %% Default User / VHost - %% ==================== - %% - - %% On first start RabbitMQ will create a vhost and a user. These - %% config items control what gets created. See - %% https://www.rabbitmq.com/access-control.html for further - %% information about vhosts and access control. - %% - %% {default_vhost, <<"/">>}, - %% {default_user, <<"guest">>}, - %% {default_pass, <<"guest">>}, - %% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, - - %% Tags for default user - %% - %% Related doc guide: https://www.rabbitmq.com/management.html. - %% - %% {default_user_tags, [administrator]}, - - %% - %% Additional network and protocol related configuration - %% ===================================================== - %% - - %% Sets the default AMQP 0-9-1 heartbeat timeout in seconds. - %% Values lower than 6 can produce false positives and are not - %% recommended. - %% - %% Related doc guides: - %% - %% * https://www.rabbitmq.com/heartbeats.html - %% * https://www.rabbitmq.com/networking.html - %% - %% {heartbeat, 60}, - - %% Set the max permissible size of an AMQP frame (in bytes). - %% - %% {frame_max, 131072}, - - %% Set the max frame size the server will accept before connection - %% tuning occurs - %% - %% {initial_frame_max, 4096}, - - %% Set the max permissible number of channels per connection. - %% 0 means "no limit". - %% - %% {channel_max, 0}, - - %% Set the max permissible number of client connections to the node. - %% `infinity` means "no limit". - %% - %% This limit applies to client connections to all listeners (regardless of - %% the protocol, whether TLS is used and so on). CLI tools and inter-node - %% connections are exempt. - %% - %% When client connections are rapidly opened in succession, it is possible - %% for the total connection count to go slightly higher than the configured limit. - %% The limit works well as a general safety measure. - %% - %% Clients that are hitting the limit will see their TCP connections fail or time out. - %% - %% Introduced in 3.6.13. - %% - %% Related doc guide: https://www.rabbitmq.com/networking.html. - %% - %% {connection_max, infinity}, - - %% TCP socket options. - %% - %% Related doc guide: https://www.rabbitmq.com/networking.html. - %% - %% {tcp_listen_options, [{backlog, 128}, - %% {nodelay, true}, - %% {exit_on_close, false}]}, - - %% - %% Resource Limits & Flow Control - %% ============================== - %% - %% Related doc guide: https://www.rabbitmq.com/memory.html, https://www.rabbitmq.com/memory-use.html. - - %% Memory-based Flow Control threshold. - %% - %% {vm_memory_high_watermark, 0.4}, - - %% Alternatively, we can set a limit (in bytes) of RAM used by the node. - %% - %% {vm_memory_high_watermark, {absolute, 1073741824}}, - %% - %% Or you can set absolute value using memory units (with RabbitMQ 3.6.0+). 
- %% - %% {vm_memory_high_watermark, {absolute, "1024M"}}, - %% - %% Supported unit symbols: - %% - %% k, kiB: kibibytes (2^10 - 1,024 bytes) - %% M, MiB: mebibytes (2^20 - 1,048,576 bytes) - %% G, GiB: gibibytes (2^30 - 1,073,741,824 bytes) - %% kB: kilobytes (10^3 - 1,000 bytes) - %% MB: megabytes (10^6 - 1,000,000 bytes) - %% GB: gigabytes (10^9 - 1,000,000,000 bytes) - - %% Fraction of the high watermark limit at which queues start to - %% page message out to disc in order to free up memory. - %% For example, when vm_memory_high_watermark is set to 0.4 and this value is set to 0.5, - %% paging can begin as early as when 20% of total available RAM is used by the node. - %% - %% Values greater than 1.0 can be dangerous and should be used carefully. - %% - %% One alternative to this is to use durable queues and publish messages - %% as persistent (delivery mode = 2). With this combination queues will - %% move messages to disk much more rapidly. - %% - %% Another alternative is to configure queues to page all messages (both - %% persistent and transient) to disk as quickly - %% as possible, see https://www.rabbitmq.com/lazy-queues.html. - %% - %% {vm_memory_high_watermark_paging_ratio, 0.5}, - - %% Selects Erlang VM memory consumption calculation strategy. Can be `allocated`, `rss` or `legacy` (aliased as `erlang`), - %% Introduced in 3.6.11. `rss` is the default as of 3.6.12. - %% See https://github.com/rabbitmq/rabbitmq-server/issues/1223 and rabbitmq/rabbitmq-common#224 for background. - %% {vm_memory_calculation_strategy, rss}, - - %% Interval (in milliseconds) at which we perform the check of the memory - %% levels against the watermarks. - %% - %% {memory_monitor_interval, 2500}, - - %% The total memory available can be calculated from the OS resources - %% - default option - or provided as a configuration parameter: - %% {total_memory_available_override_value, "5000MB"}, - - %% Set disk free limit (in bytes). Once free disk space reaches this - %% lower bound, a disk alarm will be set - see the documentation - %% listed above for more details. - %% - %% {disk_free_limit, 50000000}, - %% - %% Or you can set it using memory units (same as in vm_memory_high_watermark) - %% with RabbitMQ 3.6.0+. - %% {disk_free_limit, "50MB"}, - %% {disk_free_limit, "50000kB"}, - %% {disk_free_limit, "2GB"}, - - %% Alternatively, we can set a limit relative to total available RAM. - %% - %% Values lower than 1.0 can be dangerous and should be used carefully. - %% {disk_free_limit, {mem_relative, 2.0}}, - - %% - %% Clustering - %% ===================== - %% - - %% Queue master location strategy: - %% * <<"min-masters">> - %% * <<"client-local">> - %% * <<"random">> - %% - %% Related doc guide: https://www.rabbitmq.com/ha.html#queue-master-location - %% - %% {queue_master_locator, <<"client-local">>}, - - %% Batch size (number of messages) used during eager queue mirror synchronisation. - %% Related doc guide: https://www.rabbitmq.com/ha.html#batch-sync. When average message size is relatively large - %% (say, 10s of kilobytes or greater), reducing this value will decrease peak amount - %% of RAM used by newly joining nodes that need eager synchronisation. - %% - %% {mirroring_sync_batch_size, 4096}, - - %% Enables flow control between queue mirrors. - %% Disabling this can be dangerous and is not recommended. - %% When flow control is disabled, queue masters can outpace mirrors and not allow mirrors to catch up. - %% Mirrors will end up using increasingly more RAM, eventually triggering a memory alarm. 
- %% - %% {mirroring_flow_control, true}, - - %% Additional server properties to announce to connecting clients. - %% - %% {server_properties, []}, - - %% How to respond to cluster partitions. - %% Related doc guide: https://www.rabbitmq.com/partitions.html - %% - %% {cluster_partition_handling, ignore}, - - %% Mirror sync batch size, in messages. Increasing this will speed - %% up syncing but total batch size in bytes must not exceed 2 GiB. - %% Available in RabbitMQ 3.6.0 or later. - %% - %% {mirroring_sync_batch_size, 4096}, - - %% Make clustering happen *automatically* at startup - only applied - %% to nodes that have just been reset or started for the first time. - %% Related doc guide: https://www.rabbitmq.com/clustering.html#auto-config - %% - %% {cluster_nodes, {['rabbit@my.host.com'], disc}}, - - %% Interval (in milliseconds) at which we send keepalive messages - %% to other cluster members. Note that this is not the same thing - %% as net_ticktime; missed keepalive messages will not cause nodes - %% to be considered down. - %% - %% {cluster_keepalive_interval, 10000}, - - %% - %% Statistics Collection - %% ===================== - %% - - %% Set (internal) statistics collection granularity. - %% - %% {collect_statistics, none}, - - %% Statistics collection interval (in milliseconds). Increasing - %% this will reduce the load on management database. - %% - %% {collect_statistics_interval, 5000}, - - %% Enables vhosts tracing. - %% - %% {trace_vhosts, []}, - - %% Explicitly enable/disable HiPE compilation. - %% - %% {hipe_compile, false}, - - %% Number of delegate processes to use for intra-cluster communication. - %% On a node which is part of cluster, has more than 16 cores and plenty of network bandwidth, - %% it may make sense to increase this value. - %% - %% {delegate_count, 16}, - - %% Number of times to retry while waiting for internal database tables (Mnesia tables) to sync - %% from a peer. In deployments where nodes can take a long time to boot, this value - %% may need increasing. - %% - %% {mnesia_table_loading_retry_limit, 10}, - - %% Amount of time in milliseconds which this node will wait for internal database tables (Mnesia tables) to sync - %% from a peer. In deployments where nodes can take a long time to boot, this value - %% may need increasing. - %% - %% {mnesia_table_loading_retry_timeout, 30000}, - - %% Size in bytes below which to embed messages in the queue index. - %% Related doc guide: https://www.rabbitmq.com/persistence-conf.html - %% - %% {queue_index_embed_msgs_below, 4096}, - - %% Maximum number of queue index entries to keep in journal - %% Related doc guide: https://www.rabbitmq.com/persistence-conf.html. - %% - %% {queue_index_max_journal_entries, 32768}, - - %% Number of credits that a queue process is given by the message store - %% By default, a queue process is given 4000 message store credits, - %% and then 800 for every 800 messages that it processes. - %% - %% {msg_store_credit_disc_bound, {4000, 800}}, - - %% Minimum number of messages with their queue position held in RAM required - %% to trigger writing their queue position to disk. - %% - %% This value MUST be higher than the initial msg_store_credit_disc_bound value, - %% otherwise paging performance may worsen. - %% - %% {msg_store_io_batch_size, 4096}, - - %% Number of credits that a connection, channel or queue are given. - %% - %% By default, every connection, channel or queue is given 400 credits, - %% and then 200 for every 200 messages that it sends to a peer process. 
- %% Increasing these values may help with throughput but also can be dangerous: - %% high credit flow values are no different from not having flow control at all. - %% - %% Related doc guide: https://www.rabbitmq.com/blog/2015/10/06/new-credit-flow-settings-on-rabbitmq-3-5-5/ - %% and http://alvaro-videla.com/2013/09/rabbitmq-internals-credit-flow-for-erlang-processes.html. - %% - %% {credit_flow_default_credit, {400, 200}}, - - %% Number of milliseconds before a channel operation times out. - %% - %% {channel_operation_timeout, 15000}, - - %% Number of queue operations required to trigger an explicit garbage collection. - %% Increasing this value may reduce CPU load and increase peak RAM consumption of queues. - %% - %% {queue_explicit_gc_run_operation_threshold, 1000}, - - %% Number of lazy queue operations required to trigger an explicit garbage collection. - %% Increasing this value may reduce CPU load and increase peak RAM consumption of lazy queues. - %% - %% {lazy_queue_explicit_gc_run_operation_threshold, 1000}, - - %% Number of times disk monitor will retry free disk space queries before - %% giving up. - %% - %% {disk_monitor_failure_retries, 10}, - - %% Milliseconds to wait between disk monitor retries on failures. - %% - %% {disk_monitor_failure_retry_interval, 120000}, - - %% Whether or not to enable background periodic forced GC runs for all - %% Erlang processes on the node in "waiting" state. - %% - %% Disabling background GC may reduce latency for client operations, - %% keeping it enabled may reduce median RAM usage by the binary heap - %% (see https://www.erlang-solutions.com/blog/erlang-garbage-collector.html). - %% - %% Before enabling this option, please take a look at the memory - %% breakdown (https://www.rabbitmq.com/memory-use.html). - %% - %% {background_gc_enabled, false}, - - %% Interval (in milliseconds) at which we run background GC. - %% - %% {background_gc_target_interval, 60000}, - - %% Message store operations are stored in a sequence of files called segments. - %% This controls max size of a segment file. - %% Increasing this value may speed up (sequential) disk writes but will slow down segment GC process. - %% DO NOT CHANGE THIS for existing installations. - %% - %% {msg_store_file_size_limit, 16777216}, - - %% Whether or not to enable file write buffering. - %% - %% {fhc_write_buffering, true}, - - %% Whether or not to enable file read buffering. Enabling - %% this may slightly speed up reads but will also increase - %% node's memory consumption, in particular on boot. - %% - %% {fhc_read_buffering, false} - - ]}, - - %% ---------------------------------------------------------------------------- - %% Advanced Erlang Networking/Clustering Options. - %% - %% Related doc guide: https://www.rabbitmq.com/clustering.html - %% ---------------------------------------------------------------------------- - {kernel, - [%% Sets the net_kernel tick time. - %% Please see http://erlang.org/doc/man/kernel_app.html and - %% https://www.rabbitmq.com/nettick.html for further details. - %% - %% {net_ticktime, 60} - ]}, - - %% ---------------------------------------------------------------------------- - %% RabbitMQ Management Plugin - %% - %% Related doc guide: https://www.rabbitmq.com/management.html - %% ---------------------------------------------------------------------------- - - {rabbitmq_management, - [%% Preload schema definitions from a previously exported definitions file. 
See - %% https://www.rabbitmq.com/management.html#load-definitions - %% - %% {load_definitions, "/path/to/exported/definitions.json"}, - - %% Log all requests to the management HTTP API to a directory. - %% - %% {http_log_dir, "/path/to/rabbitmq/logs/http"}, - - %% Change the port on which the HTTP listener listens, - %% specifying an interface for the web server to bind to. - %% Also set the listener to use TLS and provide TLS options. - %% - %% {listener, [{port, 12345}, - %% {ip, "127.0.0.1"}, - %% {ssl, true}, - %% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"}, - %% {certfile, "/path/to/cert.pem"}, - %% {keyfile, "/path/to/key.pem"}]}]}, - - %% One of 'basic', 'detailed' or 'none'. See - %% https://www.rabbitmq.com/management.html#fine-stats for more details. - %% {rates_mode, basic}, - - %% Configure how long aggregated data (such as message rates and queue - %% lengths) is retained. Please read the plugin's documentation in - %% https://www.rabbitmq.com/management.html#configuration for more - %% details. - %% - %% {sample_retention_policies, - %% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]}, - %% {basic, [{60, 5}, {3600, 60}]}, - %% {detailed, [{10, 5}]}]} - ]}, - - %% ---------------------------------------------------------------------------- - %% RabbitMQ Shovel Plugin - %% - %% Related doc guide: https://www.rabbitmq.com/shovel.html - %% ---------------------------------------------------------------------------- - - {rabbitmq_shovel, - [{shovels, - [%% A named shovel worker. - %% {my_first_shovel, - %% [ - - %% List the source broker(s) from which to consume. - %% - %% {sources, - %% [%% URI(s) and pre-declarations for all source broker(s). - %% {brokers, ["amqp://user:password@host.domain/my_vhost"]}, - %% {declarations, []} - %% ]}, - - %% List the destination broker(s) to publish to. - %% {destinations, - %% [%% A singular version of the 'brokers' element. - %% {broker, "amqp://"}, - %% {declarations, []} - %% ]}, - - %% Name of the queue to shovel messages from. - %% - %% {queue, <<"your-queue-name-goes-here">>}, - - %% Optional prefetch count. - %% - %% {prefetch_count, 10}, - - %% when to acknowledge messages: - %% - no_ack: never (auto) - %% - on_publish: after each message is republished - %% - on_confirm: when the destination broker confirms receipt - %% - %% {ack_mode, on_confirm}, - - %% Overwrite fields of the outbound basic.publish. - %% - %% {publish_fields, [{exchange, <<"my_exchange">>}, - %% {routing_key, <<"from_shovel">>}]}, - - %% Static list of basic.properties to set on re-publication. - %% - %% {publish_properties, [{delivery_mode, 2}]}, - - %% The number of seconds to wait before attempting to - %% reconnect in the event of a connection failure. - %% - %% {reconnect_delay, 2.5} - - %% ]} %% End of my_first_shovel - ]} - %% Rather than specifying some values per-shovel, you can specify - %% them for all shovels here. - %% - %% {defaults, [{prefetch_count, 0}, - %% {ack_mode, on_confirm}, - %% {publish_fields, []}, - %% {publish_properties, [{delivery_mode, 2}]}, - %% {reconnect_delay, 2.5}]} - ]}, - - %% ---------------------------------------------------------------------------- - %% RabbitMQ STOMP Plugin - %% - %% Related doc guide: https://www.rabbitmq.com/stomp.html - %% ---------------------------------------------------------------------------- - - {rabbitmq_stomp, - [%% Network Configuration - the format is generally the same as for the broker - - %% Listen only on localhost (ipv4 & ipv6) on a specific port. 
- %% {tcp_listeners, [{"127.0.0.1", 61613}, - %% {"::1", 61613}]}, - - %% Listen for TLS connections on a specific port. - %% {ssl_listeners, [61614]}, - - %% Number of Erlang processes that will accept connections for the TCP - %% and TLS listeners. - %% - %% {num_tcp_acceptors, 10}, - %% {num_ssl_acceptors, 1}, - - %% Additional TLS options - - %% Extract a name from the client's certificate when using TLS. - %% - %% {ssl_cert_login, true}, - - %% Set a default user name and password. This is used as the default login - %% whenever a CONNECT frame omits the login and passcode headers. - %% - %% Please note that setting this will allow clients to connect without - %% authenticating! - %% - %% {default_user, [{login, "guest"}, - %% {passcode, "guest"}]}, - - %% If a default user is configured, or you have configured use TLS client - %% certificate based authentication, you can choose to allow clients to - %% omit the CONNECT frame entirely. If set to true, the client is - %% automatically connected as the default user or user supplied in the - %% TLS certificate whenever the first frame sent on a session is not a - %% CONNECT frame. - %% - %% {implicit_connect, true}, - - %% Whether or not to enable proxy protocol support. - %% Once enabled, clients cannot directly connect to the broker - %% anymore. They must connect through a load balancer that sends the - %% proxy protocol header to the broker at connection time. - %% This setting applies only to STOMP clients, other protocols - %% like MQTT or AMQP have their own setting to enable proxy protocol. - %% See the plugins or broker documentation for more information. - %% - %% {proxy_protocol, false} - ]}, - - %% ---------------------------------------------------------------------------- - %% RabbitMQ MQTT Plugin - %% - %% Related doc guide: https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md - %% - %% ---------------------------------------------------------------------------- - - {rabbitmq_mqtt, - [%% Set the default user name and password. Will be used as the default login - %% if a connecting client provides no other login details. - %% - %% Please note that setting this will allow clients to connect without - %% authenticating! - %% - %% {default_user, <<"guest">>}, - %% {default_pass, <<"guest">>}, - - %% Enable anonymous access. If this is set to false, clients MUST provide - %% login information in order to connect. See the default_user/default_pass - %% configuration elements for managing logins without authentication. - %% - %% {allow_anonymous, true}, - - %% If you have multiple chosts, specify the one to which the - %% adapter connects. - %% - %% {vhost, <<"/">>}, - - %% Specify the exchange to which messages from MQTT clients are published. - %% - %% {exchange, <<"amq.topic">>}, - - %% Specify TTL (time to live) to control the lifetime of non-clean sessions. - %% - %% {subscription_ttl, 1800000}, - - %% Set the prefetch count (governing the maximum number of unacknowledged - %% messages that will be delivered). - %% - %% {prefetch, 10}, - - %% TLS listeners. - %% See https://www.rabbitmq.com/networking.html - %% - %% {tcp_listeners, [1883]}, - %% {ssl_listeners, []}, - - %% Number of Erlang processes that will accept connections for the TCP - %% and TLS listeners. - %% See https://www.rabbitmq.com/networking.html - %% - %% {num_tcp_acceptors, 10}, - %% {num_ssl_acceptors, 1}, - - %% TCP socket options. 
- %% See https://www.rabbitmq.com/networking.html - %% - %% {tcp_listen_options, [ - %% {backlog, 128}, - %% {linger, {true, 0}}, - %% {exit_on_close, false} - %% ]}, - - %% Whether or not to enable proxy protocol support. - %% Once enabled, clients cannot directly connect to the broker - %% anymore. They must connect through a load balancer that sends the - %% proxy protocol header to the broker at connection time. - %% This setting applies only to MQTT clients, other protocols - %% like STOMP or AMQP have their own setting to enable proxy protocol. - %% See the plugins or broker documentation for more information. - %% - %% {proxy_protocol, false} - ]}, - - %% ---------------------------------------------------------------------------- - %% RabbitMQ AMQP 1.0 Support - %% - %% Related doc guide: https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md - %% - %% ---------------------------------------------------------------------------- - - {rabbitmq_amqp1_0, - [%% Connections that are not authenticated with SASL will connect as this - %% account. See the README for more information. - %% - %% Please note that setting this will allow clients to connect without - %% authenticating! - %% - %% {default_user, "guest"}, - - %% Enable protocol strict mode. See the README for more information. - %% - %% {protocol_strict_mode, false} - ]}, - - %% ---------------------------------------------------------------------------- - %% RabbitMQ LDAP Plugin - %% - %% Related doc guide: https://www.rabbitmq.com/ldap.html. - %% - %% ---------------------------------------------------------------------------- - - {rabbitmq_auth_backend_ldap, - [%% - %% Connecting to the LDAP server(s) - %% ================================ - %% - - %% Specify servers to bind to. You *must* set this in order for the plugin - %% to work properly. - %% - %% {servers, ["your-server-name-goes-here"]}, - - %% Connect to the LDAP server using TLS - %% - %% {use_ssl, false}, - - %% Specify the LDAP port to connect to - %% - %% {port, 389}, - - %% LDAP connection timeout, in milliseconds or 'infinity' - %% - %% {timeout, infinity}, - - %% Enable logging of LDAP queries. - %% One of - %% - false (no logging is performed) - %% - true (verbose logging of the logic used by the plugin) - %% - network (as true, but additionally logs LDAP network traffic) - %% - %% Defaults to false. - %% - %% {log, false}, - - %% - %% Authentication - %% ============== - %% - - %% Pattern to convert the username given through AMQP to a DN before - %% binding - %% - %% {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"}, - - %% Alternatively, you can convert a username to a Distinguished - %% Name via an LDAP lookup after binding. See the documentation for - %% full details. - - %% When converting a username to a dn via a lookup, set these to - %% the name of the attribute that represents the user name, and the - %% base DN for the lookup query. - %% - %% {dn_lookup_attribute, "userPrincipalName"}, - %% {dn_lookup_base, "DC=gopivotal,DC=com"}, - - %% Controls how to bind for authorisation queries and also to - %% retrieve the details of users logging in without presenting a - %% password (e.g., SASL EXTERNAL). - %% One of - %% - as_user (to bind as the authenticated user - requires a password) - %% - anon (to bind anonymously) - %% - {UserDN, Password} (to bind with a specified user name and password) - %% - %% Defaults to 'as_user'. 
- %% - %% {other_bind, as_user}, - - %% - %% Authorisation - %% ============= - %% - - %% The LDAP plugin can perform a variety of queries against your - %% LDAP server to determine questions of authorisation. See - %% https://www.rabbitmq.com/ldap.html#authorisation for more - %% information. - - %% Set the query to use when determining vhost access - %% - %% {vhost_access_query, {in_group, - %% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}}, - - %% Set the query to use when determining resource (e.g., queue) access - %% - %% {resource_access_query, {constant, true}}, - - %% Set queries to determine which tags a user has - %% - %% {tag_queries, []} - ]}, - - %% Lager controls logging. - %% See https://github.com/basho/lager for more documentation - {lager, [ - %% - %% Log directory, taken from the RABBITMQ_LOG_BASE env variable by default. - %% {log_root, "/var/log/rabbitmq"}, - %% - %% All log messages go to the default "sink" configured with - %% the `handlers` parameter. By default, it has a single - %% lager_file_backend handler writing messages to "$nodename.log" - %% (ie. the value of $RABBIT_LOGS). - %% {handlers, [ - %% {lager_file_backend, [{file, "rabbit.log"}, - %% {level, info}, - %% {date, ""}, - %% {size, 0}]} - %% ]}, - %% - %% Extra sinks are used in RabbitMQ to categorize messages. By - %% default, those extra sinks are configured to forward messages - %% to the default sink (see above). "rabbit_log_lager_event" - %% is the default category where all RabbitMQ messages without - %% a category go. Messages in the "channel" category go to the - %% "rabbit_channel_lager_event" Lager extra sink, and so on. - %% {extra_sinks, [ - %% {rabbit_log_lager_event, [{handlers, [ - %% {lager_forwarder_backend, - %% [lager_event, info]}]}]}, - %% {rabbit_channel_lager_event, [{handlers, [ - %% {lager_forwarder_backend, - %% [lager_event, info]}]}]}, - %% {rabbit_connection_lager_event, [{handlers, [ - %% {lager_forwarder_backend, - %% [lager_event, info]}]}]}, - %% {rabbit_mirroring_lager_event, [{handlers, [ - %% {lager_forwarder_backend, - %% [lager_event, info]}]}]} - %% ]} - ]} -]. 
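One likely motivation for quoting the port mappings in the compose diffs above and below (the patch itself does not state it): YAML 1.1 parsers can read an unquoted HOST:CONTAINER pair as a base-60 (sexagesimal) integer when both parts are below 60, so Docker's documentation recommends declaring port mappings as strings. A minimal sketch, with a hypothetical service and image name that are not part of this patch:

```yaml
services:
  sshd-example:
    image: example/sshd    # hypothetical image, for illustration only
    ports:
      - 22:22              # risky: a YAML 1.1 parser may read 22:22 as the integer 1342
      - "2222:22"          # safe: quoting keeps it a literal HOST:CONTAINER string
```

Mappings such as `8848:8848` happen to parse correctly because the parts are not valid base-60 digits, so quoting them here is a consistency and safety measure rather than a behavioural fix.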
diff --git a/doc/docker/redis/docker-compose.yaml b/doc/docker/redis/docker-compose.yaml index 5c50a19..573d48f 100644 --- a/doc/docker/redis/docker-compose.yaml +++ b/doc/docker/redis/docker-compose.yaml @@ -6,7 +6,7 @@ services: container_name: redis restart: always ports: - - 6379:6379 + - "6379:6379" volumes: - ./redis.conf:/usr/local/etc/redis/redis.conf:rw - ./data:/data:rw diff --git a/doc/docker/rocketmq/docker-compose-rocketmq.yml b/doc/docker/rocketmq/docker-compose-rocketmq.yml index 42a8649..ebf2244 100644 --- a/doc/docker/rocketmq/docker-compose-rocketmq.yml +++ b/doc/docker/rocketmq/docker-compose-rocketmq.yml @@ -5,7 +5,7 @@ services: image: foxiswho/rocketmq:server container_name: rocketmq_server ports: - - 9876:9876 + - "9876:9876" volumes: - ./rocketmq/rocketmq_server/logs:/opt/logs - ./rocketmq/rocketmq_server/store:/opt/store @@ -19,8 +19,8 @@ services: image: foxiswho/rocketmq:broker container_name: rocketmq_broker ports: - - 10909:10909 - - 10911:10911 + - "10909:10909" + - "10911:10911" volumes: - ./rocketmq/rocketmq_broker/logs:/opt/logs - ./rocketmq/rocketmq_broker/store:/opt/store @@ -42,7 +42,7 @@ services: image: styletang/rocketmq-console-ng container_name: rocketmq_console_ng ports: - - 9002:8080 + - "9002:8080" environment: JAVA_OPTS: "-Drocketmq.namesrv.addr=rocketmq_server:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false" depends_on: diff --git a/docker-compose.yml b/docker-compose.yml index 7cd9d37..eb58488 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -123,9 +123,9 @@ services: - /home/nacos/single-logs/nacos-server:/home/nacos/logs - /home/nacos/init.d:/home/nacos/init.d ports: - - 8848:8848 - - 9848:9848 - - 9849:9849 + - "8848:8848" + - "9848:9848" + - "9849:9849" depends_on: - austin-mysql restart: on-failure From f3462e39cc4a3000cc7e4efc6960da9866ebae58 Mon Sep 17 00:00:00 2001 From: xiaoxiamo <82970607@qq.com> Date: Mon, 22 Jul 2024 23:52:25 +0800 Subject: [PATCH 4/5] =?UTF-8?q?style:=20=E8=A7=84=E8=8C=83=E5=8C=96?= =?UTF-8?q?=E9=A1=B9=E7=9B=AE=EF=BC=88=E5=9B=9B=EF=BC=89=EF=BC=9A=E6=B8=85?= =?UTF-8?q?=E7=90=86=E6=97=A0=E7=94=A8=E5=AF=BC=E5=85=A5=E5=8C=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../com/java3y/austin/handler/handler/BaseHandler.java | 1 - .../java3y/austin/handler/handler/impl/SmsHandler.java | 5 ++++- .../web/controller/AlipayMiniProgramController.java | 8 +++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/BaseHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/BaseHandler.java index 125dbae..1125222 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/BaseHandler.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/BaseHandler.java @@ -1,6 +1,5 @@ package com.java3y.austin.handler.handler; -import cn.hutool.core.date.DateUtil; import com.java3y.austin.common.domain.AnchorInfo; import com.java3y.austin.common.domain.TaskInfo; import com.java3y.austin.common.enums.AnchorState; diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java index 68f140c..e445209 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java @@ -24,7 +24,10 @@ import 
org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.ApplicationContext; import org.springframework.stereotype.Component; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Random; /** * 短信发送处理 diff --git a/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java b/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java index a574471..a97d0f6 100644 --- a/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java +++ b/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java @@ -1,10 +1,12 @@ package com.java3y.austin.web.controller; -import cn.binarywang.wx.miniapp.api.WxMaService; import cn.hutool.http.HttpUtil; import com.alipay.api.AlipayClient; +import com.alipay.api.domain.AlipayOpenMiniMessageTemplateBatchqueryModel; import com.alipay.api.domain.MerchantMsgTemplateVO; +import com.alipay.api.request.AlipayOpenMiniMessageTemplateBatchqueryRequest; +import com.alipay.api.response.AlipayOpenMiniMessageTemplateBatchqueryResponse; import com.google.common.base.Throwables; import com.java3y.austin.common.constant.SendChanelUrlConstant; import com.java3y.austin.common.dto.account.AlipayMiniProgramAccount; @@ -19,15 +21,11 @@ import com.java3y.austin.web.vo.amis.CommonAmisVo; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import lombok.extern.slf4j.Slf4j; -import me.chanjar.weixin.common.bean.subscribemsg.TemplateInfo; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; -import com.alipay.api.request.AlipayOpenMiniMessageTemplateBatchqueryRequest; -import com.alipay.api.response.AlipayOpenMiniMessageTemplateBatchqueryResponse; -import com.alipay.api.domain.AlipayOpenMiniMessageTemplateBatchqueryModel; import java.util.ArrayList; import java.util.List; From 452dffb6b8d377fb5115973d0a3a1314a23fa829 Mon Sep 17 00:00:00 2001 From: xiaoxiamo <82970607@qq.com> Date: Wed, 14 Aug 2024 17:29:54 +0800 Subject: [PATCH 5/5] =?UTF-8?q?style:=20=E8=A7=84=E8=8C=83=E5=8C=96?= =?UTF-8?q?=E9=A1=B9=E7=9B=AE=EF=BC=88=E4=BA=94=EF=BC=89=EF=BC=9A=E8=A7=A3?= =?UTF-8?q?=E5=86=B345=E5=A4=84=E4=BB=A3=E7=A0=81=E7=BC=96=E8=AF=91?= =?UTF-8?q?=E8=AD=A6=E5=91=8A?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../austin/cron/config/CronAsyncThreadPoolConfig.java | 2 +- .../austin/cron/pending/CrowdBatchTaskPending.java | 2 +- .../com/java3y/austin/cron/utils/ReadFileUtils.java | 3 +-- .../cron/xxl/service/impl/CronTaskServiceImpl.java | 5 +++-- .../deduplication/limit/SlideWindowLimitService.java | 2 +- .../handler/handler/impl/DingDingRobotHandler.java | 2 +- .../austin/handler/handler/impl/EmailHandler.java | 4 ++-- .../java3y/austin/handler/handler/impl/SmsHandler.java | 10 +++++----- .../austin/stream/function/AustinFlatMapFunction.java | 2 +- .../java/com/java3y/austin/stream/sink/AustinSink.java | 2 +- .../java3y/austin/stream/utils/LettuceRedisUtils.java | 2 +- .../austin/support/config/SupportThreadPoolConfig.java | 2 +- .../austin/support/service/impl/ConfigServiceImpl.java | 4 ++-- .../java3y/austin/support/utils/ContentHolderUtil.java | 2 +- 
.../java/com/java3y/austin/support/utils/LogUtils.java | 2 +- .../com/java3y/austin/support/utils/OkHttpUtils.java | 2 +- .../com/java3y/austin/support/utils/RedisUtils.java | 5 +++-- .../main/java/com/java3y/austin/AustinApplication.java | 2 +- .../web/controller/AlipayMiniProgramController.java | 4 ++-- .../web/controller/MessageTemplateController.java | 3 ++- .../austin/web/exception/ExceptionHandlerAdvice.java | 2 +- .../java/com/java3y/austin/web/utils/Convert4Amis.java | 4 ++-- .../com/java3y/austin/web/utils/SpringFileUtils.java | 10 +++++----- 23 files changed, 40 insertions(+), 38 deletions(-) diff --git a/austin-cron/src/main/java/com/java3y/austin/cron/config/CronAsyncThreadPoolConfig.java b/austin-cron/src/main/java/com/java3y/austin/cron/config/CronAsyncThreadPoolConfig.java index 054adfc..670221c 100644 --- a/austin-cron/src/main/java/com/java3y/austin/cron/config/CronAsyncThreadPoolConfig.java +++ b/austin-cron/src/main/java/com/java3y/austin/cron/config/CronAsyncThreadPoolConfig.java @@ -37,7 +37,7 @@ public class CronAsyncThreadPoolConfig { return ExecutorBuilder.create() .setCorePoolSize(ThreadPoolConstant.COMMON_CORE_POOL_SIZE) .setMaxPoolSize(ThreadPoolConstant.COMMON_MAX_POOL_SIZE) - .setWorkQueue(new LinkedBlockingQueue(ThreadPoolConstant.BIG_QUEUE_SIZE)) + .setWorkQueue(new LinkedBlockingQueue<>(ThreadPoolConstant.BIG_QUEUE_SIZE)) .setHandler(new ThreadPoolExecutor.CallerRunsPolicy()) .setAllowCoreThreadTimeOut(true) .setKeepAliveTime(ThreadPoolConstant.SMALL_KEEP_LIVE_TIME, TimeUnit.SECONDS) diff --git a/austin-cron/src/main/java/com/java3y/austin/cron/pending/CrowdBatchTaskPending.java b/austin-cron/src/main/java/com/java3y/austin/cron/pending/CrowdBatchTaskPending.java index 685d065..16dfb73 100644 --- a/austin-cron/src/main/java/com/java3y/austin/cron/pending/CrowdBatchTaskPending.java +++ b/austin-cron/src/main/java/com/java3y/austin/cron/pending/CrowdBatchTaskPending.java @@ -42,7 +42,7 @@ public class CrowdBatchTaskPending extends AbstractLazyPending { public CrowdBatchTaskPending() { PendingParam pendingParam = new PendingParam<>(); - pendingParam.setQueue(new LinkedBlockingQueue(PendingConstant.QUEUE_SIZE)) + pendingParam.setQueue(new LinkedBlockingQueue<>(PendingConstant.QUEUE_SIZE)) .setTimeThreshold(PendingConstant.TIME_THRESHOLD) .setNumThreshold(AustinConstant.BATCH_RECEIVER_SIZE) .setExecutorService(CronAsyncThreadPoolConfig.getConsumePendingThreadPool()); diff --git a/austin-cron/src/main/java/com/java3y/austin/cron/utils/ReadFileUtils.java b/austin-cron/src/main/java/com/java3y/austin/cron/utils/ReadFileUtils.java index 653da76..671b630 100644 --- a/austin-cron/src/main/java/com/java3y/austin/cron/utils/ReadFileUtils.java +++ b/austin-cron/src/main/java/com/java3y/austin/cron/utils/ReadFileUtils.java @@ -10,7 +10,6 @@ import com.java3y.austin.cron.csv.CountFileRowHandler; import com.java3y.austin.cron.vo.CrowdInfoVo; import lombok.extern.slf4j.Slf4j; -import java.io.FileInputStream; import java.io.InputStreamReader; import java.nio.file.Files; import java.nio.file.Paths; @@ -60,7 +59,7 @@ public class ReadFileUtils { // 把首行当做是标题,获取reader try (CsvReader reader = CsvUtil.getReader( - new InputStreamReader(new FileInputStream(path), CharsetUtil.CHARSET_UTF_8), + new InputStreamReader(Files.newInputStream(Paths.get(path)), CharsetUtil.CHARSET_UTF_8), new CsvReadConfig().setContainsHeader(true))) { reader.read(countFileRowHandler); diff --git a/austin-cron/src/main/java/com/java3y/austin/cron/xxl/service/impl/CronTaskServiceImpl.java 
b/austin-cron/src/main/java/com/java3y/austin/cron/xxl/service/impl/CronTaskServiceImpl.java index 00213fa..27a69dc 100644 --- a/austin-cron/src/main/java/com/java3y/austin/cron/xxl/service/impl/CronTaskServiceImpl.java +++ b/austin-cron/src/main/java/com/java3y/austin/cron/xxl/service/impl/CronTaskServiceImpl.java @@ -6,6 +6,7 @@ import cn.hutool.core.util.StrUtil; import cn.hutool.http.HttpRequest; import cn.hutool.http.HttpResponse; import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.TypeReference; import com.google.common.base.Throwables; import com.java3y.austin.common.enums.RespStatusEnum; import com.java3y.austin.common.vo.BasicResultVO; @@ -48,7 +49,7 @@ public class CronTaskServiceImpl implements CronTaskService { @Override public BasicResultVO saveCronTask(XxlJobInfo xxlJobInfo) { - Map params = JSON.parseObject(JSON.toJSONString(xxlJobInfo), Map.class); + Map params = JSON.parseObject(JSON.toJSONString(xxlJobInfo), new TypeReference>() {}); String path = Objects.isNull(xxlJobInfo.getId()) ? xxlAddresses + XxlJobConstant.INSERT_URL : xxlAddresses + XxlJobConstant.UPDATE_URL; @@ -174,7 +175,7 @@ public class CronTaskServiceImpl implements CronTaskService { @Override public BasicResultVO createGroup(XxlJobGroup xxlJobGroup) { - Map params = JSON.parseObject(JSON.toJSONString(xxlJobGroup), Map.class); + Map params = JSON.parseObject(JSON.toJSONString(xxlJobGroup), new TypeReference>() {}); String path = xxlAddresses + XxlJobConstant.JOB_GROUP_INSERT_URL; HttpResponse response; diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/deduplication/limit/SlideWindowLimitService.java b/austin-handler/src/main/java/com/java3y/austin/handler/deduplication/limit/SlideWindowLimitService.java index ac0a383..5c90e1b 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/deduplication/limit/SlideWindowLimitService.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/deduplication/limit/SlideWindowLimitService.java @@ -37,7 +37,7 @@ public class SlideWindowLimitService extends AbstractLimitService { @PostConstruct public void init() { - redisScript = new DefaultRedisScript(); + redisScript = new DefaultRedisScript<>(); redisScript.setResultType(Long.class); redisScript.setScriptSource(new ResourceScriptSource(new ClassPathResource("limit.lua"))); } diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/DingDingRobotHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/DingDingRobotHandler.java index 0c0d885..6b650e0 100644 --- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/DingDingRobotHandler.java +++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/DingDingRobotHandler.java @@ -128,7 +128,7 @@ public class DingDingRobotHandler extends BaseHandler{ Mac mac = Mac.getInstance(CommonConstant.HMAC_SHA256_ENCRYPTION_ALGO); mac.init(new SecretKeySpec(secret.getBytes(StandardCharsets.UTF_8), CommonConstant.HMAC_SHA256_ENCRYPTION_ALGO)); byte[] signData = mac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8)); - sign = URLEncoder.encode(new String(Base64.encodeBase64(signData), CommonConstant.CHARSET_UTF_8)); + sign = URLEncoder.encode(new String(Base64.encodeBase64(signData), StandardCharsets.UTF_8), CommonConstant.CHARSET_UTF_8); } catch (Exception e) { log.error("DingDingHandler#assembleSign fail!:{}", Throwables.getStackTraceAsString(e)); } diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/EmailHandler.java 
diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/EmailHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/EmailHandler.java
index a86c652..0640f84 100644
--- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/EmailHandler.java
+++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/EmailHandler.java
@@ -45,7 +45,7 @@ public class EmailHandler extends BaseHandler{
         channelCode = ChannelType.EMAIL.getCode();

         // 按照请求限流,默认单机 3 qps (具体数值配置在apollo动态调整)
-        Double rateInitValue = Double.valueOf(3);
+        double rateInitValue = 3.0;
         flowControlParam = FlowControlParam.builder().rateInitValue(rateInitValue)
                 .rateLimitStrategy(RateLimitStrategy.REQUEST_RATE_LIMIT)
                 .rateLimiter(RateLimiter.create(rateInitValue)).build();
@@ -61,7 +61,7 @@ public class EmailHandler extends BaseHandler{
         if (CollUtil.isEmpty(files)) {
             MailUtil.send(account, taskInfo.getReceiver(), emailContentModel.getTitle(), emailContentModel.getContent(), true);
         } else {
-            MailUtil.send(account, taskInfo.getReceiver(), emailContentModel.getTitle(), emailContentModel.getContent(), true, files.toArray(new File[files.size()]));
+            MailUtil.send(account, taskInfo.getReceiver(), emailContentModel.getTitle(), emailContentModel.getContent(), true, files.toArray(new File[0]));
         }
diff --git a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java
index 5ce1112..cb956aa 100644
--- a/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java
+++ b/austin-handler/src/main/java/com/java3y/austin/handler/handler/impl/SmsHandler.java
@@ -45,9 +45,9 @@ public class SmsHandler extends BaseHandler{
     private static final String FLOW_KEY = "msgTypeSmsConfig";
     private static final String FLOW_KEY_PREFIX = "message_type_";
     /**
-     * 安全随机数,重用性能与随机数质量更高
+     * 安全随机数,性能与随机数质量更高
      */
-    private static final SecureRandom secureRandom = new SecureRandom();
+    private static final SecureRandom SECURE_RANDOM = new SecureRandom();

     @Autowired
     private SmsRecordDao smsRecordDao;
@@ -104,10 +104,10 @@ public class SmsHandler extends BaseHandler{
         }

         // 生成一个随机数[1,total],看落到哪个区间
-        int index = secureRandom.nextInt(total) + 1;
+        int index = SECURE_RANDOM.nextInt(total) + 1;

-        MessageTypeSmsConfig supplier = null;
-        MessageTypeSmsConfig supplierBack = null;
+        MessageTypeSmsConfig supplier;
+        MessageTypeSmsConfig supplierBack;
         for (int i = 0; i < messageTypeSmsConfigs.size(); ++i) {
             if (index <= messageTypeSmsConfigs.get(i).getWeights()) {
                 supplier = messageTypeSmsConfigs.get(i);
diff --git a/austin-stream/src/main/java/com/java3y/austin/stream/function/AustinFlatMapFunction.java b/austin-stream/src/main/java/com/java3y/austin/stream/function/AustinFlatMapFunction.java
index 63a742c..a9cbe26 100644
--- a/austin-stream/src/main/java/com/java3y/austin/stream/function/AustinFlatMapFunction.java
+++ b/austin-stream/src/main/java/com/java3y/austin/stream/function/AustinFlatMapFunction.java
@@ -13,7 +13,7 @@ import org.apache.flink.util.Collector;
 public class AustinFlatMapFunction implements FlatMapFunction<String, AnchorInfo> {

     @Override
-    public void flatMap(String value, Collector<AnchorInfo> collector) throws Exception {
+    public void flatMap(String value, Collector<AnchorInfo> collector){
         AnchorInfo anchorInfo = JSON.parseObject(value, AnchorInfo.class);
         collector.collect(anchorInfo);
     }
diff --git a/austin-stream/src/main/java/com/java3y/austin/stream/sink/AustinSink.java b/austin-stream/src/main/java/com/java3y/austin/stream/sink/AustinSink.java
index dca72f7..ac38924 100644
--- a/austin-stream/src/main/java/com/java3y/austin/stream/sink/AustinSink.java
+++ b/austin-stream/src/main/java/com/java3y/austin/stream/sink/AustinSink.java
@@ -28,7 +28,7 @@ import java.util.List;
 public class AustinSink implements SinkFunction<AnchorInfo> {

     @Override
-    public void invoke(AnchorInfo anchorInfo, Context context) throws Exception {
+    public void invoke(AnchorInfo anchorInfo, Context context){
         realTimeData(anchorInfo);
     }
diff --git a/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java b/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java
index 6bdaba7..40b868d 100644
--- a/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java
+++ b/austin-stream/src/main/java/com/java3y/austin/stream/utils/LettuceRedisUtils.java
@@ -48,7 +48,7 @@ public class LettuceRedisUtils {
             commands.flushCommands();
             LettuceFutures.awaitAll(10, TimeUnit.SECONDS,
-                    futures.toArray(new RedisFuture[futures.size()]));
+                    futures.toArray(new RedisFuture[0]));

             connect.close();
         }
diff --git a/austin-support/src/main/java/com/java3y/austin/support/config/SupportThreadPoolConfig.java b/austin-support/src/main/java/com/java3y/austin/support/config/SupportThreadPoolConfig.java
index af28350..ce9dacc 100644
--- a/austin-support/src/main/java/com/java3y/austin/support/config/SupportThreadPoolConfig.java
+++ b/austin-support/src/main/java/com/java3y/austin/support/config/SupportThreadPoolConfig.java
@@ -26,7 +26,7 @@ public class SupportThreadPoolConfig {
         return ExecutorBuilder.create()
                 .setCorePoolSize(ThreadPoolConstant.SINGLE_CORE_POOL_SIZE)
                 .setMaxPoolSize(ThreadPoolConstant.SINGLE_MAX_POOL_SIZE)
-                .setWorkQueue(new LinkedBlockingQueue(ThreadPoolConstant.BIG_QUEUE_SIZE))
+                .setWorkQueue(new LinkedBlockingQueue<>(ThreadPoolConstant.BIG_QUEUE_SIZE))
                 .setHandler(new ThreadPoolExecutor.CallerRunsPolicy())
                 .setAllowCoreThreadTimeOut(true)
                 .setKeepAliveTime(ThreadPoolConstant.SMALL_KEEP_LIVE_TIME, TimeUnit.SECONDS)
diff --git a/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java b/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java
index dd8086e..0a726d8 100644
--- a/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java
+++ b/austin-support/src/main/java/com/java3y/austin/support/service/impl/ConfigServiceImpl.java
@@ -23,7 +23,7 @@ public class ConfigServiceImpl implements ConfigService {
      * 本地配置
      */
     private static final String PROPERTIES_PATH = "local.properties";
-    private final Props props = new Props(PROPERTIES_PATH, StandardCharsets.UTF_8);
+    private final Props PROPS = new Props(PROPERTIES_PATH, StandardCharsets.UTF_8);

     /**
      * apollo配置
@@ -49,7 +49,7 @@ public class ConfigServiceImpl implements ConfigService {
         } else if (Boolean.TRUE.equals(enableNacos)) {
             return nacosUtils.getProperty(key, defaultValue);
         } else {
-            return props.getProperty(key, defaultValue);
+            return PROPS.getProperty(key, defaultValue);
         }
     }
 }
diff --git a/austin-support/src/main/java/com/java3y/austin/support/utils/ContentHolderUtil.java b/austin-support/src/main/java/com/java3y/austin/support/utils/ContentHolderUtil.java
index c1b8cd3..96e3572 100644
--- a/austin-support/src/main/java/com/java3y/austin/support/utils/ContentHolderUtil.java
+++ b/austin-support/src/main/java/com/java3y/austin/support/utils/ContentHolderUtil.java
@@ -45,7 +45,7 @@ public class ContentHolderUtil {
         @Override
         public String resolvePlaceholder(String placeholderName) {
            if (Objects.isNull(paramMap)) {
-                String errorStr = MessageFormat.format("template:{0} require param:{1},but not exist! paramMap:{2}", template, placeholderName, paramMap);
+                String errorStr = MessageFormat.format("template:{0} require param:{1},but not exist! paramMap:{2}", template, placeholderName, null);
                 throw new IllegalArgumentException(errorStr);
             }
             String value = paramMap.get(placeholderName);
diff --git a/austin-support/src/main/java/com/java3y/austin/support/utils/LogUtils.java b/austin-support/src/main/java/com/java3y/austin/support/utils/LogUtils.java
index 3464c97..38ccb86 100644
--- a/austin-support/src/main/java/com/java3y/austin/support/utils/LogUtils.java
+++ b/austin-support/src/main/java/com/java3y/austin/support/utils/LogUtils.java
@@ -31,7 +31,7 @@ public class LogUtils extends CustomLogListener {
      * 方法切面的日志 @OperationLog 所产生
      */
     @Override
-    public void createLog(LogDTO logDTO) throws Exception {
+    public void createLog(LogDTO logDTO){
         log.info(JSON.toJSONString(logDTO));
     }
diff --git a/austin-support/src/main/java/com/java3y/austin/support/utils/OkHttpUtils.java b/austin-support/src/main/java/com/java3y/austin/support/utils/OkHttpUtils.java
index 2da5a1f..542d24f 100644
--- a/austin-support/src/main/java/com/java3y/austin/support/utils/OkHttpUtils.java
+++ b/austin-support/src/main/java/com/java3y/austin/support/utils/OkHttpUtils.java
@@ -177,7 +177,7 @@ public class OkHttpUtils {
     private String execute(Request request) {
         try (Response response = okHttpClient.newCall(request).execute()) {
             if (response.isSuccessful()) {
-                return response.body().string();
+                return response.body() != null ? response.body().string() : null;
             }
         } catch (Exception e) {
             log.error(Throwables.getStackTraceAsString(e));
diff --git a/austin-support/src/main/java/com/java3y/austin/support/utils/RedisUtils.java b/austin-support/src/main/java/com/java3y/austin/support/utils/RedisUtils.java
index 788c8cc..fd7cc66 100644
--- a/austin-support/src/main/java/com/java3y/austin/support/utils/RedisUtils.java
+++ b/austin-support/src/main/java/com/java3y/austin/support/utils/RedisUtils.java
@@ -171,14 +171,15 @@ public class RedisUtils {
      */
     public Boolean execLimitLua(RedisScript<Long> redisScript, List<String> keys, String... args) {
+        // 可变参数转数组
+        String[] argsArray = args != null ? args : new String[0];
         try {
-            Long execute = redisTemplate.execute(redisScript, keys, args);
+            Long execute = redisTemplate.execute(redisScript, keys, (Object[]) argsArray);
             if (Objects.isNull(execute)) {
                 return false;
             }
             return CommonConstant.TRUE.equals(execute.intValue());
         } catch (Exception e) {
-            log.error("redis execLimitLua fail! e:{}", Throwables.getStackTraceAsString(e));
         }
         return false;
diff --git a/austin-web/src/main/java/com/java3y/austin/AustinApplication.java b/austin-web/src/main/java/com/java3y/austin/AustinApplication.java
index 7676bb6..a3aaeee 100644
--- a/austin-web/src/main/java/com/java3y/austin/AustinApplication.java
+++ b/austin-web/src/main/java/com/java3y/austin/AustinApplication.java
@@ -34,7 +34,7 @@ public class AustinApplication implements CommandLineRunner {
     }

     @Override
-    public void run(String... args) throws Exception {
+    public void run(String... args) {
         log.info(AnsiOutput.toString(AustinConstant.PROJECT_BANNER, "\n", AnsiColor.GREEN, AustinConstant.PROJECT_NAME, AnsiColor.DEFAULT, AnsiStyle.FAINT));
         log.info("Austin start succeeded, Index >> http://127.0.0.1:{}/", serverPort);
         log.info("Austin start succeeded, Swagger Url >> http://127.0.0.1:{}/swagger-ui/index.html", serverPort);
diff --git a/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java b/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java
index a97d0f6..129580b 100644
--- a/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java
+++ b/austin-web/src/main/java/com/java3y/austin/web/controller/AlipayMiniProgramController.java
@@ -60,7 +60,7 @@ public class AlipayMiniProgramController {
         AlipayOpenMiniMessageTemplateBatchqueryModel model = new AlipayOpenMiniMessageTemplateBatchqueryModel();

         // 设置子板状态列表
-        List<String> statusList = new ArrayList();
+        List<String> statusList = new ArrayList<>();
         statusList.add("STARTED");
         model.setStatusList(statusList);
@@ -112,7 +112,7 @@ public class AlipayMiniProgramController {
         AlipayOpenMiniMessageTemplateBatchqueryModel model = new AlipayOpenMiniMessageTemplateBatchqueryModel();

         // 设置子板状态列表
-        List<String> statusList = new ArrayList();
+        List<String> statusList = new ArrayList<>();
         statusList.add("STARTED");
         model.setStatusList(statusList);
diff --git a/austin-web/src/main/java/com/java3y/austin/web/controller/MessageTemplateController.java b/austin-web/src/main/java/com/java3y/austin/web/controller/MessageTemplateController.java
index ea62a21..cbd42bb 100644
--- a/austin-web/src/main/java/com/java3y/austin/web/controller/MessageTemplateController.java
+++ b/austin-web/src/main/java/com/java3y/austin/web/controller/MessageTemplateController.java
@@ -5,6 +5,7 @@ import cn.hutool.core.text.CharSequenceUtil;
 import cn.hutool.core.text.StrPool;
 import cn.hutool.core.util.IdUtil;
 import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.TypeReference;
 import com.google.common.base.Throwables;
 import com.java3y.austin.common.enums.RespStatusEnum;
 import com.java3y.austin.common.vo.BasicResultVO;
@@ -137,7 +138,7 @@ public class MessageTemplateController {
     @ApiOperation("/测试发送接口")
     public SendResponse test(@RequestBody MessageTemplateParam messageTemplateParam) {
-        Map<String, String> variables = JSON.parseObject(messageTemplateParam.getMsgContent(), Map.class);
+        Map<String, String> variables = JSON.parseObject(messageTemplateParam.getMsgContent(), new TypeReference<Map<String, String>>() {});
         MessageParam messageParam = MessageParam.builder().receiver(messageTemplateParam.getReceiver()).variables(variables).build();
         SendRequest sendRequest = SendRequest.builder().code(BusinessCode.COMMON_SEND.getCode()).messageTemplateId(messageTemplateParam.getId()).messageParam(messageParam).build();
         SendResponse response = sendService.send(sendRequest);
diff --git a/austin-web/src/main/java/com/java3y/austin/web/exception/ExceptionHandlerAdvice.java b/austin-web/src/main/java/com/java3y/austin/web/exception/ExceptionHandlerAdvice.java
index 461ab6f..e952876 100644
--- a/austin-web/src/main/java/com/java3y/austin/web/exception/ExceptionHandlerAdvice.java
+++ b/austin-web/src/main/java/com/java3y/austin/web/exception/ExceptionHandlerAdvice.java
@@ -35,7 +35,7 @@ public class ExceptionHandlerAdvice {
     @ResponseStatus(HttpStatus.OK)
     public BasicResultVO commonResponse(CommonException ce) {
         log.error(Throwables.getStackTrace(ce));
-        return new BasicResultVO(ce.getCode(), ce.getMessage(), ce.getRespStatusEnum());
+        return new BasicResultVO<>(ce.getCode(), ce.getMessage(), ce.getRespStatusEnum());
     }
 }
diff --git a/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java b/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java
index 67416a9..58d88de 100644
--- a/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java
+++ b/austin-web/src/main/java/com/java3y/austin/web/utils/Convert4Amis.java
@@ -465,11 +465,11 @@ public class Convert4Amis {
                 itemsVO.setBusinessId(String.valueOf(smsRecord.getMessageTemplateId()));
                 itemsVO.setContent(smsRecord.getMsgContent());
                 itemsVO.setSendType(EnumUtil.getDescriptionByCode(smsRecord.getStatus(), SmsStatus.class));
-                itemsVO.setSendTime(DateUtil.format(new Date(Long.valueOf(smsRecord.getCreated() * 1000L)), DatePattern.NORM_DATETIME_PATTERN));
+                itemsVO.setSendTime(DateUtil.format(new Date(smsRecord.getCreated() * 1000L), DatePattern.NORM_DATETIME_PATTERN));
             } else {
                 itemsVO.setReceiveType(EnumUtil.getDescriptionByCode(smsRecord.getStatus(), SmsStatus.class));
                 itemsVO.setReceiveContent(smsRecord.getReportContent());
-                itemsVO.setReceiveTime(DateUtil.format(new Date(Long.valueOf(smsRecord.getUpdated() * 1000L)), DatePattern.NORM_DATETIME_PATTERN));
+                itemsVO.setReceiveTime(DateUtil.format(new Date(smsRecord.getUpdated() * 1000L), DatePattern.NORM_DATETIME_PATTERN));
             }
         }
         itemsVoS.add(itemsVO);
diff --git a/austin-web/src/main/java/com/java3y/austin/web/utils/SpringFileUtils.java b/austin-web/src/main/java/com/java3y/austin/web/utils/SpringFileUtils.java
index 021845a..e65010f 100644
--- a/austin-web/src/main/java/com/java3y/austin/web/utils/SpringFileUtils.java
+++ b/austin-web/src/main/java/com/java3y/austin/web/utils/SpringFileUtils.java
@@ -4,9 +4,9 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.web.multipart.MultipartFile;

 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.file.Files;


 /**
@@ -27,13 +27,13 @@ public class SpringFileUtils {
     public static File getFile(MultipartFile multipartFile) {
         String fileName = multipartFile.getOriginalFilename();
         File file = new File(fileName);
-        try (OutputStream out = new FileOutputStream(file)){
+        try (OutputStream out = Files.newOutputStream(file.toPath())){
             byte[] ss = multipartFile.getBytes();
-            for (int i = 0; i < ss.length; i++) {
-                out.write(ss[i]);
+            for (byte s : ss) {
+                out.write(s);
             }
         } catch (IOException e) {
-            log.error("SpringFileUtils#getFile multipartFile is converted to File error:{}", e);
+            log.error("SpringFileUtils#getFile multipartFile is converted to File error:{}", e.toString());
             return null;
         }
         return file;