!176 ♥️ Release 2.2.0-BETA2 public beta

Merge pull request !176 from 疯狂的狮子Li/dev
疯狂的狮子Li
2024-07-01 08:52:35 +00:00
committed by Gitee
278 changed files with 2060 additions and 287 deletions

View File

@@ -61,7 +61,7 @@ CCFlow ChiCheng low-code - workflow & forms - https://gitee.com/opencc/RuoYi-JFlow <br>
| Data masking | Annotation + jackson masking during serialization; supports different masking rules per module<br/>Multiple built-in strategies (ID card, phone number, address, email, bank card, etc.), freely extensible | None |
| Data encryption/decryption | Annotation + mybatis interceptor for automatic encryption/decryption on read and write<br/>Multiple strategies such as BASE64, AES, RSA, SM2, SM4 | None |
| Data translation | Annotation + jackson rewrites data dynamically during serialization<br/>Multiple modes: `mapped translation` `direct translation` `other conditional translation`; custom extensions take just two steps via the interface; many implementations built in | None |
| Multi-datasource framework | dynamic-datasource, covering most databases<br/>Heterogeneous databases managed dynamically via yml configuration, or added from the front-end UI<br/>Supports SpEL expressions to switch datasources on request headers, parameters, etc. | druid with hand-written datasource configuration code; tedious and poorly supported |
| Multi-datasource transactions | dynamic-datasource supports transaction rollback across datasources of different database types | Not supported |
| Database connection pool | HikariCP, Spring's official built-in pool; simple configuration, renowned for performance and stability | druid: many bugs, weak community maintenance, low activity, verbose configuration, mediocre performance |
| Database primary key | Snowflake IDs: timestamp-based, ordered, unique; no more primary-key collisions when sharding or merging data | Database auto-increment IDs: limited data volume, no unique keys across multiple datasources |
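
For context on the data-masking row above, a minimal sketch of the annotation + Jackson serialization technique it describes. `PhoneMaskSerializer` and `UserVo` are hypothetical names for illustration, not the project's actual masking implementation:

```java
// Hypothetical sketch: field-level masking during Jackson serialization.
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import java.io.IOException;

public class PhoneMaskSerializer extends JsonSerializer<String> {
    @Override
    public void serialize(String value, JsonGenerator gen, SerializerProvider sp)
            throws IOException {
        // Keep the first 3 and last 4 digits: 13812345678 -> 138****5678
        if (value != null && value.length() == 11) {
            gen.writeString(value.substring(0, 3) + "****" + value.substring(7));
        } else {
            gen.writeString(value);
        }
    }
}

class UserVo {
    // Masking happens only at serialization time, so the stored data stays intact.
    @JsonSerialize(using = PhoneMaskSerializer.class)
    public String phone;
}
```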

View File

@@ -85,6 +85,16 @@ spring:
      # Console address; registered via server-name since 1.3.0
      # dashboard: localhost:8718
    bus:
      id: ${spring.application.name}
      base-packages: org.dromara.**.event
  # Message bus; Kafka can also be used, see the spring-cloud-bus docs
  rabbitmq:
    host: localhost
    port: 5672
    username: ruoyi
    password: ruoyi123
  # Common redis configuration; sub-services may override it with their own settings
  data:
    redis:
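
To sanity-check the new bus broker settings, a hypothetical standalone smoke test using the plain amqp-client API (not part of this PR):

```java
// Hypothetical smoke test for the RabbitMQ settings introduced above.
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;

public class BusRabbitSmokeTest {
    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost");    // spring.rabbitmq.host
        factory.setPort(5672);           // spring.rabbitmq.port
        factory.setUsername("ruoyi");    // spring.rabbitmq.username
        factory.setPassword("ruoyi123"); // spring.rabbitmq.password
        try (Connection conn = factory.newConnection()) {
            System.out.println("RabbitMQ reachable: " + conn.isOpen());
        }
    }
}
```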

View File

@@ -5,6 +5,7 @@ security:
  enabled: true
  excludeUrls:
    - /system/notice
    - /workflow/model/save
    - /workflow/model/editModelXml
  # Whitelist that skips validation
  ignore:

View File

@@ -24,6 +24,8 @@ snail-job:
  server-name: ruoyi-snailjob-server
  # Service name takes priority; IP is the fallback
  host: 127.0.0.1
  port: 1788
  port: 17888
  # See the `sj_namespace` table in script/sql/ry_job.sql
  namespace: ${spring.profiles.active}
  # Drifts with the main application's port (e.g. server.port=8080 -> 28080)
  port: 2${server.port}

View File

@@ -21,7 +21,7 @@ snail-job:
  # Batch size for each pull of retry data
  job-pull-page-size: 1000
  # Server-side netty port
  netty-port: 1788
  netty-port: 17888
  # Total number of partitions for the retry and dead-letter tables
  total-partition: 2
  # Maximum number of retry instructions a single client accepts per second

View File

@@ -173,7 +173,7 @@ services:
      TZ: Asia/Shanghai
    ports:
      - "8800:8800"
      - "1788:1788"
      - "17888:17888"
    volumes:
      - /docker/snailjob/logs/:/ruoyi/snailjob/logs
    privileged: true
@@ -363,19 +363,22 @@ services:
- "10909:10909"
- "10912:10912"
environment:
JAVA_OPT_EXT: -server -Xms512M -Xmx512M -Xmn256m
JAVA_OPT: -server -Xms512M -Xmx512M
NAMESRV_ADDR: 127.0.0.1:9876
# --enable-proxy 开启broker与proxy共用模式 生产部署建议将proxy单独部署
command: sh mqbroker --enable-proxy -c /home/rocketmq/rocketmq-5.2.0/conf/broker.conf
depends_on:
- mqnamesrv
- rmqnamesrv
volumes:
- /docker/rocketmq/broker1/conf/broker.conf:/home/rocketmq/rocketmq-5.2.0/conf/broker.conf
- /docker/rocketmq/broker1/logs:/home/rocketmq/logs/rocketmqlogs
- /docker/rocketmq/broker1/store:/home/rocketmq/store
privileged: true
network_mode: "host"
rmqconsole:
image: styletang/rocketmq-console-ng
container_name: mqconsole
image: apacherocketmq/rocketmq-dashboard:latest
container_name: rmqconsole
ports:
- "19876:19876"
environment:
@@ -453,7 +456,7 @@ services:
    network_mode: "host"
  sky-oap:
    image: apache/skywalking-oap-server:9.3.0
    image: apache/skywalking-oap-server:9.7.0
    container_name: sky-oap
    ports:
      - "11800:11800"
@@ -470,7 +473,7 @@ services:
    network_mode: "host"
  sky-ui:
    image: apache/skywalking-ui:9.3.0
    image: apache/skywalking-ui:9.7.0
    container_name: sky-ui
    ports:
      - "18080:18080"

View File

@@ -56,7 +56,7 @@ http {
        # }
        # Block external access to internal actuator paths
        location ~ ^(/[^/]*)?/actuator(/.*)?$ {
        location ~ ^(/[^/]*)?/actuator.*(/.*)?$ {
            return 403;
        }
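
The broadened pattern also catches suffix variants such as `/actuator;x` that the old one passed through. As a hypothetical cross-check (Java regex here, though nginx evaluates PCRE), both patterns compared:

```java
// Hypothetical comparison of the old and new actuator-blocking regexes.
import java.util.regex.Pattern;

public class ActuatorRegexCheck {
    public static void main(String[] args) {
        Pattern oldPat = Pattern.compile("^(/[^/]*)?/actuator(/.*)?$");
        Pattern newPat = Pattern.compile("^(/[^/]*)?/actuator.*(/.*)?$");
        String[] paths = {"/actuator", "/actuator/health",
                          "/system/actuator/env", "/actuator;x", "/system/notice"};
        for (String p : paths) {
            // "403" means the location matches and the request is rejected
            System.out.printf("%-22s old=%s new=%s%n", p,
                    oldPat.matcher(p).matches() ? "403" : "pass",
                    newPat.matcher(p).matches() ? "403" : "pass");
        }
    }
}
```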

View File

@@ -15,8 +15,6 @@ flushDiskType = ASYNC_FLUSH
# IP address of the server hosting this broker node. **Very important**: in master/slave mode the slave syncs data from the master's brokerIP2; if it is not configured, master/slave replication will not work. Set brokerIP1 to an IP reachable from outside. On dual-NIC servers (e.g. Alibaba Cloud) this must be configured: the master needs both ip1 and ip2, the slave only needs ip1.
# Which IP depends on the environment: 127.x for local use, 192.x on a LAN, the public IP for internet access
brokerIP1 = 192.168.31.165
# nameServer addresses, separated by semicolons
namesrvAddr = 127.0.0.1:9876
# Port the broker listens on for external services
listenPort = 10911
# Whether the broker is allowed to auto-create topics

View File

@@ -215,12 +215,13 @@ Apache 2.0 licenses
The following components are provided under the Apache License. See project link for details.
The text of each license is the standard Apache 2.0 license.
raphw (byte-buddy) 1.12.19: http://bytebuddy.net/ , Apache 2.0
Google: grpc-java 1.50.0: https://github.com/grpc/grpc-java, Apache 2.0
raphw (byte-buddy) 1.14.9: http://bytebuddy.net/ , Apache 2.0
Google: grpc-java 1.53.0: https://github.com/grpc/grpc-java, Apache 2.0
Google: gson 2.8.9: https://github.com/google/gson , Apache 2.0
Google: proto-google-common-protos 2.0.1: https://github.com/googleapis/googleapis , Apache 2.0
Google: jsr305 3.0.2: http://central.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.pom , Apache 2.0
netty 4.1.86: https://github.com/netty/netty/blob/4.1/LICENSE.txt, Apache 2.0
Google: guava 32.0.1: https://github.com/google/guava , Apache 2.0
netty 4.1.100: https://github.com/netty/netty/blob/4.1/LICENSE.txt, Apache 2.0
========================================================================
BSD licenses

View File

@@ -1,5 +1,5 @@
Apache SkyWalking
Copyright 2017-2023 The Apache Software Foundation
Copyright 2017-2024 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@@ -20,6 +20,7 @@ collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
# ${service name} = [${group name}::]${logic name}
# The group name is optional.
agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
agent.service_name#length=${SW_AGENT_NAME_MAX_LENGTH:50}
# The agent namespace
agent.namespace=${SW_AGENT_NAMESPACE:}
@@ -48,18 +49,10 @@ agent.ignore_suffix=${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,
# SkyWalking team may ask for these files in order to resolve compatibility problems.
agent.is_open_debugging_class=${SW_AGENT_OPEN_DEBUG:false}
# If true, SkyWalking agent will cache all instrumented class files to memory or disk (decided by the class cache mode),
# allowing other javaagents to enhance classes already enhanced by the SkyWalking agent.
agent.is_cache_enhanced_class=${SW_AGENT_CACHE_CLASS:false}
# The instrumented classes cache mode: MEMORY or FILE
# MEMORY: cache class bytes in memory; if there are too many or overly large instrumented classes, this may take up more memory
# FILE: cache class bytes in `/class-cache` folder, automatically clean up cached class files when the application exits
agent.class_cache_mode=${SW_AGENT_CLASS_CACHE_MODE:MEMORY}
# Instance name is the identity of an instance and should be unique within the service. If empty, SkyWalking agent will
# generate a 32-bit uuid. By default, SkyWalking uses UUID@hostname as the instance name. Max length is 50 (UTF-8 chars).
agent.instance_name=${SW_AGENT_INSTANCE_NAME:}
agent.instance_name#length=${SW_AGENT_INSTANCE_NAME_MAX_LENGTH:50}
# service instance properties in json format. e.g. agent.instance_properties_json = {"org": "apache-skywalking"}
agent.instance_properties_json=${SW_INSTANCE_PROPERTIES_JSON:}
@@ -163,8 +156,10 @@ buffer.channel_size=${SW_BUFFER_CHANNEL_SIZE:5}
buffer.buffer_size=${SW_BUFFER_BUFFER_SIZE:300}
# If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile.
profile.active=${SW_AGENT_PROFILE_ACTIVE:true}
# Parallel monitor segment count
# Parallel monitor endpoint thread count
profile.max_parallel=${SW_AGENT_PROFILE_MAX_PARALLEL:5}
# Max monitoring sub-tasks count of one single endpoint access
profile.max_accept_sub_parallel=${SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL:5}
# Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it.
profile.duration=${SW_AGENT_PROFILE_DURATION:10}
# Max dump thread stack depth
@@ -246,7 +241,7 @@ plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092}
plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10}
# Kafka producer configuration. Read [producer configure](http://kafka.apache.org/24/documentation.html#producerconfigs)
# to get more details. Check document for more details and examples.
plugin.kafka.producer_config=${sw_plugin_kafka_producer_config:}
plugin.kafka.producer_config=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG:}
# Configure Kafka producer settings in JSON format. Note that duplicated keys are overridden by plugin.kafka.producer_config[key].
plugin.kafka.producer_config_json=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON:}
# Specify which Kafka topic name for Meter System data to report to.
@@ -263,6 +258,8 @@ plugin.kafka.topic_management=${SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT:skywalking-mana
plugin.kafka.topic_logging=${SW_PLUGIN_KAFKA_TOPIC_LOGGING:skywalking-logs}
# Isolate multiple OAP servers sharing the same Kafka cluster (the namespace is prepended to Kafka topic names with `-`).
plugin.kafka.namespace=${SW_KAFKA_NAMESPACE:}
# Specify which class decodes the encoded Kafka configuration. You can set encoded information in `plugin.kafka.producer_config_json` or `plugin.kafka.producer_config` if needed.
plugin.kafka.decode_class=${SW_KAFKA_DECODE_CLASS:}
# Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when `Spring annotation plugin` has been activated.
plugin.springannotation.classname_match_regex=${SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX:}
# Whether or not to transmit logged data as formatted or un-formatted.
@@ -271,6 +268,10 @@ plugin.toolkit.log.transmit_formatted=${SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED
plugin.lettuce.trace_redis_parameters=${SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS:false}
# If set to positive number and `plugin.lettuce.trace_redis_parameters` is set to `true`, Redis command parameters would be collected and truncated to this length.
plugin.lettuce.redis_parameter_max_length=${SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH:128}
# Specify which command should be converted to write operation
plugin.lettuce.operation_mapping_write=${SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
# Specify which command should be converted to read operation
plugin.lettuce.operation_mapping_read=${SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
# If set to true, the parameters of the cypher would be collected.
plugin.neo4j.trace_cypher_parameters=${SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS:false}
# If set to positive number, the `db.cypher.parameters` would be truncated to this length, otherwise it would be completely saved, which may cause performance problem.
@@ -311,3 +312,13 @@ plugin.redisson.redis_parameter_max_length=${SW_PLUGIN_REDISSON_REDIS_PARAMETER_
plugin.redisson.operation_mapping_write=${SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
# Specify which command should be converted to read operation
plugin.redisson.operation_mapping_read=${SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
# This config item controls whether the Netty-http plugin should collect the http body of the request.
plugin.nettyhttp.collect_request_body=${SW_PLUGIN_NETTYHTTP_COLLECT_REQUEST_BODY:false}
# When `HTTP_COLLECT_REQUEST_BODY` is enabled, how many characters to keep and send to the OAP backend; use negative values to keep and send the complete body.
plugin.nettyhttp.filter_length_limit=${SW_PLUGIN_NETTYHTTP_FILTER_LENGTH_LIMIT:1024}
# When `HTTP_COLLECT_REQUEST_BODY` is enabled and the content-type starts with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the request body; multiple prefixes should be separated by `,`
plugin.nettyhttp.supported_content_types_prefix=${SW_PLUGIN_NETTYHTTP_SUPPORTED_CONTENT_TYPES_PREFIX:application/json,text/}
# If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.
plugin.rocketmqclient.collect_message_keys=${SW_PLUGIN_ROCKETMQCLIENT_COLLECT_MESSAGE_KEYS:false}
# If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.
plugin.rocketmqclient.collect_message_tags=${SW_PLUGIN_ROCKETMQCLIENT_COLLECT_MESSAGE_TAGS:false}
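
For the two new RocketMQ-client switches above, a hypothetical producer snippet showing the message keys and tags the agent would collect once `collect_message_keys`/`collect_message_tags` are `true`. Group and topic names are illustrative:

```java
// Hypothetical RocketMQ producer; "demo_group"/"demo_topic" are made-up names.
import org.apache.rocketmq.client.producer.DefaultMQProducer;
import org.apache.rocketmq.common.message.Message;

public class TaggedProducer {
    public static void main(String[] args) throws Exception {
        DefaultMQProducer producer = new DefaultMQProducer("demo_group");
        producer.setNamesrvAddr("127.0.0.1:9876");
        producer.start();
        // "TagA" is the message tag; setKeys attaches the business key.
        // Both would be reported as span tags when the switches are enabled.
        Message msg = new Message("demo_topic", "TagA", "hello".getBytes());
        msg.setKeys("order-1001");
        producer.send(msg);
        producer.shutdown();
    }
}
```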

Some files were not shown because too many files have changed in this diff.