# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The service name in UI
# ${service name} = [${group name}::]${logic name}
# The group name is optional.
agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
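# Illustrative example (not part of the shipped defaults; "hippo4j" and "demo-app" are made-up names):
# the optional group prefix makes the service appear under that group in the UI.
# agent.service_name=hippo4j::demo-app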

# The agent namespace
agent.namespace=${SW_AGENT_NAMESPACE:}

# The agent cluster
agent.cluster=${SW_AGENT_CLUSTER:}

# The number of sampled traces per 3 seconds
# Negative or zero means off, by default
agent.sample_n_per_3_secs=${SW_AGENT_SAMPLE:-1}
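# Illustrative example (assumed value, not a default): cap collection at 100 traces per 3-second window.
# agent.sample_n_per_3_secs=100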

# Authentication active is based on backend setting, see application.yml for more details.
agent.authentication=${SW_AGENT_AUTHENTICATION:}

# The max number of TraceSegmentRef in a single span, to keep the memory cost estimable.
agent.trace_segment_ref_limit_per_span=${SW_TRACE_SEGMENT_LIMIT:500}

# The max amount of spans in a single segment.
# Through this config item, SkyWalking keeps your application's memory cost estimable.
agent.span_limit_per_segment=${SW_AGENT_SPAN_LIMIT:300}

# If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by `,`.
agent.ignore_suffix=${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg}

# If true, the SkyWalking agent will save all instrumented class files in the `/debugging` folder.
# The SkyWalking team may ask for these files in order to resolve compatibility problems.
agent.is_open_debugging_class=${SW_AGENT_OPEN_DEBUG:false}

# If true, the SkyWalking agent will cache all instrumented class files in memory or on disk (decided by the class cache mode),
# allowing other javaagents to enhance the classes that have already been enhanced by the SkyWalking agent.
agent.is_cache_enhanced_class=${SW_AGENT_CACHE_CLASS:false}

# The instrumented classes cache mode: MEMORY or FILE
# MEMORY: cache class bytes in memory; if there are too many instrumented classes or they are too large, this may take up more memory
# FILE: cache class bytes in the `/class-cache` folder; cached class files are cleaned up automatically when the application exits
agent.class_cache_mode=${SW_AGENT_CLASS_CACHE_MODE:MEMORY}

# Instance name is the identity of an instance and should be unique within the service. If empty, the SkyWalking agent will
# generate a 32-bit UUID. By default, SkyWalking uses UUID@hostname as the instance name. Max length is 50 (UTF-8 chars).
agent.instance_name=${SW_AGENT_INSTANCE_NAME:}

# Service instance properties in JSON format. e.g. agent.instance_properties_json = {"org": "apache-skywalking"}
agent.instance_properties_json=${SW_INSTANCE_PROPERTIES_JSON:}

# How deep the agent goes when logging cause exceptions.
agent.cause_exception_depth=${SW_AGENT_CAUSE_EXCEPTION_DEPTH:5}

# Force reconnection period of gRPC, based on grpc_channel_check_interval.
agent.force_reconnection_period=${SW_AGENT_FORCE_RECONNECTION_PERIOD:1}

# The operationName max length
# Notice, in the current practice, we don't recommend a length over 190.
agent.operation_name_threshold=${SW_AGENT_OPERATION_NAME_THRESHOLD:150}

# If true, keep tracing even when the backend is not available.
agent.keep_tracing=${SW_AGENT_KEEP_TRACING:false}

# The agent uses gRPC plain text by default.
# If true, the SkyWalking agent uses TLS even if no CA file is detected.
agent.force_tls=${SW_AGENT_FORCE_TLS:false}

# gRPC SSL trusted CA file.
agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt}

# Enable mTLS when ssl_key_path and ssl_cert_chain_path exist.
agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:}

agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:}
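# Illustrative mTLS sketch (paths are placeholders, not defaults): force TLS and point the agent
# at a client key and certificate chain so both sides authenticate each other.
# agent.force_tls=true
# agent.ssl_key_path=/path/to/client.pem
# agent.ssl_cert_chain_path=/path/to/client-chain.crt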

# Limit the size of the ipv4 list.
osinfo.ipv4_list_size=${SW_AGENT_OSINFO_IPV4_LIST_SIZE:10}

# gRPC channel status check interval.
collector.grpc_channel_check_interval=${SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL:30}
# Agent heartbeat report period. Unit: second.
collector.heartbeat_period=${SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD:30}
# The agent sends the instance properties to the backend every
# collector.heartbeat_period * collector.properties_report_period_factor seconds
collector.properties_report_period_factor=${SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR:10}
# Backend service addresses.
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
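# Illustrative example (host names are made up): multiple OAP backends can be listed, separated by commas.
# collector.backend_service=oap-node-1:11800,oap-node-2:11800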
# Timeout for the gRPC client when sending data to the upstream. Unit is second.
collector.grpc_upstream_timeout=${SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT:30}
# Interval at which the sniffer gets the profile task list.
collector.get_profile_task_interval=${SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL:20}
# Interval at which the sniffer gets the agent dynamic config.
collector.get_agent_dynamic_config_interval=${SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL:20}
# If true, the SkyWalking agent will periodically resolve DNS to update the receiver service addresses.
collector.is_resolve_dns_periodically=${SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY:false}

# Logging level
logging.level=${SW_LOGGING_LEVEL:INFO}
# Logging file_name
logging.file_name=${SW_LOGGING_FILE_NAME:hippo4j-api.log}
# Log output. Default is FILE. CONSOLE means output to stdout.
logging.output=${SW_LOGGING_OUTPUT:FILE}
# Log files directory. Default is a blank string, meaning logs are written to "{theHippo4jAgentJarDir}/logs".
# {theHippo4jAgentJarDir} is the directory where the hippo4j agent jar file is located.
logging.dir=${SW_LOGGING_DIR:}
# Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs.
# JSON resolver prints logs in JSON format.
logging.resolver=${SW_LOGGING_RESOLVER:PATTERN}
# Logging format. These are all the conversion specifiers:
# * %level means log level.
# * %timestamp means the current time in the format yyyy-MM-dd HH:mm:ss:SSS.
# * %thread means the name of the current thread.
# * %msg means the message logged by the user.
# * %class means the simple name of the target class.
# * %throwable means a throwable logged by the user.
# * %agent_name means agent.service_name. Only applies to the PatternLogger.
logging.pattern=${SW_LOGGING_PATTERN:%level %timestamp %thread %class : %msg %throwable}
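# Illustrative rendering of the default pattern above (timestamp, class, and message are made up):
# INFO 2023-01-01 12:00:00:000 main SnifferConfigInitializer : Config file loaded.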
# Logging max_file_size, default: 300 * 1024 * 1024 = 314572800
logging.max_file_size=${SW_LOGGING_MAX_FILE_SIZE:314572800}
# The max number of history log files. When a rollover happens, if the log files exceed this number,
# the oldest file will be deleted. Negative or zero means off, by default.
logging.max_history_files=${SW_LOGGING_MAX_HISTORY_FILES:-1}

# Listed exceptions would not be treated as an error, because in some code the exception is used as a way of controlling business flow.
# Besides, the annotation named IgnoredException in the trace toolkit is another way to configure ignored exceptions.
statuscheck.ignored_exceptions=${SW_STATUSCHECK_IGNORED_EXCEPTIONS:}
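# Illustrative example (class names are placeholders): list fully qualified exception names, separated by commas.
# statuscheck.ignored_exceptions=com.example.BusinessFlowException,com.example.RetryableException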
# The max recursive depth when checking the exception traced by the agent. Typically, we don't recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.
statuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1}

# Max element count in the correlation context.
correlation.element_max_number=${SW_CORRELATION_ELEMENT_MAX_NUMBER:3}

# Max value length of each element.
correlation.value_max_length=${SW_CORRELATION_VALUE_MAX_LENGTH:128}
# Tag the span by the key/value in the correlation context, when the keys listed here exist.
correlation.auto_tag_keys=${SW_CORRELATION_AUTO_TAG_KEYS:}
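# Illustrative example (key names are made up): tag spans with these correlation keys whenever they are present.
# correlation.auto_tag_keys=tenant_id,order_channel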
# The buffer size of collected JVM info.
jvm.buffer_size=${SW_JVM_BUFFER_SIZE:600}
# The buffer channel size.
buffer.channel_size=${SW_BUFFER_CHANNEL_SIZE:5}
# The buffer size.
buffer.buffer_size=${SW_BUFFER_BUFFER_SIZE:300}
# If true, the SkyWalking agent will enable profiling when the user creates a new profile task. Otherwise, profiling is disabled.
profile.active=${SW_AGENT_PROFILE_ACTIVE:true}
# Parallel monitor segment count
profile.max_parallel=${SW_AGENT_PROFILE_MAX_PARALLEL:5}
# Max segment monitoring time (minutes). If the current segment has been monitored longer than this limit, stop monitoring it.
profile.duration=${SW_AGENT_PROFILE_DURATION:10}
# Max dump thread stack depth
profile.dump_max_stack_depth=${SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH:500}
# Snapshot transport to backend buffer size
profile.snapshot_transport_buffer_size=${SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE:4500}
# If true, the agent collects and reports metrics to the backend.
meter.active=${SW_METER_ACTIVE:true}
# Report meters interval. The unit is second.
meter.report_interval=${SW_METER_REPORT_INTERVAL:20}
# Max size of the meter pool
meter.max_meter_size=${SW_METER_MAX_METER_SIZE:500}
# The max size of the message to send to the server. Default is 10 MB.
log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}

# Mount the specific folders of the plugins. Plugins in mounted folders would work.
plugin.mount=${SW_MOUNT_FOLDERS:plugins,activations}
# Peer maximum description limit.
plugin.peer_max_length=${SW_PLUGIN_PEER_MAX_LENGTH:200}
# Exclude some plugins defined in the plugins dir. Plugin names are defined in [Agent plugin list](Plugin-list.md).
plugin.exclude_plugins=${SW_EXCLUDE_PLUGINS:}
# If true, trace all the parameters in MongoDB access; default is false. Otherwise, only the operation is traced, without parameters.
plugin.mongodb.trace_param=${SW_PLUGIN_MONGODB_TRACE_PARAM:false}
# If set to a positive number, the `WriteRequest.params` would be truncated to this length, otherwise it would be completely saved, which may cause a performance problem.
plugin.mongodb.filter_length_limit=${SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT:256}
# If true, trace all the DSL (Domain Specific Language) in Elasticsearch access; default is false.
plugin.elasticsearch.trace_dsl=${SW_PLUGIN_ELASTICSEARCH_TRACE_DSL:false}
# If true, the fully qualified method name will be used as the endpoint name instead of the request URL; default is false.
plugin.springmvc.use_qualified_name_as_endpoint_name=${SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME:false}
# If true, the fully qualified method name will be used as the operation name instead of the given operation name; default is false.
plugin.toolit.use_qualified_name_as_operation_name=${SW_PLUGIN_TOOLIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false}
# If set to true, the parameters of the SQL (typically `java.sql.PreparedStatement`) would be collected.
plugin.jdbc.trace_sql_parameters=${SW_JDBC_TRACE_SQL_PARAMETERS:false}
# If set to a positive number, the `db.sql.parameters` would be truncated to this length, otherwise it would be completely saved, which may cause a performance problem.
plugin.jdbc.sql_parameters_max_length=${SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH:512}
# If set to a positive number, the `db.statement` would be truncated to this length, otherwise it would be completely saved, which may cause a performance problem.
plugin.jdbc.sql_body_max_length=${SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH:2048}
# If true, trace all the query parameters (including deleteByIds and deleteByQuery) in Solr query requests; default is false.
plugin.solrj.trace_statement=${SW_PLUGIN_SOLRJ_TRACE_STATEMENT:false}
# If true, trace all the operation parameters in Solr requests; default is false.
plugin.solrj.trace_ops_params=${SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS:false}
# If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request.
plugin.light4j.trace_handler_chain=${SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN:false}
# If true, the transaction definition name will be simplified.
plugin.springtransaction.simplify_transaction_definition_name=${SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME:false}
# Threading classes (`java.lang.Runnable` and `java.util.concurrent.Callable`) and their subclasses, including anonymous inner classes whose names match any of the `THREADING_CLASS_PREFIXES` (split by `,`), will be instrumented. Make sure to only specify prefixes as narrow as what you expect to instrument (`java.` and `javax.` will be ignored due to safety issues).
plugin.jdkthreading.threading_class_prefixes=${SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES:}
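# Illustrative example (the package prefix is a placeholder for your own code): instrument only your own Runnable/Callable implementations.
# plugin.jdkthreading.threading_class_prefixes=com.example.myapp.task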
# This config item controls whether the Tomcat plugin should collect the parameters of the request. It is also activated implicitly in the profiled trace.
plugin.tomcat.collect_http_params=${SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS:false}
# This config item controls whether the SpringMVC plugin should collect the parameters of the request. When your Spring application is based on Tomcat, consider setting only one of `plugin.tomcat.collect_http_params` or `plugin.springmvc.collect_http_params`. It is also activated implicitly in the profiled trace.
plugin.springmvc.collect_http_params=${SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS:false}
# This config item controls whether the HttpClient plugin should collect the parameters of the request.
plugin.httpclient.collect_http_params=${SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS:false}
# When `COLLECT_HTTP_PARAMS` is enabled, how many characters to keep and send to the OAP backend; use negative values to keep and send the complete parameters. NB: this config item is added for the sake of performance.
plugin.http.http_params_length_threshold=${SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD:1024}
# When `include_http_headers` declares header names, this threshold controls the length limitation of all header values. Use negative values to keep and send the complete headers. Note: this config item is added for the sake of performance.
plugin.http.http_headers_length_threshold=${SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD:2048}
# Set the header names which should be collected by the plugin. Header names must follow the `javax.servlet.http` definition. Multiple names should be separated by commas.
plugin.http.include_http_headers=${SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS:}
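# Illustrative example (header selection is up to you): collect only these request headers on traced spans.
# plugin.http.include_http_headers=User-Agent,Accept-Language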
# This config item controls whether the Feign plugin should collect the HTTP body of the request.
plugin.feign.collect_request_body=${SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY:false}
# When `COLLECT_REQUEST_BODY` is enabled, how many characters to keep and send to the OAP backend; use negative values to keep and send the complete body.
plugin.feign.filter_length_limit=${SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT:1024}
# When `COLLECT_REQUEST_BODY` is enabled and the content-type starts with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request. Multiple prefixes should be separated by `,`.
plugin.feign.supported_content_types_prefix=${SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX:application/json,text/}
# If true, trace all the InfluxQL (query and write) in InfluxDB access; default is true.
plugin.influxdb.trace_influxql=${SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL:true}
# If true, the Apache Dubbo consumer collects `arguments` in RPC calls, using `Object#toString` to collect the `arguments`.
plugin.dubbo.collect_consumer_arguments=${SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS:false}
# When `plugin.dubbo.collect_consumer_arguments` is `true`, only this many leading characters of the arguments are sent to the OAP backend.
plugin.dubbo.consumer_arguments_length_threshold=${SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD:256}
# If true, the Apache Dubbo provider collects `arguments` in RPC calls, using `Object#toString` to collect the `arguments`.
plugin.dubbo.collect_provider_arguments=${SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS:false}
# When `plugin.dubbo.collect_provider_arguments` is `true`, only this many leading characters of the arguments are sent to the OAP backend.
plugin.dubbo.provider_arguments_length_threshold=${SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD:256}
# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092}
# Timeout period for reading topics from the Kafka server; the unit is second.
plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10}
# Kafka producer configuration. Read [producer configure](http://kafka.apache.org/24/documentation.html#producerconfigs)
# and check the documentation for more details and examples.
plugin.kafka.producer_config=${sw_plugin_kafka_producer_config:}
# Configure the Kafka producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key] if the keys are duplicated.
plugin.kafka.producer_config_json=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON:}
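# Illustrative example (values are assumptions, not defaults): standard Kafka producer keys in JSON form.
# plugin.kafka.producer_config_json={"batch.size":16384,"linger.ms":5,"compression.type":"lz4"}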
# Specify which Kafka topic name the Meter System data reports to.
plugin.kafka.topic_meter=${SW_PLUGIN_KAFKA_TOPIC_METER:skywalking-meters}
# Specify which Kafka topic name the JVM metrics data reports to.
plugin.kafka.topic_metrics=${SW_PLUGIN_KAFKA_TOPIC_METRICS:skywalking-metrics}
# Specify which Kafka topic name the trace data reports to.
plugin.kafka.topic_segment=${SW_PLUGIN_KAFKA_TOPIC_SEGMENT:skywalking-segments}
# Specify which Kafka topic name the Thread Profiling snapshots report to.
plugin.kafka.topic_profiling=${SW_PLUGIN_KAFKA_TOPIC_PROFILINGS:skywalking-profilings}
# Specify which Kafka topic name the register or heartbeat data of the Service Instance reports to.
plugin.kafka.topic_management=${SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT:skywalking-managements}
# Specify which Kafka topic name the logging data reports to.
plugin.kafka.topic_logging=${SW_PLUGIN_KAFKA_TOPIC_LOGGING:skywalking-logs}
# Isolate multiple OAP servers when using the same Kafka cluster (the final topic name is the namespace prepended to the Kafka topic with `-`).
plugin.kafka.namespace=${SW_KAFKA_NAMESPACE:}
# Match Spring beans with a regular expression for the class name. Multiple expressions can be separated by a comma. This only works when the `Spring annotation plugin` has been activated.
plugin.springannotation.classname_match_regex=${SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX:}
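# Illustrative example (the package name is a placeholder): only match Spring beans under your own packages.
# plugin.springannotation.classname_match_regex=com.example.myapp.service.*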
# Whether or not to transmit logged data as formatted or un-formatted.
plugin.toolkit.log.transmit_formatted=${SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED:true}
# If set to true, the parameters of Redis commands would be collected by the Lettuce agent.
plugin.lettuce.trace_redis_parameters=${SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS:false}
# If set to a positive number and `plugin.lettuce.trace_redis_parameters` is set to `true`, Redis command parameters would be collected and truncated to this length.
plugin.lettuce.redis_parameter_max_length=${SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH:128}
# If set to true, the parameters of the Cypher would be collected.
plugin.neo4j.trace_cypher_parameters=${SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS:false}
# If set to a positive number, the `db.cypher.parameters` would be truncated to this length, otherwise it would be completely saved, which may cause a performance problem.
plugin.neo4j.cypher_parameters_max_length=${SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH:512}
# If set to a positive number, the `db.statement` would be truncated to this length, otherwise it would be completely saved, which may cause a performance problem.
plugin.neo4j.cypher_body_max_length=${SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH:2048}
# If set to a positive number and the `trace sampler CPU policy plugin` is activated, the trace would not be collected when the agent process CPU usage percent is greater than `plugin.cpupolicy.sample_cpu_usage_percent_limit`.
plugin.cpupolicy.sample_cpu_usage_percent_limit=${SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT:-1}