curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum makecache
yum install net-tools wget -y
yum install chrony -y
sed -i "/server/d" /etc/chrony.conf
vi /etc/chrony.conf  # add: server ntp.aliyun.com iburst
systemctl restart chronyd
chronyc tracking
systemctl stop firewalld
systemctl disable firewalld
vi /etc/selinux/config
SELINUX=disabled
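To also switch SELinux to permissive mode immediately, without waiting for a reboot (a common companion step, not part of the original list), you can additionally run:
setenforce 0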
swapoff -a
sudo sysctl vm.swappiness=0
vi /etc/fstab  # comment out the swap line
vi /etc/sysctl.conf  # add the following
vm.swappiness = 0
Verify that the setting took effect:
sudo sysctl vm.swappiness
vi /etc/security/limits.conf  # add the following
* soft nofile 65536
* hard nofile 65536
sysctl -w vm.max_map_count=262144
vi /etc/sysctl.conf
vm.max_map_count=262144
sysctl vm.max_map_count
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y docker-ce docker-ce-cli containerd.io
If the download is too slow, you can install offline from local packages instead:
cd /home
wget ftp://ftp.vip56.cn:88/software/docker/containerd.io-1.2.13-3.1.el7.x86_64.rpm ftp://ftp.vip56.cn:88/software/docker/docker-ce-19.03.8-3.el7.x86_64.rpm ftp://ftp.vip56.cn:88/software/docker/docker-ce-cli-19.03.8-3.el7.x86_64.rpm
sudo yum localinstall -y containerd.io-1.2.13-3.1.el7.x86_64.rpm docker-ce-19.03.8-3.el7.x86_64.rpm docker-ce-cli-19.03.8-3.el7.x86_64.rpm
sudo systemctl start docker
sudo systemctl enable docker
vi /etc/docker/daemon.json  # add the following content
{
"registry-mirrors": ["https://harbor.vip56.cn"]
}
sudo systemctl restart docker
sudo curl -L ftp://ftp.vip56.cn:88/software/docker/docker-compose-Linux-x86_64 -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
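As a quick sanity check (not in the original steps), confirm the binary is executable and on the path:
docker-compose --version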
docker pull harbor.vip56.cn/common/elasticsearch:7.8.0
docker pull harbor.vip56.cn/common/kibana:7.8.0
These services will be deployed with Docker Compose; create a docker-compose.yml with the following content:
version: '2.2'
services:
  es01:
    image: harbor.vip56.cn/common/elasticsearch:7.8.0
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic
  es02:
    image: harbor.vip56.cn/common/elasticsearch:7.8.0
    container_name: es02
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    ports:
      - 9201:9200
    networks:
      - elastic
  es03:
    image: harbor.vip56.cn/common/elasticsearch:7.8.0
    container_name: es03
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data03:/usr/share/elasticsearch/data
    ports:
      - 9202:9200
    networks:
      - elastic
  kib01:
    image: harbor.vip56.cn/common/kibana:7.8.0
    container_name: kib01
    ports:
      - 5601:5601
    environment:
      ELASTICSEARCH_URL: http://es01:9200
      ELASTICSEARCH_HOSTS: http://es01:9200
    networks:
      - elastic

volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local

networks:
  elastic:
    driver: bridge
Start the services with docker-compose up.
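Once the containers are up, a quick way to confirm the cluster has formed (assuming the default 9200 port mapping above) is to query the health endpoint; a healthy three-node cluster reports "status" : "green" and "number_of_nodes" : 3:
curl http://localhost:9200/_cluster/health?pretty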
docker pull harbor.vip56.cn/common/apm-server:7.0.1
Create an apm-server.yml file (it will be mounted into the container by the docker run command below) with the following content:
apm-server:
  host: "0.0.0.0:8200"
  #queue:
    #mem:
      # Max number of events the queue can buffer.
      #events: 4096

#setup.template.pattern: "apm-%{[observer.version]}-*"
#setup.template.overwrite: false
#setup.template.settings:
  #index:
    #number_of_shards: 1
    #codec: best_compression
    #number_of_routing_shards: 30
    #mapping.total_fields.limit: 2000

output.elasticsearch:
  hosts: ["192.168.153.154:9200"]
  # Number of workers per Elasticsearch host.
  #worker: 1
  indices:
    - index: "apm-%{[observer.version]}-sourcemap"
      when.contains:
        processor.event: "sourcemap"
    - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
      when.contains:
        processor.event: "error"
    - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
      when.contains:
        processor.event: "transaction"
    - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
      when.contains:
        processor.event: "span"
    - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
      when.contains:
        processor.event: "metric"
    - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
      when.contains:
        processor.event: "onboarding"
  #max_retries: 3
  #bulk_max_size: 50
  #backoff.max: 60s
  #timeout: 90

#logging.level: info
#logging.to_syslog: true
#logging.metrics.enabled: false
#logging.metrics.period: 30s
#logging.to_files: true
#logging.files:
  #path: /var/log/apm-server
  #name: apm-server
  #rotateeverybytes: 10485760 # = 10MB
  #keepfiles: 7
  #permissions: 0600
  #interval: 0
#logging.json: false

#http.enabled: false
#http.host: localhost
#http.port: 5066
docker run -d --name=apm-server --user=apm-server --volume="$(pwd)/apm-server.yml:/usr/share/apm-server/apm-server.yml:ro" -p 8200:8200 harbor.vip56.cn/common/apm-server:7.0.1
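To verify the APM Server container is reachable (assuming the 8200 port mapping above), the root endpoint returns basic build information as JSON:
curl http://localhost:8200/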
Create a new ASP.NET Core project, install the Elastic.Apm.AspNetCore package, and call the corresponding initialization method in Startup.Configure:
public class Startup
{
    public Startup(IConfiguration configuration) => Configuration = configuration;

    public IConfiguration Configuration { get; }

    public void Configure(IApplicationBuilder app, IHostingEnvironment env)
    {
        // Registers the Elastic APM middleware so incoming HTTP requests are traced.
        app.UseElasticApm(Configuration);
    }
}
After this initialization code is in place, add the corresponding configuration section to appsettings.json:
"ElasticApm": {
  "SecretToken": "",
  "ServerUrls": "http://localhost:8200",
  "ServiceName": "MyApp"
}
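Alternatively, the same settings can be supplied through environment variables, which is often more convenient in containerized deployments; a minimal sketch (the values are illustrative):
export ELASTIC_APM_SERVER_URLS=http://localhost:8200
export ELASTIC_APM_SERVICE_NAME=MyApp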
The approach above only captures regular incoming HTTP requests. If you also want to capture outgoing requests made by HttpClient or queries issued by EF Core, you need to register the corresponding diagnostic subscribers manually:
public class Startup
{
    public void Configure(IApplicationBuilder app, IHostingEnvironment env)
    {
        app.UseElasticApm(Configuration,
            new HttpDiagnosticsSubscriber(),
            new EfCoreDiagnosticsSubscriber());
    }
}
Besides the framework-based metrics collection described above, we can also use the public API to record custom transactions and spans ourselves, for example:
var transaction = Elastic.Apm.Agent.Tracer.StartTransaction("MyTransaction", ApiConstants.TypeRequest);
transaction.Labels["system"] = "tmsystem";
transaction.Labels["group"] = "coregroup";
try
{
    var span1 = transaction.StartSpan("from db", "Database");
    Thread.Sleep(100);
    var childspan1 = span1.StartSpan("from db2", "subspan");
    Thread.Sleep(50);
    childspan1.End();
    span1.End();
}
catch (Exception ex)
{
    transaction.CaptureException(ex);
    throw;
}
finally
{
    transaction.End();
}
Sometimes we need to call other methods whose bodies have no transaction object available. To avoid changing their parameter lists, we can use var transaction = Elastic.Apm.Agent.Tracer.CurrentTransaction; to obtain the current transaction; if it is null, no transaction has been created yet. This way those methods can still record metrics. Likewise, the current child span can be obtained with var span = Elastic.Apm.Agent.Tracer.CurrentSpan;.
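As a minimal sketch of this pattern (the method name and span name below are illustrative, not from the original), a helper method can pick up the ambient transaction and record its own span without any extra parameters:
public void SaveOrder()
{
    // May be null if no transaction has been started for the current execution context.
    var transaction = Elastic.Apm.Agent.Tracer.CurrentTransaction;
    var span = transaction?.StartSpan("save order", "Database");
    try
    {
        // ... actual work ...
    }
    finally
    {
        span?.End();
    }
}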
First, we need to add the agent dependency to the project, for example in the pom.xml file:
<dependency>
    <groupId>co.elastic.apm</groupId>
    <artifactId>apm-agent-attach</artifactId>
    <version>1.17.0</version>
</dependency>
Then initialize the agent in the project's main method; below we use Spring Boot as an example:
@RetrofitServiceScan
@SpringBootApplication
public class Application {
    public static void main(String[] args) {
        // Attach the Elastic APM agent before the application starts.
        ElasticApmAttacher.attach();
        SpringApplication.run(Application.class, args);
    }
}
Next, create the configuration file elasticapm.properties (placed on the classpath, e.g. under src/main/resources) with the following content:
service_name=my-cool-service
application_packages=com.logidelta.industrialbigdata
server_urls=http://192.168.153.155:8200
Besides the built-in metrics collection, we can also use the public API to record and push custom metrics. To do so, we first need to add the corresponding library; open pom.xml and add the following:
<dependency>
    <groupId>co.elastic.apm</groupId>
    <artifactId>apm-agent-api</artifactId>
    <version>1.17.0</version>
</dependency>
With this dependency in place, we can use the Java API to record custom metrics, for example:
Transaction transaction = ElasticApm.currentTransaction();
try {
    transaction.setName("DeviceController#Test");
    transaction.setType(Transaction.TYPE_REQUEST);
    transaction.addLabel("system", "tmsystem");
    transaction.addLabel("group", "coregroup");
    Span span1 = transaction.startSpan("from db", "mysql", "query");
    span1.setName("select * from db");
    span1.end();
} catch (Exception e) {
    transaction.captureException(e);
    throw e;
} finally {
    transaction.end();
}
Here we call the currentTransaction method directly; the API handles object creation internally (returning a no-op instance when no transaction is active), so there is no need to check for null. The same applies to currentSpan.
Besides calling these methods explicitly, metrics can also be collected quickly and conveniently via annotations such as @CaptureTransaction, @CaptureSpan, and @Traced.
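As a minimal sketch of the annotation-based approach (the class and method names here are illustrative), these annotations come from the apm-agent-api package and only take effect when the agent is attached:
import co.elastic.apm.api.CaptureSpan;
import co.elastic.apm.api.CaptureTransaction;

public class DeviceService {

    // Creates a transaction for every call to this method.
    @CaptureTransaction("DeviceService#syncDevices")
    public void syncDevices() {
        loadFromDb();
    }

    // Recorded as a child span of the current transaction.
    @CaptureSpan("load devices from db")
    public void loadFromDb() {
        // ... query the database ...
    }
}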
Installing and Using Elastic APM with Docker for Metrics Monitoring
Source: https://www.cnblogs.com/yaozhenfa/p/13305195.html