This post will:
1. Sync data from MongoDB into Elasticsearch (using mongo-connector)
2. Use rsyslog to actively push logs to the ELK stack, instead of installing an agent on each target host to collect them

Notes:
1. Disable the firewall
2. Disable SELinux
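
A minimal sketch of the corresponding commands on CentOS 7 (assuming firewalld is the active firewall and the stock /etc/selinux/config):

# Stop and disable the firewall (assumes firewalld)
systemctl stop firewalld
systemctl disable firewalld

# Put SELinux into permissive mode now, and disable it across reboots
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config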

Download the installation packages:

elastic.co

[root@elastic-elk-stack software]# pwd
/software
[root@elastic-elk-stack software]# ls -ltr
total 244388
-rw-r--r--. 1 root root  11988533 Dec  7 00:14 filebeat-6.0.1-x86_64.rpm
-rw-r--r--. 1 root root  22147569 Dec  7 00:14 metricbeat-6.0.1-x86_64.rpm
-rw-r--r--. 1 root root  13520163 Dec  7 00:14 packetbeat-6.0.1-x86_64.rpm
-rw-r--r--. 1 root root  27973688 Dec  7 00:15 elasticsearch-6.0.1.rpm
-rw-r--r--. 1 root root  64011211 Dec  7 00:15 kibana-6.0.1-x86_64.rpm
-rw-r--r--. 1 root root 110602855 Dec  7 00:15 logstash-6.0.1.rpm
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# du -sh *
27M     elasticsearch-6.0.1.rpm
12M     filebeat-6.0.1-x86_64.rpm
62M     kibana-6.0.1-x86_64.rpm
106M    logstash-6.0.1.rpm
22M     metricbeat-6.0.1-x86_64.rpm
13M     packetbeat-6.0.1-x86_64.rpm
[root@elastic-elk-stack software]# 

Java support:

[root@elastic-elk-stack software]# java -version
openjdk version "1.8.0_131"
OpenJDK Runtime Environment (build 1.8.0_131-b12)
OpenJDK 64-Bit Server VM (build 25.131-b12, mixed mode)
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# rpm -ivh jdk-9.0.1_linux-x64_bin.rpm 
Preparing...                          ################################# [100%]
Updating / installing...
   1:jdk-9.0.1-2000:9.0.1-ga          ################################# [100%]
Unpacking JAR files...
        plugin.jar...
        javaws.jar...
        deploy.jar...
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/hicolor/16x16/apps/sun-java.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/hicolor/16x16/apps/sun-javaws.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/hicolor/48x48/apps/sun-java.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/hicolor/48x48/apps/sun-javaws.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/HighContrast/16x16/apps/sun-java.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/HighContrast/16x16/apps/sun-javaws.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/HighContrast/48x48/apps/sun-java.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/HighContrast/48x48/apps/sun-javaws.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/hicolor/48x48/apps/sun-java.png’: No such file or directory
cp: cannot stat ‘/usr/java/jdk-9.0.1/lib/desktop/icons/hicolor/48x48/apps/sun-javaws.png’: No such file or directory
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# java -version
java version "9.0.1"
Java(TM) SE Runtime Environment (build 9.0.1+11)
Java HotSpot(TM) 64-Bit Server VM (build 9.0.1+11, mixed mode)
[root@elastic-elk-stack software]# 

Install: Elasticsearch

[root@elastic-elk-stack software]# rpm -ivh elasticsearch-6.0.1.rpm 
warning: elasticsearch-6.0.1.rpm: Header V4 RSA/SHA512 Signature, key ID d88e42b4: NOKEY
Preparing...                          ################################# [100%]
Creating elasticsearch group... OK
Creating elasticsearch user... OK
Updating / installing...
   1:elasticsearch-0:6.0.1-1          ################################# [100%]
### NOT starting on installation, please execute the following statements to configure elasticsearch service to start automatically using systemd
 sudo systemctl daemon-reload
 sudo systemctl enable elasticsearch.service
### You can start elasticsearch service by executing
 sudo systemctl start elasticsearch.service
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# systemctl enable elasticsearch
Created symlink from /etc/systemd/system/multi-user.target.wants/elasticsearch.service to /usr/lib/systemd/system/elasticsearch.service.
[root@elastic-elk-stack software]# 

Install: MongoDB

[root@elastic-elk-stack mongodb_rpm]# cat /etc/yum.repos.d/mongodb.repo 
[mongodb-org-3.6]
name=MongoDB Repository
#baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/3.6/x86_64/
baseurl=file:///software/mongodb_rpm
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.6.asc
[root@elastic-elk-stack mongodb_rpm]# 
[root@elastic-elk-stack mongodb_rpm]# yum install -y mongodb-org*
Loaded plugins: fastestmirror, langpacks
Examining mongodb-org-3.6.0-1.el7.x86_64.rpm: mongodb-org-3.6.0-1.el7.x86_64
Marking mongodb-org-3.6.0-1.el7.x86_64.rpm to be installed
Examining mongodb-org-mongos-3.6.0-1.el7.x86_64.rpm: mongodb-org-mongos-3.6.0-1.el7.x86_64
Marking mongodb-org-mongos-3.6.0-1.el7.x86_64.rpm to be installed
Examining mongodb-org-server-3.6.0-1.el7.x86_64.rpm: mongodb-org-server-3.6.0-1.el7.x86_64
mongodb-org-server-3.6.0-1.el7.x86_64.rpm: does not update installed package.
Examining mongodb-org-shell-3.6.0-1.el7.x86_64.rpm: mongodb-org-shell-3.6.0-1.el7.x86_64
Marking mongodb-org-shell-3.6.0-1.el7.x86_64.rpm to be installed
Examining mongodb-org-tools-3.6.0-1.el7.x86_64.rpm: mongodb-org-tools-3.6.0-1.el7.x86_64
Marking mongodb-org-tools-3.6.0-1.el7.x86_64.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package mongodb-org.x86_64 0:3.6.0-1.el7 will be installed
---> Package mongodb-org-mongos.x86_64 0:3.6.0-1.el7 will be installed
---> Package mongodb-org-shell.x86_64 0:3.6.0-1.el7 will be installed
---> Package mongodb-org-tools.x86_64 0:3.6.0-1.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

=============================================================================================================================
 Package                      Arch             Version                Repository                                        Size
=============================================================================================================================
Installing:
 mongodb-org                  x86_64           3.6.0-1.el7            /mongodb-org-3.6.0-1.el7.x86_64                  0.0  
 mongodb-org-mongos           x86_64           3.6.0-1.el7            /mongodb-org-mongos-3.6.0-1.el7.x86_64            32 M
 mongodb-org-shell            x86_64           3.6.0-1.el7            /mongodb-org-shell-3.6.0-1.el7.x86_64             32 M
 mongodb-org-tools            x86_64           3.6.0-1.el7            /mongodb-org-tools-3.6.0-1.el7.x86_64            143 M

Transaction Summary
=============================================================================================================================
Install  4 Packages

Total size: 207 M
Installed size: 207 M
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : mongodb-org-shell-3.6.0-1.el7.x86_64                                                                      1/4 
  Installing : mongodb-org-tools-3.6.0-1.el7.x86_64                                                                      2/4 
  Installing : mongodb-org-mongos-3.6.0-1.el7.x86_64                                                                     3/4 
  Installing : mongodb-org-3.6.0-1.el7.x86_64                                                                            4/4 
  Verifying  : mongodb-org-mongos-3.6.0-1.el7.x86_64                                                                     1/4 
  Verifying  : mongodb-org-tools-3.6.0-1.el7.x86_64                                                                      2/4 
  Verifying  : mongodb-org-shell-3.6.0-1.el7.x86_64                                                                      3/4 
  Verifying  : mongodb-org-3.6.0-1.el7.x86_64                                                                            4/4 

Installed:
  mongodb-org.x86_64 0:3.6.0-1.el7         mongodb-org-mongos.x86_64 0:3.6.0-1.el7   mongodb-org-shell.x86_64 0:3.6.0-1.el7  
  mongodb-org-tools.x86_64 0:3.6.0-1.el7  

Complete!
[root@elastic-elk-stack mongodb_rpm]# 
[root@elastic-elk-stack mongodb_rpm]# service mongod status
Redirecting to /bin/systemctl status mongod.service
● mongod.service - High-performance, schema-free document-oriented database
   Loaded: loaded (/usr/lib/systemd/system/mongod.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2017-12-12 23:15:31 CST; 4min 13s ago
     Docs: https://docs.mongodb.org/manual
 Main PID: 7532 (mongod)
   CGroup: /system.slice/mongod.service
           └─7532 /usr/bin/mongod -f /etc/mongod.conf

Dec 12 23:15:31 elastic-elk-stack systemd[1]: Starting High-performance, schema-free document-oriented database...
Dec 12 23:15:31 elastic-elk-stack systemd[1]: Started High-performance, schema-free document-oriented database.
Dec 12 23:15:31 elastic-elk-stack mongod[7529]: about to fork child process, waiting until server is ready for connections.
Dec 12 23:15:31 elastic-elk-stack mongod[7529]: forked process: 7532
Dec 12 23:15:32 elastic-elk-stack mongod[7529]: child process started successfully, parent exiting
[root@elastic-elk-stack mongodb_rpm]# 
[root@elastic-elk-stack mongodb_rpm]# ps -ef | grep mongo
mongod    7532     1  0 23:15 ?        00:00:00 /usr/bin/mongod -f /etc/mongod.conf
root      7681  5196  0 23:19 pts/1    00:00:00 grep --color=auto mongo
[root@elastic-elk-stack mongodb_rpm]# 
[root@elastic-elk-stack mongodb_rpm]# netstat -tupln | grep mongo
tcp        0      0 127.0.0.1:27017         0.0.0.0:*               LISTEN      7532/mongod         
[root@elastic-elk-stack mongodb_rpm]# 
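
As a quick sanity check that the server answers (a sketch using the mongo shell that was just installed):

# Ping the local mongod
mongo --quiet --eval 'db.runCommand({ ping: 1 })'
# expected output: { "ok" : 1 }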

Install: pip

[root@elastic-elk-stack software]# wget https://bootstrap.pypa.io/get-pip.py
--2017-12-12 23:21:40--  https://bootstrap.pypa.io/get-pip.py
Resolving bootstrap.pypa.io (bootstrap.pypa.io)... 151.101.228.175
Connecting to bootstrap.pypa.io (bootstrap.pypa.io)|151.101.228.175|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1595408 (1.5M) [text/x-python]
Saving to: ‘get-pip.py’

100%[===================================================================================>] 1,595,408   23.2KB/s   in 67s    

2017-12-12 23:22:49 (23.2 KB/s) - ‘get-pip.py’ saved [1595408/1595408]

[root@elastic-elk-stack software]# python -V
Python 2.7.5
[root@elastic-elk-stack software]# python get-pip.py 
Collecting pip
  Downloading pip-9.0.1-py2.py3-none-any.whl (1.3MB)
    100% |████████████████████████████████| 1.3MB 57kB/s 
Collecting wheel
  Downloading wheel-0.30.0-py2.py3-none-any.whl (49kB)
    100% |████████████████████████████████| 51kB 36kB/s 
Installing collected packages: pip, wheel
Successfully installed pip-9.0.1 wheel-0.30.0
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# pip -V
pip 9.0.1 from /usr/lib/python2.7/site-packages (python 2.7)
[root@elastic-elk-stack software]# 

pip – Install: mongo-connector

Mongo Connector:
https://www.mongodb.com/blog/post/introducing-mongo-connector
https://github.com/mongodb-labs/mongo-connector

[root@elastic-elk-stack software]# pip install mongo-connector
Collecting mongo-connector
  Downloading mongo_connector-2.5.1-py2.py3-none-any.whl (58kB)
    100% |████████████████████████████████| 61kB 62kB/s 
Collecting pymongo>=2.9 (from mongo-connector)
  Downloading pymongo-3.6.0-cp27-cp27mu-manylinux1_x86_64.whl (381kB)
    100% |████████████████████████████████| 389kB 29kB/s 
Installing collected packages: pymongo, mongo-connector
Successfully installed mongo-connector-2.5.1 pymongo-3.6.0
[root@elastic-elk-stack software]# 

Start: elasticsearch

[root@elastic-elk-stack software]# service elasticsearch status
● elasticsearch.service - Elasticsearch
   Loaded: loaded (/usr/lib/systemd/system/elasticsearch.service; enabled; vendor preset: disabled)
   Active: inactive (dead)
     Docs: http://www.elastic.co
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# netstat -tupln | grep java
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# service elasticsearch start
Starting elasticsearch (via systemctl):                    [  OK  ]
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# service elasticsearch status
● elasticsearch.service - Elasticsearch
   Loaded: loaded (/usr/lib/systemd/system/elasticsearch.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2017-12-12 23:27:10 CST; 4s ago
     Docs: http://www.elastic.co
 Main PID: 7823 (java)
   CGroup: /system.slice/elasticsearch.service
           └─7823 /bin/java -Xms1g -Xmx1g -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiat...

Dec 12 23:27:10 elastic-elk-stack systemd[1]: Started Elasticsearch.
Dec 12 23:27:10 elastic-elk-stack systemd[1]: Starting Elasticsearch...
Dec 12 23:27:10 elastic-elk-stack elasticsearch[7823]: Java HotSpot(TM) 64-Bit Server VM warning: Option UseConcMarkSw...ase.
Hint: Some lines were ellipsized, use -l to show in full.
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# netstat -tupln | grep java
tcp6       0      0 127.0.0.1:9200          :::*                    LISTEN      7823/java           
tcp6       0      0 ::1:9200                :::*                    LISTEN      7823/java           
tcp6       0      0 127.0.0.1:9300          :::*                    LISTEN      7823/java           
tcp6       0      0 ::1:9300                :::*                    LISTEN      7823/java           
[root@elastic-elk-stack software]# 
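
A quick sanity check against the HTTP port (at this point Elasticsearch still binds to loopback only):

curl http://127.0.0.1:9200/
# returns the node JSON, including "cluster_name" and "version": { "number": "6.0.1", ... }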

pip – Install: 'mongo-connector[elastic5]'

[root@elastic-elk-stack software]# pip install 'mongo-connector[elastic5]'
Requirement already satisfied: mongo-connector[elastic5] in /usr/lib/python2.7/site-packages
Requirement already satisfied: pymongo>=2.9 in /usr/lib64/python2.7/site-packages (from mongo-connector[elastic5])
Collecting elastic2-doc-manager[elastic5]; extra == "elastic5" (from mongo-connector[elastic5])
  Downloading elastic2_doc_manager-0.3.0-py2.py3-none-any.whl
Collecting elasticsearch<6.0.0,>=5.0.0; extra == "elastic5" (from elastic2-doc-manager[elastic5]; extra == "elastic5"->mongo-connector[elastic5])
  Downloading elasticsearch-5.5.1-py2.py3-none-any.whl (112kB)
    100% |████████████████████████████████| 112kB 48kB/s 
Collecting urllib3<1.22,>=1.21.1 (from elasticsearch<6.0.0,>=5.0.0; extra == "elastic5"->elastic2-doc-manager[elastic5]; extra == "elastic5"->mongo-connector[elastic5])
  Downloading urllib3-1.21.1-py2.py3-none-any.whl (131kB)
    100% |████████████████████████████████| 133kB 38kB/s 
Installing collected packages: urllib3, elasticsearch, elastic2-doc-manager
  Found existing installation: urllib3 1.10.2
    Uninstalling urllib3-1.10.2:
      Successfully uninstalled urllib3-1.10.2
Successfully installed elastic2-doc-manager-0.3.0 elasticsearch-5.5.1 urllib3-1.21.1
[root@elastic-elk-stack software]# 

Sync the data between MongoDB and Elasticsearch:

[root@elastic-elk-stack ~]# mongo-connector -m localhost:27017 -t localhost:9200 -d elastic2_doc_manager
Logging to /root/mongo-connector.log.
[root@elastic-elk-stack ~]# 

Check the log:

[root@elastic-elk-stack ~]# tail -f /root/mongo-connector.log
2017-12-12 23:39:46,087 [ALWAYS] mongo_connector.connector:51 - Starting mongo-connector version: 2.5.1
2017-12-12 23:39:46,087 [ALWAYS] mongo_connector.connector:51 - Python version: 2.7.5 (default, Aug  4 2017, 00:39:18) 
[GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
2017-12-12 23:39:46,087 [ALWAYS] mongo_connector.connector:51 - Platform: Linux-3.10.0-693.el7.x86_64-x86_64-with-centos-7.4.1708-Core
2017-12-12 23:39:46,088 [ALWAYS] mongo_connector.connector:51 - pymongo version: 3.6.0
2017-12-12 23:39:46,088 [WARNING] mongo_connector.connector:167 - MongoConnector: Can't find /root/oplog.timestamp, attempting to create an empty progress log
2017-12-12 23:39:46,098 [ALWAYS] mongo_connector.connector:51 - Source MongoDB version: 3.6.0
2017-12-12 23:39:46,098 [ALWAYS] mongo_connector.connector:51 - Target DocManager: mongo_connector.doc_managers.elastic2_doc_manager version: 0.3.0
2017-12-12 23:39:46,100 [ERROR] mongo_connector.connector:373 - No replica set at "localhost:27017"! A replica set is required to run mongo-connector. Shutting down...

As you can see, it failed: mongo-connector requires MongoDB to run as a replica set.

Run MongoDB as a replica set:
Create the data directory:

[root@elastic-elk-stack software]# mkdir /home/data/mongodb/replica -p
[root@elastic-elk-stack software]# 

Configuration file:

[root@elastic-elk-stack ~]# cat /etc/mongod.conf 
# mongod.conf

# for documentation of all options, see:
#   http://docs.mongodb.org/manual/reference/configuration-options/

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# Where and how to store data.
storage:
  #dbPath: /var/lib/mongo
  dbPath: /home/data/mongodb/replica
  journal:
    enabled: true
#  engine:
#  mmapv1:
#  wiredTiger:

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27017
  bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.


#security:

#operationProfiling:

#replication:

#sharding:

## Enterprise-Only Options

#auditLog:

#snmp:
[root@elastic-elk-stack ~]# 
[root@elastic-elk-stack ~]# 

Start:

[root@elastic-elk-stack software]# mongod --replSet myDevReplSet -f /etc/mongod.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 8655
child process started successfully, parent exiting
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# ps -ef | grep mongo
root      8472  8433  0 23:40 pts/3    00:00:00 tail -f /root/mongo-connector.log
root      8655     1  0 23:55 ?        00:00:00 mongod --replSet myDevReplSet -f /etc/mongod.conf
root      8690  5196  0 23:55 pts/1    00:00:00 grep --color=auto mongo
[root@elastic-elk-stack software]# 

Initialize: replica set

[root@elastic-elk-stack software]# mongo
MongoDB shell version v3.6.0
connecting to: mongodb://127.0.0.1:27017
MongoDB server version: 3.6.0
Server has startup warnings: 
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] 
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] 
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] 
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] 
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2017-12-12T23:55:15.582+0800 I CONTROL  [initandlisten] 
> rs.initiate()
{
        "info2" : "no configuration specified. Using a default configuration for the set",
        "me" : "127.0.0.1:27017",
        "ok" : 1,
        "operationTime" : Timestamp(1513094179, 1),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1513094179, 1),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}
myDevReplSet:SECONDARY> exit
bye
[root@elastic-elk-stack software]# 
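
You can confirm the replica set came up and that this node has become PRIMARY (a sketch using the mongo shell):

mongo --quiet --eval 'rs.status().members.forEach(function(m){ print(m.name + " " + m.stateStr); })'
# should report "127.0.0.1:27017 PRIMARY" a few seconds after rs.initiate()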

Start the sync between MongoDB and Elasticsearch again:

[root@elastic-elk-stack ~]# nohup mongo-connector -m localhost:27017 -t localhost:9200 -d elastic2_doc_manager &
[2] 8784
[root@elastic-elk-stack ~]# nohup: ignoring input and appending output to ‘nohup.out’

[root@elastic-elk-stack ~]# 

Log: /root/mongo-connector.log

2017-12-12 23:58:15,414 [ALWAYS] mongo_connector.connector:51 - Starting mongo-connector version: 2.5.1
2017-12-12 23:58:15,414 [ALWAYS] mongo_connector.connector:51 - Python version: 2.7.5 (default, Aug  4 2017, 00:39:18) 
[GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
2017-12-12 23:58:15,415 [ALWAYS] mongo_connector.connector:51 - Platform: Linux-3.10.0-693.el7.x86_64-x86_64-with-centos-7.4.1708-Core
2017-12-12 23:58:15,415 [ALWAYS] mongo_connector.connector:51 - pymongo version: 3.6.0
2017-12-12 23:58:15,423 [ALWAYS] mongo_connector.connector:51 - Source MongoDB version: 3.6.0
2017-12-12 23:58:15,423 [ALWAYS] mongo_connector.connector:51 - Target DocManager: mongo_connector.doc_managers.elastic2_doc_manager version: 0.3.0
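
To verify the sync end to end, you can insert a test document into MongoDB and look for it in Elasticsearch. This is only a sketch, and testdb / testcoll are made-up names; the elastic2_doc_manager indexes documents under an index named after the database and a type named after the collection:

# Insert a document into a hypothetical database/collection (testdb / testcoll are made-up names)
mongo --quiet --eval 'db.getSiblingDB("testdb").testcoll.insertOne({ msg: "hello elk" })'

# Shortly afterwards it should be searchable in Elasticsearch
curl 'http://localhost:9200/testdb/_search?q=msg:hello&pretty'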

Next, change the Elasticsearch data and log directories:
Create them:

[root@elastic-elk-stack ~]# mkdir /home/data/elastic/elasticsearch/{data,logs} -p
[root@elastic-elk-stack ~]# 
[root@elastic-elk-stack ~]# chown -R elasticsearch.elasticsearch /home/data/
[root@elastic-elk-stack ~]# 

Modify the configuration file:

[root@elastic-elk-stack ~]# cat /etc/elasticsearch/elasticsearch.yml 
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
#       Before you set out to tweak and tune the configuration, make sure you
#       understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-1
node.name: elastic-elk-stack

node.data: true

#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
#path.data: /var/lib/elasticsearch
path.data: /home/data/elastic/elasticsearch/data
#
# Path to log files:
#
#path.logs: /var/log/elasticsearch
path.logs: /home/data/elastic/elasticsearch/logs

#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: 192.168.0.1
network.host: 10.158.1.**

#
# Set a custom port for HTTP:
#
#http.port: 9200
http.port: 9200

#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.zen.ping.unicast.hosts: ["host1", "host2"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):
#
#discovery.zen.minimum_master_nodes: 3
#
# For more information, consult the zen discovery module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
[root@elastic-elk-stack ~]# 

Restart: service elasticsearch restart
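
Note: once network.host points at a non-loopback address, Elasticsearch 6 runs its production bootstrap checks at startup, so the restart can fail until the kernel limits are raised. A minimal sketch (assuming the default cluster name, so the log file is elasticsearch.log under the new log path):

# Raise the mmap limit required by the bootstrap checks
sysctl -w vm.max_map_count=262144
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf

# Restart and watch the log under the new path
service elasticsearch restart
tail -f /home/data/elastic/elasticsearch/logs/elasticsearch.log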

Install: Kibana

[root@elastic-elk-stack software]# rpm -ivh kibana-6.0.1-x86_64.rpm 
warning: kibana-6.0.1-x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID d88e42b4: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:kibana-6.0.1-1                   ################################# [100%]
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# systemctl enable kibana
Created symlink from /etc/systemd/system/multi-user.target.wants/kibana.service to /etc/systemd/system/kibana.service.
[root@elastic-elk-stack software]# 

Configuration file:

[root@elastic-elk-stack ~]# cat /etc/kibana/kibana.yml 
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601

# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "10.158.1.**"

# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
# the URLs generated by Kibana, your proxy is expected to remove the basePath value before forwarding requests
# to Kibana. This setting cannot end in a slash.
#server.basePath: ""

# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576

# The Kibana server's name.  This is used for display purposes.
server.name: "elastic-elk-stack"

# The URL of the Elasticsearch instance to use for all your queries.
#elasticsearch.url: "http://10.168.1.***:9200"
#elasticsearch.url: "10.168.1.***:9200"
elasticsearch.url: "http://10.158.1.***:9200"

# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
#elasticsearch.preserveHost: true

# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
kibana.index: ".kibana"

# The default application to load.
#kibana.defaultAppId: "discover"

# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "user"
#elasticsearch.password: "pass"

# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key

# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]

# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full

# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
elasticsearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000

# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]

# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}

# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 0

# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
#elasticsearch.startupTimeout: 5000

# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid

# Enables you specify a file where Kibana stores log output.
logging.dest: stdout

# Set the value of this setting to true to suppress all logging output.
#logging.silent: false

# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000

# The default locale. This locale can be used in certain circumstances to substitute any missing
# translations.
#i18n.defaultLocale: "en"
[root@elastic-elk-stack ~]# 

Start the service:

[root@elastic-elk-stack ~]# service kibana start
kibana started
[root@elastic-elk-stack ~]# 
[root@elastic-elk-stack software]# netstat -tupln | grep 5601
tcp        0      0 0.0.0.0:5601            0.0.0.0:*               LISTEN      9283/node           
[root@elastic-elk-stack software]# 

rsyslog – server side:
Create the directory where the received logs will be stored:

[root@elastic-elk-stack ~]# mkdir /home/data/rsyslog/log -p
[root@elastic-elk-stack ~]# 

Configuration file:

[root@elastic-elk-stack ~]# cat /etc/rsyslog.conf 
# rsyslog configuration file

# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html

#### MODULES ####

# The imjournal module bellow is now used as a message source instead of imuxsock.
$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
$ModLoad imjournal # provides access to the systemd journal
#$ModLoad imklog # reads kernel messages (the same are read from journald)
#$ModLoad immark  # provides --MARK-- message capability

# Provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514

# Provides TCP syslog reception (must be enabled so this host can receive the logs
# forwarded by the clients over TCP/514)
$ModLoad imtcp
$InputTCPServerRun 514

$AllowedSender tcp, 10.158.1.0/24


#### GLOBAL DIRECTIVES ####

# Where to place auxiliary files
$WorkDirectory /var/lib/rsyslog

# Use default timestamp format
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat

$template Remote,"/home/data/rsyslog/log/%fromhost-ip%/%fromhost-ip%_%$YEAR%-%$MONTH%-%$DAY%.log"
:fromhost-ip, !isequal, "127.0.0.1" ?Remote

# File syncing capability is disabled by default. This feature is usually not required,
# not useful and an extreme performance hit
#$ActionFileEnableSync on

# Include all config files in /etc/rsyslog.d/
$IncludeConfig /etc/rsyslog.d/*.conf

# Turn off message reception via local log socket;
# local messages are retrieved through imjournal now.
$OmitLocalLogging on

# File to store the position in the journal
$IMJournalStateFile imjournal.state


#### RULES ####

# Log all kernel messages to the console.
# Logging much else clutters up the screen.
#kern.*                                                 /dev/console

# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none                /var/log/messages

# The authpriv file has restricted access.
authpriv.*                                              /var/log/secure

# Log all the mail messages in one place.
mail.*                                                  -/var/log/maillog


# Log cron stuff
cron.*                                                  /var/log/cron

# Everybody gets emergency messages
*.emerg                                                 :omusrmsg:*

# Save news errors of level crit and higher in a special file.
uucp,news.crit                                          /var/log/spooler

# Save boot messages also to boot.log
local7.*                                                /var/log/boot.log


# ### begin forwarding rule ###
# The statement between the begin ... end define a SINGLE forwarding
# rule. They belong together, do NOT split them. If you create multiple
# forwarding rules, duplicate the whole block!
# Remote Logging (we use TCP for reliable delivery)
#
# An on-disk queue is created for this action. If the remote host is
# down, messages are spooled to disk and sent when it is up again.
#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
#$ActionQueueMaxDiskSpace 1g   # 1gb space limit (use as much as possible)
#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
#$ActionQueueType LinkedList   # run asynchronously
#$ActionResumeRetryCount -1    # infinite retries if host is down
# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
#*.* @@remote-host:514
# ### end of the forwarding rule ###
[root@elastic-elk-stack ~]# 

Restart the service: rsyslog

[root@elastic-elk-stack ~]# service rsyslog status
Redirecting to /bin/systemctl status rsyslog.service
● rsyslog.service - System Logging Service
   Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; vendor preset: enabled)
   Active: active (running) since Wed 2017-12-13 00:26:16 CST; 48min ago
     Docs: man:rsyslogd(8)
           http://www.rsyslog.com/doc/
 Main PID: 760 (rsyslogd)
   CGroup: /system.slice/rsyslog.service
           └─760 /usr/sbin/rsyslogd -n

Dec 13 00:26:13 elastic-elk-stack systemd[1]: Starting System Logging Service...
Dec 13 00:26:13 elastic-elk-stack rsyslogd[760]: [origin software="rsyslogd" swVersion="8.24.0" x-pid="760" x-info="http://www.rsyslog.com"] start
Dec 13 00:26:16 elastic-elk-stack systemd[1]: Started System Logging Service.
[root@elastic-elk-stack ~]# 
[root@elastic-elk-stack ~]# service rsyslog restart
Redirecting to /bin/systemctl restart rsyslog.service
[root@elastic-elk-stack ~]# 
[root@elastic-elk-stack ~]# service rsyslog status
Redirecting to /bin/systemctl status rsyslog.service
● rsyslog.service - System Logging Service
   Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; vendor preset: enabled)
   Active: active (running) since Wed 2017-12-13 01:14:51 CST; 6s ago
     Docs: man:rsyslogd(8)
           http://www.rsyslog.com/doc/
 Main PID: 6214 (rsyslogd)
   CGroup: /system.slice/rsyslog.service
           └─6214 /usr/sbin/rsyslogd -n

Dec 13 01:14:51 elastic-elk-stack systemd[1]: Starting System Logging Service...
Dec 13 01:14:51 elastic-elk-stack rsyslogd[6214]: [origin software="rsyslogd" swVersion="8.24.0" x-pid="6214" x-info="http://www.rsyslog.com"] start
Dec 13 01:14:51 elastic-elk-stack systemd[1]: Started System Logging Service.
[root@elastic-elk-stack ~]# 

rsyslog – client side:

Before configuring the client, check the state of the log directory on the server:

[root@elastic-elk-stack ~]# cd /home/data/rsyslog/log/
[root@elastic-elk-stack log]# ll
total 0
[root@elastic-elk-stack log]# 
[root@elastic-elk-stack log]# 

Modify the client's configuration file, then restart:

[root@elastic-server ~]# cat /etc/rsyslog.conf | tail -n 2 

*.* @@10.158.1.***:514
[root@elastic-server ~]# 
[root@elastic-server ~]# service rsyslog restart
Redirecting to /bin/systemctl restart rsyslog.service
[root@elastic-server ~]# 
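
To generate some traffic right away, you can emit a test message from the client with logger and look for it on the server; this is only a sketch, with a made-up tag:

# On the client: send a test syslog message (the tag "elk-test" is arbitrary)
logger -t elk-test "hello from the rsyslog client"

# On the server: the message should land in the per-IP file for today
grep -r elk-test /home/data/rsyslog/log/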

Check the state of the directory on the server:

[root@elastic-elk-stack log]# pwd
/home/data/rsyslog/log
[root@elastic-elk-stack log]# 
[root@elastic-elk-stack log]# ls -ltr
total 0
drwx------ 2 root root 41 Dec 13 01:32 10.158.1.111
drwx------ 2 root root 40 Dec 13 01:34 10.158.1.99
[root@elastic-elk-stack log]# 
[root@elastic-elk-stack log]# tree ../log/
../log/
├── 10.158.1.111
│   └── 10.158.1.111_2017-12-13.log
└── 10.158.1.99
    └── 10.158.1.99_2017-12-13.log

2 directories, 2 files
[root@elastic-elk-stack log]# 

Next, use the ELK stack to pick up the data that rsyslog has collected.

Install: Filebeat

[root@elastic-elk-stack software]# rpm -ivh filebeat-6.0.1-x86_64.rpm 
warning: filebeat-6.0.1-x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID d88e42b4: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:filebeat-6.0.1-1                 ################################# [100%]
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# 

Configuration:

[root@elastic-elk-stack software]# cat /etc/filebeat/filebeat.yml 
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

#=========================== Filebeat prospectors =============================

filebeat.prospectors:

# Each - is a prospector. Most options can be set at the prospector level, so
# you can use different prospectors for various configurations.
# Below are the prospector specific configurations.

- type: log

  # Change to true to enable this prospector configuration.
  enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/*.log
    #- c:\programdata\elasticsearch\logs\*

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Mutiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation

  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
  #multiline.match: after

# Prospector for the log files received via rsyslog. It is left disabled here because
# the Logstash pipeline below reads the same files; set enabled: true if you want
# Filebeat (rather than Logstash) to ship them.
- type: log
  enabled: false
  paths:
    - /home/data/rsyslog/log/*/*


#============================= Filebeat modules ===============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging


#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify and additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "10.158.1.***:5601"

#============================= Elastic Cloud ==================================

# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["10.158.1.***:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
[root@elastic-elk-stack software]# 

启动服务:

[root@elastic-elk-stack software]# service filebeat status
● filebeat.service - filebeat
   Loaded: loaded (/usr/lib/systemd/system/filebeat.service; disabled; vendor preset: disabled)
   Active: inactive (dead)
     Docs: https://www.elastic.co/guide/en/beats/filebeat/current/index.html
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# service filebeat start
Starting filebeat (via systemctl):                         [  OK  ]
[root@elastic-elk-stack software]# 
[root@elastic-elk-stack software]# 
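
With the RPM defaults Filebeat writes its own log under /var/log/filebeat, so you can confirm it started cleanly and, once a prospector is enabled, see which indices it created (a sketch; replace <es-host> with your Elasticsearch address):

# Filebeat's own log (default location for the RPM install)
tail -n 20 /var/log/filebeat/filebeat

# Indices created by Filebeat, if any prospector is enabled
curl 'http://<es-host>:9200/_cat/indices?v' | grep filebeat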

Configure Logstash:

[root@elastic-elk-stack ~]# cat /etc/logstash/conf.d/rsyslog.conf 
input {
  #tcp{
  #  port => 514
  #  type => syslog
  #}
  #udp{
  #  port => 514
  #  type => syslog
  #}
  file {
    path => ["/home/data/rsyslog/log/*/*.log"]
    #path => ["/var/log/*"]
    type => "rsyslog"
    start_position => "beginning"
  }
}
output {
  stdout {
    codec=> rubydebug
  }
  elasticsearch {
    hosts => ["10.158.1.***:9200"]
    index => "rsyslog_10.158.1.**-%{+YYYY.MM.dd}"
  }
}
[root@elastic-elk-stack ~]# 
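
Before starting the service you can ask Logstash to validate this pipeline (a sketch using the RPM install paths):

/usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/rsyslog.conf --config.test_and_exit
# should finish with: Configuration OK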

Start: service logstash start

Visit: http://ip:9200/_search
This returns matching documents from all of the indices, so you can confirm data is flowing in.
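
If you just want to list the indices themselves, the _cat API is more direct (a sketch; substitute your Elasticsearch host for ip):

curl 'http://ip:9200/_cat/indices?v'
# expect rows for .kibana, the rsyslog_* index created by Logstash, and anything synced from MongoDB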

Adding the items in Kibana is not covered again here; it is described in the earlier posts.

——————————
Done.
