Collecting logs from applications running as Pods in a Kubernetes cluster
This is done by running Filebeat as a sidecar container inside the application Pod; Tomcat is used as the example here.
Prepare the Tomcat data directory
The default Tomcat image ships with no site index page; without adding one, the container in the Pod cannot run properly (the HTTP probes against / defined below would fail).
mkdir /opt/tomcat
echo "tomcat running" > /opt/tomcat/index.html
Write the Tomcat application resource manifest
tomcat-logs.yaml
cat > tomcat-logs.yaml << "EOF"
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-demo
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      project: www
      app: tomcat-demo
  template:
    metadata:
      labels:
        project: www
        app: tomcat-demo
    spec:
      nodeName: k8s-worker01
      containers:
      - name: tomcat
        image: tomcat:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          requests:
            cpu: 0.5
            memory: 1Gi
          limits:
            cpu: 1
            memory: 2Gi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 20
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 20
        volumeMounts:
        - name: tomcat-logs
          mountPath: /usr/local/tomcat/logs
        - name: tomcat
          mountPath: /usr/local/tomcat/webapps/ROOT
      - name: filebeat
        image: docker.io/elastic/filebeat:7.17.2
        imagePullPolicy: IfNotPresent
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 100Mi
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: filebeat-config
          mountPath: /etc/filebeat.yml
          subPath: filebeat.yml
        - name: tomcat-logs
          mountPath: /usr/local/tomcat/logs
      volumes:
      - name: tomcat-logs
        emptyDir: {}
      - name: tomcat
        hostPath:
          path: /opt/tomcat
          type: Directory
      - name: filebeat-config
        configMap:
          name: filebeat-config
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: default
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: log
      paths:
        - /usr/local/tomcat/logs/catalina.*
      # Add a tag to the logs collected by this input
      tags: ["tomcat_access_log"]
      fields:
        app: www
        type: tomcat-catalina
      fields_under_root: true
      multiline:
        pattern: '^\['
        negate: true
        match: after
    setup.ilm.enabled: false
    setup.template.name: "tomcat-catalina"
    setup.template.pattern: "tomcat-catalina-*"
    output.kafka:
      codec.format:
        string: '%{[@timestamp]} %{[message]}'
      # Address of your own Kafka broker(s)
      hosts: ["192.168.3.40:9092"]
      topic: 'tomcat-log-topic'
      # Partitioning behaviour
      partition.round_robin:
        reachable_only: false
      # Kafka acknowledgement level for produce requests
      required_acks: 1
      # Compression applied to messages sent to Kafka
      compression: gzip
      # Maximum Kafka message size
      max_message_bytes: 1000000
EOF
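Before applying it, the manifest can be validated without creating any resources; a minimal check:

kubectl apply --dry-run=client -f tomcat-logs.yaml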
Write the Logstash configuration file
cat > tomcat-logstash-to-elastic.conf << "EOF"
input {
  kafka {
    bootstrap_servers => "192.168.3.40:9092"
    group_id => "logstash-group"
    topics => ["tomcat-log-topic"]
    consumer_threads => 3    # Number of consumer threads; the total across all Logstash instances should ideally equal the number of topic partitions
    auto_offset_reset => "earliest"
  }
}
filter {
  # Filter configuration can be added here
}
output {
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => ["http://192.168.1.99:9200"]
    user => "elastic"
    password => "datarc"
    index => "tomcat-kafka-%{+YYYY.MM.dd}"
  }
}
EOF
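The configuration can be syntax-checked and then started with the logstash binary. A minimal sketch, assuming a package installation under /usr/share/logstash (adjust the path to your installation):

/usr/share/logstash/bin/logstash -f tomcat-logstash-to-elastic.conf --config.test_and_exit
/usr/share/logstash/bin/logstash -f tomcat-logstash-to-elastic.conf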
Apply the Tomcat application resource manifest
kubectl apply -f tomcat-logs.yaml
kubectl get deployment.apps
kubectl get pods
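To wait for the rollout to finish and confirm both replicas are scheduled on k8s-worker01:

kubectl rollout status deployment/tomcat-demo
kubectl get pods -o wide -l app=tomcat-demo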
Verify that Tomcat and Filebeat in the Pod are running properly
# View the logs produced by tomcat
[root@k8s-master1 ~]# kubectl logs tomcat-demo-75df89b486-5c45c -c tomcat
# View the logs collected by filebeat
[root@k8s-master1 ~]# kubectl logs tomcat-demo-75df89b486-5c45c -c filebeat
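To verify the pipeline end to end, check that messages reach Kafka and that the daily index appears in Elasticsearch. A minimal sketch, assuming the Kafka CLI tools are available under /usr/local/kafka/bin on the broker host (adjust to your installation) and reusing the Elasticsearch credentials from the Logstash configuration above:

# Consume a few messages from the topic Filebeat writes to
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.3.40:9092 --topic tomcat-log-topic --from-beginning --max-messages 5

# Confirm the daily index is being created in Elasticsearch
curl -u elastic:datarc "http://192.168.1.99:9200/_cat/indices/tomcat-kafka-*?v"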