envoy-01-简介
docker pull envoyproxy/envoy-alpine:v1.20-latest
docker pull envoyproxy/envoy-alpine:v1.18-latest
https://github.com/
https://gitee.com/
root@user:~/servicemesh_in_practise/Envoy-Basics/http-ingress# cat docker-compose.yaml version: '3' services: envoy: image: envoyproxy/envoy-alpine:v1.18-latest volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml networks: envoymesh: ipv4_address: 172.31.3.2 aliases: - ingress webserver01: image: ikubernetes/demoapp:v1.0 environment: - PORT=8080 - HOST=127.0.0.1 network_mode: "service:envoy" depends_on: - envoy networks: envoymesh: driver: bridge ipam: config: - subnet: 172.31.3.0/24 root@user:~/servicemesh_in_practise/Envoy-Basics/http-ingress# cat envoy.yaml static_resources: listeners: - name: listener_0 address: socket_address: { address: 0.0.0.0, port_value: 80 } filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: name: local_route virtual_hosts: - name: web_service_1 domains: ["*"] routes: - match: { prefix: "/" } route: { cluster: local_cluster } http_filters: - name: envoy.filters.http.router clusters: - name: local_cluster connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN load_assignment: cluster_name: local_cluster endpoints: - lb_endpoints: - endpoint: address: socket_address: { address: 127.0.0.1, port_value: 8080 }
root@user:~/servicemesh_in_practise/Envoy-Basics/http-ingress# cat docker-compose.yaml
# docker-compose for the http-ingress example: the demoapp container joins
# the envoy container's network namespace (sidecar pattern), so Envoy reaches
# the app via 127.0.0.1:8080 while the app itself listens only on loopback.
version: '3'
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.18-latest
    volumes:
      - ./envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.3.2
        aliases:
          - ingress
  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    # share the envoy service's network namespace: the app is reachable
    # only through the proxy, never directly from the mesh network
    network_mode: "service:envoy"
    depends_on:
      - envoy

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.3.0/24
curl 172.31.3.2
ingress
ip1:80 --> 127.0.0.1:8080
egress
ip1:80-->ip2:80
root@user:~/servicemesh_in_practise/Envoy-Basics/http-egress# cat envoy.yaml static_resources: listeners: - name: listener_0 address: socket_address: { address: 127.0.0.1, port_value: 80 } filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: name: local_route virtual_hosts: - name: web_service_1 domains: ["*"] routes: - match: { prefix: "/" } route: { cluster: web_cluster } http_filters: - name: envoy.filters.http.router clusters: - name: web_cluster connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN load_assignment: cluster_name: web_cluster endpoints: - lb_endpoints: - endpoint: address: socket_address: { address: 172.31.4.11, port_value: 80 } - endpoint: address: socket_address: { address: 172.31.4.12, port_value: 80 }
root@user:~/servicemesh_in_practise/Envoy-Basics/http-egress# cat docker-compose.yaml
# docker-compose for the http-egress example: the client container shares the
# envoy container's network namespace, so its outbound requests to port 80 on
# loopback are intercepted by the egress listener and load-balanced across the
# two webserver backends (both aliased as "webserver").
version: '3.3'
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.18-latest
    volumes:
      - ./envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.4.2
        aliases:
          - front-proxy
    depends_on:
      - webserver01
      - webserver02
  client:
    image: ikubernetes/admin-toolbox:v1.0
    # share envoy's network namespace so the client's egress traffic passes
    # through the proxy on 127.0.0.1:80
    network_mode: "service:envoy"
    depends_on:
      - envoy
  webserver01:
    image: ikubernetes/demoapp:v1.0
    hostname: webserver01
    networks:
      envoymesh:
        ipv4_address: 172.31.4.11
        aliases:
          - webserver01
          - webserver
  webserver02:
    image: ikubernetes/demoapp:v1.0
    hostname: webserver02
    networks:
      envoymesh:
        ipv4_address: 172.31.4.12
        aliases:
          - webserver02
          - webserver

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.4.0/24
测试
[root@f1d41e3436f2 /]# curl webserver
iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver02, ServerIP: 172.31.4.12!
[root@f1d41e3436f2 /]# curl webserver
iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver01, ServerIP: 172.31.4.11!
[root@f1d41e3436f2 /]# curl webserver01
iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver01, ServerIP: 172.31.4.11!
[root@f1d41e3436f2 /]# curl webserver02
iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver02, ServerIP: 172.31.4.12!
TCP 过滤器 (L3/L4)
root@user:~/servicemesh_in_practise/Envoy-Basics/tcp-front-proxy# cat envoy.yaml static_resources: listeners: name: listener_0 address: socket_address: { address: 0.0.0.0, port_value: 80 } filter_chains: - filters: - name: envoy.tcp_proxy typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: local_cluster clusters: - name: local_cluster connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN load_assignment: cluster_name: local_cluster endpoints: - lb_endpoints: - endpoint: address: socket_address: { address: 172.31.1.11, port_value: 8080 } - endpoint: address: socket_address: { address: 172.31.1.12, port_value: 8080 } root@user:~/servicemesh_in_practise/Envoy-Basics/tcp-front-proxy# cat docker-compose.yaml version: '3.3' services: envoy: image: envoyproxy/envoy-alpine:v1.18-latest volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml networks: envoymesh: ipv4_address: 172.31.1.2 aliases: - front-proxy depends_on: - webserver01 - webserver02 webserver01: image: ikubernetes/demoapp:v1.0 environment: - PORT=8080 hostname: webserver01 networks: envoymesh: ipv4_address: 172.31.1.11 aliases: - webserver01 webserver02: image: ikubernetes/demoapp:v1.0 environment: - PORT=8080 hostname: webserver02 networks: envoymesh: ipv4_address: 172.31.1.12 aliases: - webserver02 networks: envoymesh: driver: bridge ipam: config: - subnet: 172.31.1.0/24
root@user:~/servicemesh_in_practise/Envoy-Basics/tcp-front-proxy# curl 172.31.1.2 iKubernetes demoapp v1.0 !! ClientIP: 172.31.1.2, ServerName: webserver01, ServerIP: 172.31.1.11! root@user:~/servicemesh_in_practise/Envoy-Basics/tcp-front-proxy# curl 172.31.1.2 iKubernetes demoapp v1.0 !! ClientIP: 172.31.1.2, ServerName: webserver02, ServerIP: 172.31.1.12!
DNS配置:
envoy可以配置任意数量的上游集群,并由Cluster Manager进行管理;
由集群管理器负责管理的各集群可以由用户静态配置,也可借助于CDS API动态获取
集群中的每个成员由endpoint进行标识,它可由用户静态配置,也可通过EDS或DNS服务动态发现
static: 静态配置
strict DNS:严格DNS,Envoy将持续且异步地解析指定的DNS目标,并将DNS结果中返回的每个IP地址视为上游集群中的可用成员
logical DNS:逻辑DNS,集群仅使用在需要启动新连接时返回的第一个IP地址,而非严格获取DNS查询结果并假设它们构成整个上游集群
# STRICT_DNS cluster: Envoy continuously re-resolves the "webserver" name and
# treats every A record returned as an upstream endpoint of web_cluster.
clusters:
  - name: web_cluster
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: web_cluster
      endpoints:
        - lb_endpoints:
            - endpoint:
                address:
                  socket_address: { address: webserver, port_value: 80 }
[root@f1d41e3436f2 /]$ curl webserver iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver02, ServerIP: 172.31.4.12! [root@f1d41e3436f2 /]$ curl webserver iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver01, ServerIP: 172.31.4.11!Envoy通过侦听器监听套接字并接收客户端请求,而Envoy的所有工作线程会同时共同监听用户配置的所有套接字,对于某次连接请求,由内核负责将其派发至某个具体的工作线程处理; 随后,相关的工作线程基于特定的处理逻辑分别由相关组件 依次完成连接管理;
综合安全和使用
# Envoy admin interface: binding to 0.0.0.0 exposes it on every interface;
# switch to 127.0.0.1 to restrict admin access to the local host only.
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0  # 127.0.0.1
      port_value: 9901
curl 172.31.5.2:9901/help
admin commands are:
/: Admin home page
/certs: print certs on machine # GET,列出已加载的所有TLS证书及相关的信息;
/clusters: upstream cluster status # GET,额外支持使用“GET /clusters?format=json”
/config_dump: dump current Envoy configs (experimental) # GET,打印Envoy加载的各类配置信息;
/contention: dump current Envoy mutex contention stats (if enabled) # GET,互斥跟踪
/cpuprofiler: enable/disable the CPU profiler # POST,启用或禁用cpuprofiler
/healthcheck/fail: cause the server to fail health checks # POST,强制设定HTTP健康状态检查为失败;
/healthcheck/ok: cause the server to pass health checks # POST,强制设定HTTP健康状态检查为成功;
/heapprofiler: enable/disable the heap profiler # POST,启用或禁用heapprofiler;
/help: print out list of admin commands
/hot_restart_version: print the hot restart compatibility version # GET,打印热重启相关的信息;
/listeners: print listener addresses # GET,列出所有侦听器,支持使用“GET /listeners?format=json”
/logging: query/change logging levels # POST,启用或禁用不同子组件上的不同日志记录级别
/memory: print current allocation/heap usage # POST,打印当前内存分配信息,以字节为单位;
/quitquitquit: exit the server # POST,干净退出服务器;
/reset_counters: reset all counters to zero # POST,重置所有计数器;
/runtime: print runtime values # GET,以json格式输出所有运行时相关值;
/runtime_modify: modify runtime values # POST /runtime_modify?key1=value1&key2=value2,添加或修改在查询参数中传递的运行时值
/server_info: print server version/status information # GET,打印当前Envoy Server的相关信息;
/stats: print server stats # 按需输出统计数据,例如GET /stats?filter=regex,另外还支持json和prometheus两种输出格式;
/stats/prometheus: print server stats in prometheus format # 输出prometheus格式的统计信息;
运行时配置
# Layered runtime with a mutable admin layer: runtime values can be added or
# changed at run time via POST /runtime_modify on the admin interface.
layered_runtime:
  layers:
    - name: admin
      admin_layer: {}
curl -XPOST '172.31.5.2:9901/runtime_modify?key1=value1&key2=value2'
TLS
6个Service
front-envoy:Front Proxy,地址为172.17.0.3
- 3个http后端服务,仅是用于提供测试用的上游服务器,可统一由myservice名称解析到;front-envoy会通过http(会自动跳转至https)和https侦听器接收对这些服务的访问请求,并将其转为http请求后转至后端服务上; (https-http)
- service-blue
- service-red
- service-green
- service-gray:同时提供http和https侦听器,front-envoy在其cluster配置中,会向该服务发起https请求,并会验证其数字证书;(http-https, https-https)
- service-purple:同时提供http和https侦听器,通过http接收的请求会自动重定向至https,并且https侦听器强制要求验证客户端证书;front-envoy在其cluster配置中,会向该服务发起https请求,向其提供自身的客户端证书后,并会验证其数字证书;
root@user:~/servicemesh_in_practise/Security/tls-static# ./gencerts.sh Certificate Name and Certificate Extenstions(envoy_server_cert/envoy_client_cert): front-envoy envoy_server_cert Certificate Name and Certificate Extenstions(envoy_server_cert/envoy_client_cert): front-envoy envoy_client_cert Certificate Name and Certificate Extenstions(envoy_server_cert/envoy_client_cert): service-gray envoy_server_cert Certificate Name and Certificate Extenstions(envoy_server_cert/envoy_client_cert): service-purple envoy_server_cert enter
root@user:~/servicemesh_in_practise/Security/tls-static# chown -R 100.101 certs
root@user:~/hub/servicemesh_in_practise/security/tls-static_bak# cat front-envoy.yaml admin: access_log_path: "/dev/null" address: socket_address: address: 0.0.0.0 port_value: 9901 static_resources: secrets: - name: server_cert tls_certificate: certificate_chain: filename: "/etc/envoy/certs/server.crt" private_key: filename: "/etc/envoy/certs/server.key" - name: client_cert tls_certificate: certificate_chain: filename: "/etc/envoy/certs/client.crt" private_key: filename: "/etc/envoy/certs/client.key" - name: validation_context validation_context: trusted_ca: filename: "/etc/envoy/ca/ca.crt" listeners: - name: listener_http address: socket_address: { address: 0.0.0.0, port_value: 80 } filter_chains: - filters: - name: envoy.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route virtual_hosts: - name: backend domains: ["*"] routes: - match: prefix: "/" redirect: https_redirect: true port_redirect: 443 http_filters: - name: envoy.router typed_config: {} - name: listener_https address: socket_address: { address: 0.0.0.0, port_value: 443 } filter_chains: - filters: - name: envoy.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_https codec_type: AUTO route_config: name: https_route virtual_hosts: - name: https_route domains: ["*"] routes: - match: prefix: "/service/gray" route: cluster: service-gray - match: prefix: "/service/purple" route: cluster: service-purple - match: prefix: "/" route: cluster: mycluster http_filters: - name: envoy.router typed_config: {} tls_context: common_tls_context: tls_certificate_sds_secret_configs: - name: server_cert clusters: - name: mycluster connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN http2_protocol_options: {} load_assignment: cluster_name: 
mycluster endpoints: - lb_endpoints: - endpoint: address: socket_address: address: myservice port_value: 80 - name: service-gray connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN http2_protocol_options: {} load_assignment: cluster_name: service-gray endpoints: - lb_endpoints: - endpoint: address: socket_address: address: service-gray port_value: 443 tls_context: common_tls_context: validation_context_sds_secret_config: name: validation_context - name: service-purple connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN http2_protocol_options: {} load_assignment: cluster_name: service-purple endpoints: - lb_endpoints: - endpoint: address: socket_address: address: service-purple port_value: 443 tls_context: common_tls_context: tls_certificate_sds_secret_configs: - name: client_cert validation_context_sds_secret_config: name: validation_context
root@user:~/hub/servicemesh_in_practise/security/tls-static_bak# cat docker-compose.yaml version: '3' services: front-envoy: image: envoyproxy/envoy-alpine:v1.11.2 volumes: - ./front-envoy.yaml:/etc/envoy/envoy.yaml - ./certs/front-envoy/:/etc/envoy/certs/ - ./certs/CA/:/etc/envoy/ca/ networks: - envoymesh expose: # Expose ports 80 (for general traffic) and 9901 (for the admin server) - "80" - "443" - "9901" ports: - "8080:80" - "8443:443" - "9901:9901" blue: image: ikubernetes/servicemesh-app:latest networks: envoymesh: aliases: - myservice - service-blue - blue environment: - SERVICE_NAME=blue expose: - "80" green: image: ikubernetes/servicemesh-app:latest networks: envoymesh: aliases: - myservice - service-green - green environment: - SERVICE_NAME=green expose: - "80" red: image: ikubernetes/servicemesh-app:latest networks: envoymesh: aliases: - myservice - service-red - red environment: - SERVICE_NAME=red expose: - "80" gray: image: ikubernetes/servicemesh-app:latest volumes: - ./service-gray.yaml:/etc/envoy/envoy.yaml - ./certs/service-gray/:/etc/envoy/certs/ networks: envoymesh: aliases: - gray - service-gray environment: - SERVICE_NAME=gray expose: - "80" - "443" purple: image: ikubernetes/servicemesh-app:latest volumes: - ./service-purple.yaml:/etc/envoy/envoy.yaml - ./certs/service-purple/:/etc/envoy/certs/ - ./certs/CA/:/etc/envoy/ca/ networks: envoymesh: aliases: - purple - service-purple environment: - SERVICE_NAME=purple expose: - "80" - "443" networks: envoymesh: {}
root@user:~/hub/servicemesh_in_practise/security/tls-static_bak# docker-compose ps Name Command State Ports --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- tls-static_bak_blue_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp tls-static_bak_front-envoy_1 /docker-entrypoint.sh envo ... Up 10000/tcp, 0.0.0.0:8443->443/tcp,:::8443->443/tcp, 0.0.0.0:8080->80/tcp,:::8080->80/tcp, 0.0.0.0:9901->9901/tcp,:::9901->9901/tcp tls-static_bak_gray_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 443/tcp, 80/tcp tls-static_bak_green_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp tls-static_bak_purple_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 443/tcp, 80/tcp tls-static_bak_red_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp