1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
name: api-envoy-filter
namespace: istio-system
spec:
configPatches:
- applyTo: NETWORK_FILTER
match:
context: GATEWAY
listener:
filterChain:
filter:
name: envoy.filters.network.http_connection_manager
patch:
operation: MERGE
value:
typed_config:
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
preserve_external_request_id: true

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
-- trim_prefix returns s with the leading prefix p removed when s starts
-- with p; otherwise it returns s unchanged.
-- Lua strings are 1-indexed: sub(1, #p) extracts the first #p characters.
-- (The original used sub(0, #p), which only works because Lua clamps a
-- start index of 0 up to 1.)
function trim_prefix(s, p)
  if s:sub(1, #p) == p then
    return s:sub(#p + 1)
  end
  return s
end

-- envoy_on_request mirrors the request body to the logging service: for every
-- non-empty body chunk it POSTs the chunk bytes to the "http-log-service"
-- cluster, carrying a copy of the original request headers renamed with a
-- "y-" prefix (pseudo-header ":" stripped).
function envoy_on_request(request_handle)
  -- The header set does not change between body chunks, so build it once
  -- instead of rebuilding it inside the loop as the original did.
  local hds = {}
  for key, value in pairs(request_handle:headers()) do
    hds["y-" .. trim_prefix(key, ":")] = value
  end
  hds[":method"] = "POST"
  hds[":path"] = "/log"
  hds[":authority"] = "envoy"

  for chunk in request_handle:bodyChunks() do
    local len = chunk:length()
    if len == 0 then
      break
    end
    -- getBytes() offsets are relative to the CURRENT chunk, so always read
    -- from 0. The original accumulated a cumulative offset (idx) across
    -- chunks, which would read past the end of every chunk after the first.
    local body = chunk:getBytes(0, len)
    -- Asynchronous (fire-and-forget) call with a 10s timeout.
    request_handle:httpCall("http-log-service", hds, body, 10000, true)
  end
end

-- envoy_on_response is intentionally a no-op: only request bodies are
-- mirrored to the log service; responses pass through untouched.
function envoy_on_response(response_handle)
end
1
2
3
4
5
6
7
8
9
10
11
12
13
14
static_resources:
clusters:
- name: http-log-service
type: STRICT_DNS
connect_timeout: 10s
load_assignment:
cluster_name: http-log-service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 9709

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
package main

import (
"bytes"
"encoding/json"
"os"
"testing"

"gopkg.in/yaml.v3"

"google.golang.org/protobuf/encoding/protojson"

bootstrap "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
)

// TestGenBootstrapYAML renders a full static Envoy bootstrap (admin endpoint,
// HTTP/HTTPS listeners and all configured clusters) from config.yaml and
// writes it to envoy-bootstrap.yaml as 2-space-indented YAML.
//
// The proto is marshaled with protojson (proto field names preserved) and the
// resulting JSON is re-encoded as YAML, since protojson is the faithful way
// to serialize Envoy protos.
func TestGenBootstrapYAML(t *testing.T) {
	cfg, err := initConfig("config.yaml")
	if err != nil {
		t.Fatal(err)
	}

	b := &bootstrap.Bootstrap{
		Admin: &bootstrap.Admin{
			AccessLog: nil,
			Address: &core.Address{
				Address: &core.Address_SocketAddress{
					SocketAddress: &core.SocketAddress{
						Address: "0.0.0.0",
						PortSpecifier: &core.SocketAddress_PortValue{
							PortValue: 9901,
						},
					},
				},
			},
		},
		StaticResources: &bootstrap.Bootstrap_StaticResources{
			Listeners: make([]*listener.Listener, 2),
			Clusters:  make([]*cluster.Cluster, len(cfg.Clusters)),
		},
	}

	virtualHosts := genVirtualHosts(cfg)

	b.StaticResources.Listeners[0] = makeListener(ListenerHTTPPort, false, virtualHosts)
	b.StaticResources.Listeners[1] = makeListener(ListenerHTTPSPort, true, virtualHosts)

	for i := 0; i < len(cfg.Clusters); i++ {
		b.StaticResources.Clusters[i] = makeCluster(cfg.Clusters[i])
	}

	mo := protojson.MarshalOptions{UseProtoNames: true}

	bs, err := mo.Marshal(b)
	if err != nil {
		t.Fatal(err)
	}

	var v any

	err = json.Unmarshal(bs, &v)
	if err != nil {
		t.Fatal(err)
	}

	bb := &bytes.Buffer{}
	ye := yaml.NewEncoder(bb)
	ye.SetIndent(2)

	err = ye.Encode(v)
	if err != nil {
		t.Fatal(err)
	}

	// Close (flush) the encoder BEFORE reading bb. The original deferred
	// Close until after os.WriteFile, so anything the encoder flushes at
	// Close time was silently missing from the written file.
	err = ye.Close()
	if err != nil {
		t.Fatal(err)
	}

	err = os.WriteFile("envoy-bootstrap.yaml", bb.Bytes(), 0644)
	if err != nil {
		t.Fatal(err)
	}
}

前言

本文展示的代码仅呈现大致思路,部分代码因涉及公司内部业务,未予展示。

代码

  • main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
package main

import (
"context"
"flag"
"log"
"net"
"path/filepath"

"google.golang.org/grpc"

"github.com/google/uuid"

"github.com/envoyproxy/go-control-plane/pkg/server/v3"
)

var (
// _addr and _config are presumably bound to command-line flags elsewhere
// (the file imports "flag") — TODO confirm against the flag-parsing code.
_addr string
_config string

// VERSION and BIDTIME look like build-time metadata, likely injected via
// -ldflags "-X main.VERSION=..." — confirm against the build script.
VERSION string
BIDTIME string
)

// run wires everything together: it loads the config, builds and validates
// the xDS snapshot, publishes it to the snapshot cache, starts the gRPC
// server in the background, and then blocks until a shutdown signal arrives.
func run(ctx context.Context) {
	log.Printf("version: %s, bidtime: %s", VERSION, BIDTIME)

	cfg, err := initConfig(_config)
	if err != nil {
		log.Fatalf("init config error: %s", err)
	}
	printConfig(cfg)

	gs := newGRPCServer()
	snapCache, xdsServer := newEnvoyServer(ctx)

	// Each snapshot needs a unique version string; a random UUID is enough.
	id, err := uuid.NewRandom()
	if err != nil {
		log.Fatalf("generate version error: %s", err)
	}

	snap := genSnapshot(cfg, id.String())
	if err := snap.Consistent(); err != nil {
		log.Fatalf("snapshot inconsistent: %s", err)
	}

	if err := snapCache.SetSnapshot(ctx, cfg.Node, snap); err != nil {
		log.Fatalf("set snapshot error: %s", err)
	}

	log.Printf("snapshot version: %s", id)

	go runServer(_addr, gs, xdsServer)

	// Block until a signal arrives (sig is declared elsewhere in the package),
	// then stop the server and signal completion via cls.
	s := <-sig
	log.Printf("receive signal: %s", s)

	gs.Stop()

	log.Printf("bye")

	close(cls)
}

// runServer binds a TCP listener on address, registers the Envoy ADS service
// on the gRPC server and serves until the server is stopped. Any setup or
// serve failure is fatal.
func runServer(address string, grpcServer *grpc.Server, envoyServer server.Server) {
	lis, err := net.Listen("tcp", address)
	if err != nil {
		log.Fatalf("listen error: %s", err)
	}

	registerServer(grpcServer, envoyServer)
	log.Printf("start grpc server success on %s", address)

	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("serve error: %s", err)
	}
}
  • config.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
package main

// "time" is required by iRouteRoute.Timeout (time.Duration); the snippet as
// published omitted the import and would not compile.
import "time"

// Config is the top-level YAML/JSON configuration for the control plane:
// the Envoy node ID, optional include files, CORS policy, route table and
// upstream clusters.
type Config struct {
Node string `json:"node,omitempty" yaml:"node"`
Include []string `json:"include,omitempty" yaml:"include"`
Cors *iCors `json:"cors,omitempty" yaml:"cors"`
Routes []*iRoute `json:"routes,omitempty" yaml:"routes"`
Clusters []*iCluster `json:"clusters,omitempty" yaml:"clusters"`
TerminalDomain string `json:"terminal_domain" yaml:"terminal_domain"`
}

// iCors mirrors the Envoy CORS policy fields used by this control plane.
type iCors struct {
AllowMethods []string `json:"allow_methods,omitempty" yaml:"allow_methods"`
AllowHeaders []string `json:"allow_headers,omitempty" yaml:"allow_headers"`
ExposeHeaders []string `json:"expose_headers,omitempty" yaml:"expose_headers"`
}

// iRoute pairs a route match with its action and any request headers to add.
type iRoute struct {
Match *iRouteMatch `json:"match,omitempty" yaml:"match"`
Route *iRouteRoute `json:"route,omitempty" yaml:"route"`
RequestHeadersToAdd []*iRouteHeader `json:"request_headers_to_add,omitempty" yaml:"request_headers_to_add"`
}

// iRouteMatch selects requests by path prefix and, optionally, a header match.
type iRouteMatch struct {
Prefix string `json:"prefix,omitempty" yaml:"prefix"`
Header *iRouteMatchHeader `json:"header,omitempty" yaml:"header"`
}

// iRouteMatchHeader matches one header either exactly or by RE2 regex.
type iRouteMatchHeader struct {
Name string `json:"name,omitempty" yaml:"name"`
ExactMatch string `json:"exact_match,omitempty" yaml:"exact_match"`
Re2Match string `json:"re2_match,omitempty" yaml:"re2_match"`
}

// iRouteRoute describes the route action: target cluster, timeout and
// optional prefix/host rewrites.
type iRouteRoute struct {
Cluster string `json:"cluster,omitempty" yaml:"cluster"`
Timeout time.Duration `json:"timeout,omitempty" yaml:"timeout"`
PrefixRewrite string `json:"prefix_rewrite,omitempty" yaml:"prefix_rewrite"`
HostRewriteLiteral string `json:"host_rewrite_literal,omitempty" yaml:"host_rewrite_literal"`
}

// iRouteHeader wraps a single key/value header entry to add to requests.
type iRouteHeader struct {
Header *iKeyValue `json:"header,omitempty" yaml:"header"`
}

// iKeyValue is a generic key/value pair.
type iKeyValue struct {
Key string `json:"key,omitempty" yaml:"key"`
Value string `json:"value,omitempty" yaml:"value"`
}

// iCluster describes an upstream cluster, either by a combined endpoint
// string or by explicit host/port, with HTTP/2 and TLS toggles.
type iCluster struct {
Name string `json:"name,omitempty" yaml:"name"`
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint"`
Http2 bool `json:"http2,omitempty" yaml:"http2"`
Https bool `json:"https,omitempty" yaml:"https"`

Host string `json:"host,omitempty" yaml:"host"`
Port uint32 `json:"port,omitempty" yaml:"port"`
}
  • server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
package main

import (
"context"
"time"

"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"

"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
"github.com/envoyproxy/go-control-plane/pkg/server/v3"

discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)

// newGRPCServer constructs the gRPC server used to serve xDS, with a high
// concurrent-stream cap and keepalive settings suited to long-lived ADS
// streams (server pings every 30s; clients may ping without active streams).
func newGRPCServer() *grpc.Server {
	const keepaliveInterval = 30 * time.Second

	opts := []grpc.ServerOption{
		grpc.MaxConcurrentStreams(1000000),
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    keepaliveInterval,
			Timeout: 3 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             keepaliveInterval,
			PermitWithoutStream: true,
		}),
	}

	return grpc.NewServer(opts...)
}

// newEnvoyServer builds the snapshot cache (keyed by node ID hash) and the
// xDS server that serves snapshots from it.
func newEnvoyServer(ctx context.Context) (cache.SnapshotCache, server.Server) {
	snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, &envoyLogger{})
	return snapshotCache, server.NewServer(ctx, snapshotCache, nil)
}

// registerServer attaches the Envoy aggregated discovery service (ADS) to
// the gRPC server so a connected Envoy can fetch all xDS resources over a
// single stream.
func registerServer(grpcServer *grpc.Server, envoyServer server.Server) {
discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, envoyServer)
}
  • resource.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
package main

import (
"log"
"strconv"
"strings"

"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"

"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"

accesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"

file "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3"
gzip "github.com/envoyproxy/go-control-plane/envoy/extensions/compression/gzip/compressor/v3"
compressor "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/compressor/v3"
cors "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
lua "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3"
router "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
http "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"

matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)

// genSnapshot assembles the xDS snapshot for cfg at the given version: one
// HTTP and one HTTPS listener sharing the same virtual hosts, plus all
// configured clusters. Snapshot construction failure is fatal.
func genSnapshot(cfg *Config, version string) *cache.Snapshot {
	vhosts := genVirtualHosts(cfg)

	listeners := []types.Resource{
		makeListener(ListenerHTTPPort, false, vhosts),
		makeListener(ListenerHTTPSPort, true, vhosts),
	}

	snap, err := cache.NewSnapshot(version, map[resource.Type][]types.Resource{
		resource.ListenerType: listeners,
		resource.ClusterType:  genClusters(cfg.Clusters),
	})
	if err != nil {
		log.Fatalf("failed to create snapshot: %s", err)
	}

	return snap
}

...

envoy.yaml

1
docker run -d --name envoy -v `pwd`/envoy.yaml:/etc/envoy/envoy.yaml -p 9901:9901 -p 10000:10000 envoyproxy/envoy:v1.23-latest
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
admin:
access_log_path: /tmp/admin_access.log
address:
socket_address: { address: 0.0.0.0, port_value: 9901 }

node:
id: arc-os
cluster: arc-os

dynamic_resources:
ads_config:
api_type: GRPC
transport_api_version: V3
grpc_services:
- envoy_grpc:
cluster_name: ads_cluster
cds_config:
resource_api_version: V3
ads: { }
lds_config:
resource_api_version: V3
ads: { }

static_resources:
clusters:
- name: ads_cluster
connect_timeout: 30s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
http2_protocol_options: { }
load_assignment:
cluster_name: ads_cluster
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 172.17.0.1
port_value: 9777

Ref

  • https://github.com/envoyproxy/go-control-plane
  • https://github.com/envoyproxy/go-control-plane/blob/v0.11.0/internal/example/main/main.go

配置

基于 v2.7.0 的配置文件,修改了 ClusterRole 和 ClusterRoleBinding,使得 dashboard 只能以只读的方式访问 kubernetes 集群。

首先需要修改 dashboard 启动 args,移除 --auto-generate-certificates,添加 --enable-insecure-login 和 --enable-skip-login。

然后修改 Service 端口为 9090,并修改 Deployment 健康检查,根据权限修改 ClusterRole

最后通过访问 kubernetes-dashboard.kubernetes-dashboard.svc.cluster.local:9090 即可。

完整配置

注意:以下配置中禁用了对 secret 的访问权限

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard

---

apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 9090
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: ConfigMap
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
- apiGroups: [""]
resources: ["configmaps", "endpoints", "persistentvolumes", "persistentvolumeclaims", "persistentvolumeclaims/status", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "services/status"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["bindings", "events", "limitranges", "namespaces/status", "pods/log", "pods/status", "replicationcontrollers/status", "resourcequotas", "resourcequotas/status"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["namespaces", "nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "daemonsets/status", "deployments", "deployments/scale", "deployments/status", "replicasets", "replicasets/scale", "replicasets/status", "statefulsets", "statefulsets/scale", "statefulsets/status"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers", "horizontalpodautoscalers/status"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["cronjobs", "cronjobs/status", "jobs", "jobs/status"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["daemonsets", "daemonsets/status", "deployments", "deployments/scale", "deployments/status", "ingresses", "ingresses/status", "networkpolicies", "replicasets", "replicasets/scale", "replicasets/status", "replicationcontrollers/scale"]
verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets", "poddisruptionbudgets/status"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["networkpolicies", "ingresses", "ingresses/status", "ingressclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "volumeattachments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings", "clusterroles", "roles", "rolebindings"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.istio.io"]
resources: ["gateways", "envoyfilters", "virtualservices"]
verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard

---

apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 9090
protocol: TCP
args:
- --namespace=kubernetes-dashboard
- --enable-insecure-login
- --enable-skip-login
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule

---

apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper

---

apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}

Ref

  • https://github.com/kubernetes/dashboard/tree/v2.7.0
  • https://github.com/kubernetes/dashboard/blob/v2.7.0/docs/common/dashboard-arguments.md
  • https://gist.github.com/karthik101/201374aee2ebea25ddf6c723858568be