From ff95caeba9ae7b0f931eeddf3647fb5f488260b0 Mon Sep 17 00:00:00 2001
From: bzsuni <86399306+bzsuni@users.noreply.github.com>
Date: Thu, 28 Dec 2023 20:09:01 +0800
Subject: [PATCH] Add test case R00001 (#1068)
---
go.mod | 3 -
go.sum | 6 -
test/doc/reliability.md | 14 +-
test/doc/reliability_zh.md | 25 +-
test/e2e/common/deploy.go | 12 +-
test/e2e/common/egep.go | 13 -
test/e2e/common/egp.go | 46 ++
test/e2e/common/kwok.go | 28 +-
test/e2e/common/node.go | 7 +-
test/e2e/common/pod.go | 26 +-
.../egressendpointslice_test.go | 8 +-
test/e2e/err/err.go | 4 +-
.../e2e/reliability/reliability_suite_test.go | 3 +
test/e2e/reliability/reliability_test.go | 199 ++++++++-
.../LICENSE | 201 ---------
.../pkg/apis/k8s.cni.cncf.io/register.go | 5 -
.../pkg/apis/k8s.cni.cncf.io/v1/doc.go | 5 -
.../pkg/apis/k8s.cni.cncf.io/v1/register.go | 41 --
.../pkg/apis/k8s.cni.cncf.io/v1/types.go | 180 --------
.../v1/zz_generated.deepcopy.go | 202 ---------
.../spidernet-io/e2eframework/LICENSE | 201 ---------
.../e2eframework/framework/command.go | 41 --
.../e2eframework/framework/configmap.go | 73 ----
.../e2eframework/framework/daemonset.go | 186 ---------
.../e2eframework/framework/deployment.go | 277 ------------
.../e2eframework/framework/endpoint.go | 56 ---
.../e2eframework/framework/error.go | 14 -
.../e2eframework/framework/events.go | 58 ---
.../e2eframework/framework/framework.go | 286 -------------
.../e2eframework/framework/job.go | 128 ------
.../e2eframework/framework/multus.go | 52 ---
.../e2eframework/framework/namespace.go | 125 ------
.../e2eframework/framework/node.go | 94 -----
.../e2eframework/framework/pod.go | 395 ------------------
.../e2eframework/framework/replicaset.go | 161 -------
.../e2eframework/framework/service.go | 64 ---
.../e2eframework/framework/serviceaccounts.go | 48 ---
.../e2eframework/framework/statefulset.go | 169 --------
.../e2eframework/framework/vagrant.go | 5 -
.../spidernet-io/e2eframework/tools/tools.go | 57 ---
vendor/k8s.io/kubectl/LICENSE | 201 ---------
.../kubectl/pkg/util/podutils/podutils.go | 240 -----------
vendor/modules.txt | 11 -
43 files changed, 316 insertions(+), 3654 deletions(-)
delete mode 100644 vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/LICENSE
delete mode 100644 vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/register.go
delete mode 100644 vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/doc.go
delete mode 100644 vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/register.go
delete mode 100644 vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/types.go
delete mode 100644 vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/zz_generated.deepcopy.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/LICENSE
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/command.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/configmap.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/daemonset.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/deployment.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/endpoint.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/error.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/events.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/framework.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/job.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/multus.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/namespace.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/node.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/pod.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/replicaset.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/service.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/serviceaccounts.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/statefulset.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/framework/vagrant.go
delete mode 100644 vendor/github.com/spidernet-io/e2eframework/tools/tools.go
delete mode 100644 vendor/k8s.io/kubectl/LICENSE
delete mode 100644 vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go
diff --git a/go.mod b/go.mod
index 2c53dc818..4f67b0efd 100644
--- a/go.mod
+++ b/go.mod
@@ -26,7 +26,6 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
github.com/spf13/viper v1.18.2
- github.com/spidernet-io/e2eframework v0.0.0-20230403061847-445757b963b3
github.com/stretchr/testify v1.8.4
github.com/tigera/operator v1.32.3
github.com/vishvananda/netlink v1.2.1-beta.2.0.20230130171208-05506ada9f99
@@ -90,7 +89,6 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -147,7 +145,6 @@ require (
k8s.io/component-base v0.28.1 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
- k8s.io/kubectl v0.27.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
diff --git a/go.sum b/go.sum
index 637da4fa1..ce56deaaa 100644
--- a/go.sum
+++ b/go.sum
@@ -337,8 +337,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0 h1:VzM3TYHDgqPkettiP6I6q2jOeQFL4nrJM+UcAc4f6Fs=
-github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0/go.mod h1:nqCI7aelBJU61wiBeeZWJ6oi4bJy5nrjkM6lWIMA4j0=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -490,8 +488,6 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
-github.com/spidernet-io/e2eframework v0.0.0-20230403061847-445757b963b3 h1:DwiGehYumYZUF3wUVQqSaY/WvpZ71zLeIJ1WaSNQn20=
-github.com/spidernet-io/e2eframework v0.0.0-20230403061847-445757b963b3/go.mod h1:fCnYp0IxYHYmGMMm7WDy8pTb3BICSMV2Z7sro9vJhrs=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -996,8 +992,6 @@ k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
-k8s.io/kubectl v0.27.2 h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg=
-k8s.io/kubectl v0.27.2/go.mod h1:GCOODtxPcrjh+EC611MqREkU8RjYBh10ldQCQ6zpFKw=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
diff --git a/test/doc/reliability.md b/test/doc/reliability.md
index 703a2401f..4f3aab076 100644
--- a/test/doc/reliability.md
+++ b/test/doc/reliability.md
@@ -2,11 +2,9 @@
| Case ID | Title | Priority | Smoke | Status | Other |
|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------|--------|-------|
-| R00001 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | | |
-| R00002 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, After restarting `Deployment` successfully, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | | |
-| R00003 | Use `kwok` to create 1000 `Node`, create EgressGateway and set `NodeSelector` to match the created 1000 `node`, `EgressGatewayStatus.NodeList` will be updated as expected. Change `NodeSelector` to not match the created `node`, `EgressGatewayStatus.NodeList` will be updated as expected | p3 | false | | |
-| R00004 | Use `kwok` to create 10 `Node`, create 1000 single-replicas `Deployment`, and create 1000 `Policy` correspondingly, set `EgressIP.AllocatorPolicy` to `RR` mode, after creating successfully `eip` will be evenly distributed on each node | p3 | false | | |
-| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect to another node matching `NodeSelector`, and `egressGatewayStatus` and `EgressClusterStatus` are updated as expected, and the `EgressTunnel` corresponding to the shutdown node ` will be deleted and the egress IP will be accessed as expected | p3 | false | | |
-| R00006 | After shutting down all nodes matched by `NodeSelector` in `egressGateway`, `Pod`’s egress IP will be changed from `eip` to non-`eip`, `egressGatewayStatus.NodeList` will be empty, and the related `EgressIgnoreCIDR.NodeIP` will be deleted and the `EgressTunnel` corresponding to the shutdown node will be deleted. After one of the `node` is turned on, `egressgateway` will recover in a short time and record the recovery time, and `eip` will be revalidated as the egress IP of `Pod`, and the `nodeIP` will be added to `EgressIgnoreCIDR.NodeIP` and `node` related information in `egressGatewayStatus.NodeList` is updated correctly, after all boots, `eip` will only take effect on the first recovered `node`, and `EgressIgnoreCIDR.NodeIP` is updated correct | p3 | false | | |
-| R00007 | Restart each component in the cluster (including calico, kube-proxy) `Pod` in turn. During the restart process, the access IP to outside the cluster is the set `eip` before, and the traffic cannot be interrupted. After the cluster returns to normal, `egressgateway` The individual `cr` state of the component is correct | p1 | false | | |
-| R00008 | Create an `egressGateway` with a pool of 100 IPs. Create 120 `policies`. After multiple deletions and creations, expect the `egressGateway` and `Policy` statuses to be correct, and the `pod`'s egress IPs to match expectations | p1 | true | | |
\ No newline at end of file
+| R00001 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | done | |
+| R00002 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, after restarting `Deployment` successfully, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | done | |
+| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect on another node matching `NodeSelector`, and `egressGatewayStatus` and `EgressClusterStatus` are updated as expected, and the `EgressTunnel` corresponding to the shutdown node will be deleted and the egress IP will be accessed as expected | p3 | false | done | |
+| R00006 | After shutting down all nodes matched by `NodeSelector` in `egressGateway`, `Pod`’s egress IP will be changed from `eip` to non-`eip`, `egressGatewayStatus.NodeList` will be empty, and the related `EgressIgnoreCIDR.NodeIP` will be deleted and the `EgressTunnel` corresponding to the shutdown node will be deleted. After one of the `node` is turned on, `egressgateway` will recover in a short time and record the recovery time, and `eip` will be revalidated as the egress IP of `Pod`, and the `nodeIP` will be added to `EgressIgnoreCIDR.NodeIP` and `node` related information in `egressGatewayStatus.NodeList` is updated correctly; after all nodes are powered on, `eip` will only take effect on the first recovered `node`, and `EgressIgnoreCIDR.NodeIP` is updated correctly | p3 | false | done | |
+| R00007 | Restart each component `Pod` in the cluster (including calico and kube-proxy) in turn. During the restart, the egress IP used to access outside the cluster is the previously set `eip`, and traffic must not be interrupted. After the cluster returns to normal, the state of each `cr` of the `egressgateway` component is correct | p1 | false | done | |
+| R00008 | Create an `egressGateway` with a pool of 100 IPs. Create 120 `policies`. After multiple deletions and creations, expect the `egressGateway` and `Policy` statuses to be correct, and the `pod`'s egress IPs to match expectations | p1 | true | done | |
\ No newline at end of file
diff --git a/test/doc/reliability_zh.md b/test/doc/reliability_zh.md
index f78411d64..85da78e4e 100644
--- a/test/doc/reliability_zh.md
+++ b/test/doc/reliability_zh.md
@@ -3,23 +3,20 @@
| Case ID | Title | Priority | Smoke | Status | Other |
|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------|--------|-------|
-| R00001 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | | |
-| R00002 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, After restarting `Deployment` successfully, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | | |
-| R00003 | Use `kwok` to create 1000 `Node`, create EgressGateway and set `NodeSelector` to match the created 1000 `node`, `EgressGatewayStatus.NodeList` will be updated as expected. Change `NodeSelector` to not match the created `node`, `EgressGatewayStatus.NodeList` will be updated as expected | p3 | false | | |
-| R00004 | Use `kwok` to create 10 `Node`, create 1000 single-replicas `Deployment`, and create 1000 `Policy` correspondingly, set `EgressIP.AllocatorPolicy` to `RR` mode, after creating successfully `eip` will be evenly distributed on each node | p3 | false | | |
-| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect to another node matching `NodeSelector`, and `egressGatewayStatus` and `EgressClusterStatus` are updated as expected, and the `EgressTunnel` corresponding to the shutdown node ` will be deleted and the egress IP will be accessed as expected | p3 | false | | |
+| R00001 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | done | |
+| R00002 | Use `kwok` to create 10 `Node`, create `Deployment` with 1000 replicas, create `Policy` and set `PodSelector` to match `Deployment`, after restarting `Deployment` successfully, all matched `Pod`'s egress IP in the real node is `eip` | p3 | false | done | |
+| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect on another node matching `NodeSelector`, and `egressGatewayStatus` and `EgressClusterStatus` are updated as expected, and the `EgressTunnel` corresponding to the shutdown node will be deleted and the egress IP will be accessed as expected | p3 | false | done | |
| R00006 | After shutting down all nodes matched by `NodeSelector` in `egressGateway`, `Pod`’s egress IP will be changed from `eip` to non-`eip`, `egressGatewayStatus.NodeList` will be empty, and the related `EgressIgnoreCIDR.NodeIP` will be deleted and the `EgressTunnel` corresponding to the shutdown node will be deleted. After one of the `node` is turned on, `egressgateway` will recover in a short time and record the recovery time, and `eip` will be revalidated as the egress IP of `Pod`, and the `nodeIP` will be added to `EgressIgnoreCIDR.NodeIP` and `node` related information in `egressGatewayStatus.NodeList` is updated correctly, after all boots, `eip` will only take effect on the first recovered `node`, and `EgressIgnoreCIDR.NodeIP` is updated correct | p3 | false | | |
-| R00007 | Restart each component in the cluster (including calico, kube-proxy) `Pod` in turn. During the restart process, the access IP to outside the cluster is the set `eip` before, and the traffic cannot be interrupted. After the cluster returns to normal, `egressgateway` The individual `cr` state of the component is correct | p1 | false | | |
+| R00007 | Restart each component `Pod` in the cluster (including calico and kube-proxy) in turn. During the restart, the egress IP used to access outside the cluster is the previously set `eip`, and traffic must not be interrupted. After the cluster returns to normal, the state of each `cr` of the `egressgateway` component is correct | p1 | false | done | |
+| R00008 | Create an `egressGateway` with a pool of 100 IPs. Create 120 `policies`. After multiple deletions and creations, expect the `egressGateway` and `Policy` statuses to be correct, and the `pod`'s egress IPs to match expectations | p1 | true | done | |
-->
# Reliability E2E 用例
| 用例编号 | 标题 | 优先级 | 冒烟 | 状态 | 其他 |
|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----|-------|-----|-----|
-| R00001 | 使用 `kwok` 创建 10 个 `Node`,创建 1000 个副本的 `Deployment`,创建 `Policy` 并设置 `PodSelector`,使之与 `Deployment` 匹配,真实节点中匹配到的所有 `Pod` 的出口 IP 为 `eip` | p3 | false | | |
-| R00002 | 使用 `kwok` 创建 10 个 `Node`,创建 1000 个副本的 `Deployment`,创建 `Policy` 并设置 `PodSelector`,使之与 `Deployment` 匹配,重启 `Deployment` 成功后, 真实节点中匹配到的所有 `Pod` 的出口 IP 为 `eip` | p3 | false | | |
-| R00003 | 使用 `kwok` 创建 1000 个 `Node`,创建 EgressGateway 并设置 `NodeSelector` 匹配创建的 1000 个 `node`,`EgressGatewayStatus.NodeList` 会如期更新。更改 `NodeSelector` 使之与创建的 `node` 不匹配,`EgressGatewayStatus.NodeList` 会如期更新 | p3 | false | | |
-| R00004 | 使用 `kwok` 创建 10 个 `Node`,创建 1000 个单副本的 `Deployment`,并对应创建 1000 个 `Policy` 设置 `EgressIP.AllocatorPolicy` 为轮询模式,创建成功后 `eip` 会在各个节点上平均分配 | p3 | false | | |
-| R00005 | 当关机 `eip` 生效的节点后,`eip` 会生效到另外匹配 `NodeSelector` 的节点上,并且 `egressGatewayStatus` 及 `EgressClusterStatus` 如预期更新,与被关机的节点对应的 `EgressTunnel` 将被删除,出口 IP 如预期访问 | p3 | false | | |
-| R00006 | 当关机 `egressGateway` 中 `NodeSelector` 匹配的所有节点后,`Pod` 的出口 IP 将由 `eip` 改为非 `eip`,`egressGatewayStatus.NodeList` 将为空,相关的 `EgressIgnoreCIDR.NodeIP` 将被删除,与被关机的节点对应的 `EgressTunnel` 将被删除。将其中一个 `node` 开机后,`egressgateway` 会在短时间内恢复并记录恢复时间,并且 `eip` 重新生效为 `Pod` 的出口 IP,`EgressIgnoreCIDR.NodeIP` 将对应的 `nodeIP` 添加并且 `egressGatewayStatus.NodeList` 中 `node` 相关信息更新正确,全部开机最后 `eip` 只会生效在第一个恢复的 `node` 上,`EgressIgnoreCIDR.NodeIP` 更新正确 | p3 | false | | |
-| R00007 | 依次重启集群中各个组件(包含 calico,kube-proxy)`Pod`, 重启过程中访问集群外部的出口 IP 为设置好的 `eip`,并且业务不能断流, 等待集群恢复正常后,`egressgateway` 组件的各个 `cr` 状态正确 | p1 | false | | |
-| R00008 | 创建 `egressGateway` 分配有 100 个 IP 的池,创建 120 个 policy,做多次删除和创建操作之后,期望 `egressGateway` 及 `Policy` 状态正确, `pod` 的出口 IP 符合预期 | p1 | true | | |
\ No newline at end of file
+| R00001 | 使用 `kwok` 创建 10 个 `Node`,创建 1000 个副本的 `Deployment`,创建 `Policy` 并设置 `PodSelector`,使之与 `Deployment` 匹配,真实节点中匹配到的所有 `Pod` 的出口 IP 为 `eip` | p3 | false | done | |
+| R00002 | 使用 `kwok` 创建 10 个 `Node`,创建 1000 个副本的 `Deployment`,创建 `Policy` 并设置 `PodSelector`,使之与 `Deployment` 匹配,重启 `Deployment` 成功后, 真实节点中匹配到的所有 `Pod` 的出口 IP 为 `eip` | p3 | false | done | |
+| R00005 | 当关机 `eip` 生效的节点后,`eip` 会生效到另外匹配 `NodeSelector` 的节点上,并且 `egressGatewayStatus` 及 `EgressClusterStatus` 如预期更新,与被关机的节点对应的 `EgressTunnel` 将被删除,出口 IP 如预期访问 | p3 | false | done | |
+| R00006 | 当关机 `egressGateway` 中 `NodeSelector` 匹配的所有节点后,`Pod` 的出口 IP 将由 `eip` 改为非 `eip`,`egressGatewayStatus.NodeList` 将为空,相关的 `EgressIgnoreCIDR.NodeIP` 将被删除,与被关机的节点对应的 `EgressTunnel` 将被删除。将其中一个 `node` 开机后,`egressgateway` 会在短时间内恢复并记录恢复时间,并且 `eip` 重新生效为 `Pod` 的出口 IP,`EgressIgnoreCIDR.NodeIP` 将对应的 `nodeIP` 添加并且 `egressGatewayStatus.NodeList` 中 `node` 相关信息更新正确,全部开机最后 `eip` 只会生效在第一个恢复的 `node` 上,`EgressIgnoreCIDR.NodeIP` 更新正确 | p3 | false | done | |
+| R00007 | 依次重启集群中各个组件(包含 calico,kube-proxy)`Pod`, 重启过程中访问集群外部的出口 IP 为设置好的 `eip`,并且业务不能断流, 等待集群恢复正常后,`egressgateway` 组件的各个 `cr` 状态正确 | p1 | false | done | |
+| R00008 | 创建 `egressGateway` 分配有 100 个 IP 的池,创建 120 个 policy,做多次删除和创建操作之后,期望 `egressGateway` 及 `Policy` 状态正确, `pod` 的出口 IP 符合预期 | p1 | true | done | |
\ No newline at end of file
diff --git a/test/e2e/common/deploy.go b/test/e2e/common/deploy.go
index d65b613d6..e312bb022 100644
--- a/test/e2e/common/deploy.go
+++ b/test/e2e/common/deploy.go
@@ -5,7 +5,6 @@ package common
import (
"context"
- "fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
@@ -19,8 +18,8 @@ import (
e2eerr "github.com/spidernet-io/egressgateway/test/e2e/err"
)
-func CreateDeploy(ctx context.Context, cli client.Client, name string, image string, repolicas int) (*appsv1.Deployment, error) {
- ctx, cancel := context.WithTimeout(ctx, time.Second*20)
+func CreateDeploy(ctx context.Context, cli client.Client, name string, image string, repolicas int, timeout time.Duration) (*appsv1.Deployment, error) {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
var terminationGracePeriodSeconds int64 = 0
@@ -58,7 +57,7 @@ func CreateDeploy(ctx context.Context, cli client.Client, name string, image str
select {
case <-ctx.Done():
_ = DeleteObj(context.Background(), cli, res)
- return nil, fmt.Errorf("create DaemonSet time out")
+ return nil, e2eerr.ErrTimeout
default:
err := cli.Get(ctx, types.NamespacedName{Namespace: res.Namespace, Name: res.Name}, res)
if err != nil {
@@ -101,6 +100,11 @@ func WaitDeployDeleted(ctx context.Context, cli client.Client, deploy *appsv1.De
time.Sleep(time.Second / 2)
continue
}
+ pl, err := GetNodesPodList(ctx, cli, deploy.Spec.Template.Labels, []string{})
+ if err != nil || len(pl.Items) != 0 {
+ time.Sleep(time.Second / 2)
+ continue
+ }
return nil
}
}
diff --git a/test/e2e/common/egep.go b/test/e2e/common/egep.go
index 8054ab210..113f0f634 100644
--- a/test/e2e/common/egep.go
+++ b/test/e2e/common/egep.go
@@ -14,8 +14,6 @@ import (
egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
e2eerr "github.com/spidernet-io/egressgateway/test/e2e/err"
"github.com/spidernet-io/egressgateway/test/e2e/tools"
-
- . "github.com/onsi/ginkgo/v2"
)
func CheckEgressEndPointSliceStatus(ctx context.Context, cli client.Client, egp *egressv1.EgressPolicy) (bool, error) {
@@ -37,15 +35,10 @@ func CheckEgressEndPointSliceStatus(ctx context.Context, cli client.Client, egp
if pod2Ep[v.Name].Pod != v.Name ||
pod2Ep[v.Name].Namespace != v.Namespace ||
pod2Ep[v.Name].Node != v.Spec.NodeName {
- GinkgoWriter.Printf("pod2Ep[v.Name].Pod: %s\nv.Name: %s\n", pod2Ep[v.Name].Pod, v.Name)
- GinkgoWriter.Printf("pod2Ep[v.Name].Namespace: %s\nv.Namespace: %s\n", pod2Ep[v.Name].Namespace, v.Namespace)
- GinkgoWriter.Printf("pod2Ep[v.Name].Node: %s\nv.Spec.NodeName: %s\n", pod2Ep[v.Name].Node, v.Spec.NodeName)
ok++
}
ipv4s, ipv6s := GetPodIPs(&v)
if !tools.IsSameSlice(pod2Ep[v.Name].IPv4, ipv4s) || !tools.IsSameSlice(pod2Ep[v.Name].IPv6, ipv6s) {
- GinkgoWriter.Printf("pod2Ep[v.Name].IPv4: %v\nipv4s: %v\n", pod2Ep[v.Name].IPv4, ipv4s)
- GinkgoWriter.Printf("pod2Ep[v.Name].IPv6: %v\nipv6s: %v\n", pod2Ep[v.Name].IPv6, ipv6s)
ok++
}
if ok != 0 {
@@ -74,16 +67,10 @@ func CheckEgressClusterEndPointSliceStatus(ctx context.Context, cli client.Clien
if pod2Ep[v.Name].Pod != v.Name ||
pod2Ep[v.Name].Namespace != v.Namespace ||
pod2Ep[v.Name].Node != v.Spec.NodeName {
- GinkgoWriter.Printf("pod2Ep[v.Name].Pod: %s\nv.Name: %s\n", pod2Ep[v.Name].Pod, v.Name)
- GinkgoWriter.Printf("pod2Ep[v.Name].Namespace: %s\nv.Namespace: %s\n", pod2Ep[v.Name].Namespace, v.Namespace)
- GinkgoWriter.Printf("pod2Ep[v.Name].Node: %s\nv.Spec.NodeName: %s\n", pod2Ep[v.Name].Node, v.Spec.NodeName)
ok++
}
ipv4s, ipv6s := GetPodIPs(&v)
if !tools.IsSameSlice(pod2Ep[v.Name].IPv4, ipv4s) || !tools.IsSameSlice(pod2Ep[v.Name].IPv6, ipv6s) {
- GinkgoWriter.Printf("pod2Ep[v.Name].IPv4: %v\nipv4s: %v\n", pod2Ep[v.Name].IPv4, ipv4s)
- GinkgoWriter.Printf("pod2Ep[v.Name].IPv6: %v\nipv6s: %v\n", pod2Ep[v.Name].IPv6, ipv6s)
-
ok++
}
if ok != 0 {
diff --git a/test/e2e/common/egp.go b/test/e2e/common/egp.go
index 402a3fedc..28cc8dfef 100644
--- a/test/e2e/common/egp.go
+++ b/test/e2e/common/egp.go
@@ -322,6 +322,52 @@ func WaitEgressPolicyStatusReady(ctx context.Context, cli client.Client, egp *eg
}
}
+// WaitEgressClusterPolicyStatusReady waits for the EgressClusterPolicy status.Eip to be allocated after the EgressClusterPolicy is created
+func WaitEgressClusterPolicyStatusReady(ctx context.Context, cli client.Client, egcp *egressv1.EgressClusterPolicy, v4Enabled, v6Enabled bool, timeout time.Duration) error {
+ if !v4Enabled && !v6Enabled {
+ return fmt.Errorf("both v4 and v6 are not enabled")
+ }
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ var v4Ok, v6Ok bool
+
+ for {
+ select {
+ case <-ctx.Done():
+			return fmt.Errorf("timeout waiting for egressClusterPolicy status to be ready")
+ default:
+ err := cli.Get(ctx, types.NamespacedName{Name: egcp.Name}, egcp)
+ if err != nil {
+ time.Sleep(time.Second / 2)
+ continue
+ }
+ if !egcp.Spec.EgressIP.UseNodeIP {
+ if v4Enabled && len(egcp.Status.Eip.Ipv4) != 0 {
+ v4Ok = true
+ }
+ if v6Enabled && len(egcp.Status.Eip.Ipv6) != 0 {
+ v6Ok = true
+ }
+ } else {
+ if len(egcp.Status.Eip.Ipv4) == 0 && len(egcp.Status.Eip.Ipv6) == 0 {
+ return nil
+ }
+ }
+ if v4Enabled && v6Enabled {
+ if v4Ok && v6Ok {
+ return nil
+ }
+ } else if v4Enabled && v4Ok {
+ return nil
+ } else if v6Enabled && v6Ok {
+ return nil
+ }
+ time.Sleep(time.Second / 2)
+ }
+ }
+}
+
// CreateEgressPolicyWithEipAllocatorRR creates an egressPolicy and sets Spec.EgressIP.AllocatorPolicy to "rr"
func CreateEgressPolicyWithEipAllocatorRR(ctx context.Context, cli client.Client, egw *egressv1.EgressGateway, labels map[string]string) (*egressv1.EgressPolicy, error) {
return CreateEgressPolicyCustom(ctx, cli,
diff --git a/test/e2e/common/kwok.go b/test/e2e/common/kwok.go
index 701664f94..49ca8fe5f 100644
--- a/test/e2e/common/kwok.go
+++ b/test/e2e/common/kwok.go
@@ -4,13 +4,12 @@
package common
import (
+ "context"
"strconv"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/spidernet-io/e2eframework/framework"
)
func GenerateKwokNodeYaml(n int) *corev1.Node {
@@ -38,7 +37,7 @@ func GenerateKwokNodeYaml(n int) *corev1.Node {
},
Spec: corev1.NodeSpec{
Taints: []corev1.Taint{
- KwokNodeTaint,
+ // KwokNodeTaint,
},
},
Status: corev1.NodeStatus{
@@ -51,23 +50,28 @@ func GenerateKwokNodeYaml(n int) *corev1.Node {
return node
}
-func CreateKwokNodes(f *framework.Framework, n int) error {
+func CreateKwokNodes(ctx context.Context, cli client.Client, n int) error {
for i := 0; i < n; i++ {
- e := f.CreateResource(GenerateKwokNodeYaml(i))
- if e != nil {
- return e
+ err := cli.Create(ctx, GenerateKwokNodeYaml(i))
+ if err != nil {
+ return err
}
}
return nil
}
-func GetKwokNodes(f *framework.Framework) (*corev1.NodeList, error) {
- return f.GetNodeList(client.MatchingLabels(KwokNodeLabel))
+func GetKwokNodes(ctx context.Context, cli client.Client) (*corev1.NodeList, error) {
+ nodeList := new(corev1.NodeList)
+ err := cli.List(ctx, nodeList, client.MatchingLabels(KwokNodeLabel))
+ if err != nil {
+ return nil, err
+ }
+ return nodeList, nil
}
-func DeleteKwokNodes(f *framework.Framework, nodes *corev1.NodeList) error {
+func DeleteKwokNodes(ctx context.Context, cli client.Client, nodes *corev1.NodeList) error {
for _, node := range nodes.Items {
- err := f.DeleteResource(&node)
+ err := DeleteObj(ctx, cli, &node)
if err != nil {
return err
}
diff --git a/test/e2e/common/node.go b/test/e2e/common/node.go
index be91cf4c3..9b906c2a4 100644
--- a/test/e2e/common/node.go
+++ b/test/e2e/common/node.go
@@ -60,7 +60,7 @@ func PowerOffNodeUntilNotReady(ctx context.Context, cli client.Client, nodeName
c := fmt.Sprintf("docker stop %s", nodeName)
out, err := tools.ExecCommand(ctx, c, execTimeout)
if err != nil {
- return fmt.Errorf("err: %v\nout: %v\n", err, string(out))
+ return fmt.Errorf("err: %v\nout: %v", err, string(out))
}
ctx, cancel := context.WithTimeout(ctx, poweroffTimeout)
@@ -87,7 +87,7 @@ func PowerOnNodeUntilReady(ctx context.Context, cli client.Client, nodeName stri
c := fmt.Sprintf("docker start %s", nodeName)
out, err := tools.ExecCommand(ctx, c, execTimeout)
if err != nil {
- return fmt.Errorf("err: %v\nout: %v\n", err, string(out))
+ return fmt.Errorf("err: %v\nout: %v", err, string(out))
}
ctx, cancel := context.WithTimeout(ctx, poweronTimeout)
@@ -95,7 +95,7 @@ func PowerOnNodeUntilReady(ctx context.Context, cli client.Client, nodeName stri
for {
select {
case <-ctx.Done():
- return e2eerr.ErrTimeout
+ return e2eerr.ErrWaitNodeOnTimeout
default:
node, err := GetNode(ctx, cli, nodeName)
if err != nil {
@@ -137,7 +137,6 @@ func GetNodeIP(node *corev1.Node) (string, string) {
}
func CheckNodeStatus(node *corev1.Node, expectReady bool) bool {
-
unreachTaintTemp := &corev1.Taint{
Key: corev1.TaintNodeUnreachable,
Effect: corev1.TaintEffectNoExecute,
diff --git a/test/e2e/common/pod.go b/test/e2e/common/pod.go
index dc1e63f62..01338cd89 100644
--- a/test/e2e/common/pod.go
+++ b/test/e2e/common/pod.go
@@ -62,7 +62,7 @@ func CreatePod(ctx context.Context, cli client.Client, image string) (*corev1.Po
return nil, err
}
- if res.Status.Phase == corev1.PodRunning {
+ if IfContainerRunning(res) {
return res, nil
}
@@ -134,7 +134,7 @@ func WaitPodRunning(ctx context.Context, cli client.Client, pod *corev1.Pod, tim
time.Sleep(time.Second)
continue
}
- if pod.Status.Phase == corev1.PodRunning {
+ if IfPodRunning(pod) {
return nil
}
time.Sleep(time.Second)
@@ -176,14 +176,14 @@ WAIT:
for {
select {
case <-ctx.Done():
- return e2eerr.ErrTimeout
+ return e2eerr.ErrWaitPodRunningTimeout
default:
err := cli.List(ctx, podList)
if err != nil {
continue
}
for _, pod := range podList.Items {
- if pod.Status.Phase != corev1.PodRunning {
+ if !IfPodRunning(&pod) {
time.Sleep(time.Second)
goto WAIT
}
@@ -241,7 +241,7 @@ func IfPodListRestarted(pods *corev1.PodList) bool {
return false
}
}
- if p.Status.Phase != corev1.PodRunning {
+ if !IfPodRunning(&p) {
return false
}
}
@@ -280,7 +280,7 @@ func DeletePodsUntilReady(ctx context.Context, cli client.Client, labels map[str
}
for _, p := range pl.Items {
- if p.Status.Phase != corev1.PodRunning {
+ if !IfPodRunning(&p) {
time.Sleep(time.Second)
continue
}
@@ -307,3 +307,17 @@ func DeletePodList(ctx context.Context, cli client.Client, podList *corev1.PodLi
}
return nil
}
+
+// IfContainerRunning checks whether all containers of the pod are running and ready
+func IfContainerRunning(pod *corev1.Pod) bool {
+ for _, c := range pod.Status.ContainerStatuses {
+ if c.State.Running == nil || !c.Ready {
+ return false
+ }
+ }
+ return true
+}
+
+// IfPodRunning checks whether the pod phase is Running
+func IfPodRunning(pod *corev1.Pod) bool {
+ return pod.Status.Phase == corev1.PodRunning
+}
diff --git a/test/e2e/egressendpointslice/egressendpointslice_test.go b/test/e2e/egressendpointslice/egressendpointslice_test.go
index 0d10efa1b..130ec5116 100644
--- a/test/e2e/egressendpointslice/egressendpointslice_test.go
+++ b/test/e2e/egressendpointslice/egressendpointslice_test.go
@@ -92,7 +92,7 @@ var _ = Describe("Egressendpointslice", func() {
It("test the namespace-level policy", func() {
// create deploy
deployName := "deploy-" + uuid.NewString()
- deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum)
+ deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum, time.Second*20)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create daemonset %s\n", deployName))
GinkgoWriter.Printf("succeeded to create deploy %s\n", deploy.Name)
@@ -106,7 +106,7 @@ var _ = Describe("Egressendpointslice", func() {
Expect(err).NotTo(HaveOccurred())
// create deploy agen
- deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum)
+ deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum, time.Second*20)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create daemonset %s\n", deployName))
GinkgoWriter.Printf("succeeded to create deploy %s\n", deploy.Name)
@@ -133,7 +133,7 @@ var _ = Describe("Egressendpointslice", func() {
It("test the cluster-level policy", func() {
// create deploy
deployName := "deploy-" + uuid.NewString()
- deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum)
+ deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum, time.Second*20)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create daemonset %s\n", deployName))
GinkgoWriter.Printf("succeeded to create deploy %s\n", deploy.Name)
@@ -147,7 +147,7 @@ var _ = Describe("Egressendpointslice", func() {
Expect(err).NotTo(HaveOccurred())
// create deploy again
- deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum)
+ deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, podNum, time.Second*20)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create daemonset %s\n", deployName))
GinkgoWriter.Printf("succeeded to create deploy %s\n", deploy.Name)
diff --git a/test/e2e/err/err.go b/test/e2e/err/err.go
index cb0cbad6f..184265b3b 100644
--- a/test/e2e/err/err.go
+++ b/test/e2e/err/err.go
@@ -8,5 +8,7 @@ import (
)
var (
- ErrTimeout = errors.New("error timeout")
+ ErrTimeout = errors.New("error timeout")
+	ErrWaitNodeOnTimeout     = errors.New("timeout waiting for node to be ready")
+ ErrWaitPodRunningTimeout = errors.New("timeout waiting for pod running")
)
diff --git a/test/e2e/reliability/reliability_suite_test.go b/test/e2e/reliability/reliability_suite_test.go
index 6919d2169..0efa4e59c 100644
--- a/test/e2e/reliability/reliability_suite_test.go
+++ b/test/e2e/reliability/reliability_suite_test.go
@@ -32,6 +32,7 @@ var (
cli client.Client
nodeNameList, workerNodes []string
+ nodeLabel map[string]string
)
var _ = BeforeSuite(func() {
@@ -60,6 +61,8 @@ var _ = BeforeSuite(func() {
}
Expect(len(workerNodes) > 1).To(BeTrue(), "this test case needs at lest 2 worker nodes")
+ nodeLabel = nodes.Items[0].Labels
+
// get egressgateway config
configMap := &corev1.ConfigMap{}
err = cli.Get(ctx, types.NamespacedName{Name: "egressgateway", Namespace: config.Namespace}, configMap)
diff --git a/test/e2e/reliability/reliability_test.go b/test/e2e/reliability/reliability_test.go
index 7d9bd8f1d..bd8890a95 100644
--- a/test/e2e/reliability/reliability_test.go
+++ b/test/e2e/reliability/reliability_test.go
@@ -18,6 +18,7 @@ import (
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -48,8 +49,9 @@ var _ = Describe("Reliability", Serial, Label("Reliability"), func() {
labels = map[string]string{"eg-reliability": "true"}
selector := egressv1.NodeSelector{Selector: &v1.LabelSelector{MatchLabels: labels}}
- err = common.LabelNodes(ctx, cli, egNodes, labels)
- Expect(err).NotTo(HaveOccurred())
+ Eventually(ctx, func() error {
+ return common.LabelNodes(ctx, cli, egNodes, labels)
+ }).WithTimeout(time.Second * 6).WithPolling(time.Second * 2).Should(Succeed())
ipNum = 3
pool, err = common.GenIPPools(ctx, cli, egressConfig.EnableIPv4, egressConfig.EnableIPv6, int64(ipNum), 2)
@@ -96,7 +98,7 @@ var _ = Describe("Reliability", Serial, Label("Reliability"), func() {
// start up all nodes if some nodes not ready
GinkgoWriter.Println("PowerOnNodesUntilClusterReady")
- Expect(common.PowerOnNodesUntilClusterReady(ctx, cli, workerNodes, time.Minute, time.Minute)).NotTo(HaveOccurred())
+ Expect(common.PowerOnNodesUntilClusterReady(ctx, cli, workerNodes, time.Minute*3, time.Minute*3)).NotTo(HaveOccurred())
// unlabel nodes
GinkgoWriter.Println("unLabel nodes")
@@ -296,6 +298,146 @@ var _ = Describe("Reliability", Serial, Label("Reliability"), func() {
Entry("restart calico-kube-controllers", constant.CalicoControllerLabel, time.Minute),
)
})
+
+ Context("kwok", Serial, func() {
+ var ctx context.Context
+ var err error
+
+ // kwok
+ var (
+ kNodesNum int
+ kwokNodes *corev1.NodeList
+ )
+ // deploy
+ var (
+ deploy *appsv1.Deployment
+ deployReplicas int
+ deployName string
+ )
+
+ // gateway
+ var (
+ egw *egressv1.EgressGateway
+ ipNum int
+ pool egressv1.Ippools
+ )
+
+ // policy
+ var (
+ egp *egressv1.EgressPolicy
+ egcp *egressv1.EgressClusterPolicy
+ )
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ kNodesNum = 10
+ // deploy
+ deployReplicas = 50
+ deployName = "dp-" + uuid.NewString()
+
+ // gateway
+ ipNum = 3
+
+ // create deploy
+ deploy, err = common.CreateDeploy(ctx, cli, deployName, config.Image, deployReplicas, time.Minute*5)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(deploy).NotTo(BeNil())
+ GinkgoWriter.Printf("succeeded to create the deploy %s\n", deploy.Name)
+
+ // create EgressGateway
+ pool, err = common.GenIPPools(ctx, cli, egressConfig.EnableIPv4, egressConfig.EnableIPv6, int64(ipNum), 1)
+ Expect(err).NotTo(HaveOccurred())
+ nodeSelector := egressv1.NodeSelector{Selector: &v1.LabelSelector{MatchLabels: nodeLabel}}
+
+ egw, err = common.CreateGatewayNew(ctx, cli, "egw-"+uuid.NewString(), pool, nodeSelector)
+ Expect(err).NotTo(HaveOccurred())
+ GinkgoWriter.Printf("succeeded to create the egressGateway: %s\n", egw.Name)
+
+		// create 10 kwok nodes
+ Expect(common.CreateKwokNodes(ctx, cli, kNodesNum)).NotTo(HaveOccurred())
+ GinkgoWriter.Println("succeeded to create the kwok nodes")
+
+ // get kwok nodes
+ kwokNodes, err = common.GetKwokNodes(ctx, cli)
+ Expect(err).NotTo(HaveOccurred())
+
+ DeferCleanup(func() {
+ // delete deploy
+ if deploy != nil {
+ GinkgoWriter.Printf("delete deploy %s\n", deploy.Name)
+ Expect(common.WaitDeployDeleted(ctx, cli, deploy, time.Minute*5)).NotTo(HaveOccurred())
+ }
+
+ // delete policy
+ if egp != nil {
+ GinkgoWriter.Printf("delete policy %s\n", egp.Name)
+ Expect(common.DeleteEgressPolicies(ctx, cli, []*egressv1.EgressPolicy{egp})).NotTo(HaveOccurred())
+ }
+
+ // delete cluster policy
+ if egcp != nil {
+ GinkgoWriter.Printf("delete cluster policy %s\n", egcp.Name)
+ Expect(common.DeleteEgressClusterPolicies(ctx, cli, []*egressv1.EgressClusterPolicy{egcp})).NotTo(HaveOccurred())
+ }
+
+ // delete gateway
+ if egw != nil {
+ GinkgoWriter.Printf("delete gateway %s\n", egw.Name)
+ Expect(common.DeleteEgressGateway(ctx, cli, egw, time.Second*5)).NotTo(HaveOccurred())
+ }
+
+ // delete kwok nodes
+ GinkgoWriter.Println("delete kwok nodes")
+ Expect(common.DeleteKwokNodes(ctx, cli, kwokNodes)).NotTo(HaveOccurred())
+ })
+ })
+
+ Context("check eip", Serial, func() {
+ It("namespace-level policy", Label("R00001", "R00002"), func() {
+ // create policy
+ egp, err = createPolicy(ctx, egw, deploy)
+ Expect(err).NotTo(HaveOccurred())
+ GinkgoWriter.Printf("succeeded to create the policy %s\n", egp.Name)
+
+			// check the egress IP of the pods on the real nodes
+			GinkgoWriter.Println("check the egress IP of the pods on the real nodes")
+ err = common.CheckEgressIPOfNodesPodList(ctx, cli, config, egressConfig, deploy.Spec.Template.Labels, nodeNameList, egp.Status.Eip.Ipv4, egp.Status.Eip.Ipv6, true)
+ Expect(err).NotTo(HaveOccurred())
+
+ // restart deploy
+ GinkgoWriter.Printf("restart the deployment %s\n", deploy.Name)
+ err = common.DeletePodsUntilReady(ctx, cli, deploy.Spec.Template.Labels, time.Minute*10)
+ Expect(err).NotTo(HaveOccurred())
+
+			// check the egress IP of the pods on the real nodes again
+			GinkgoWriter.Println("check the egress IP of the pods on the real nodes again")
+ err = common.CheckEgressIPOfNodesPodList(ctx, cli, config, egressConfig, deploy.Spec.Template.Labels, nodeNameList, egp.Status.Eip.Ipv4, egp.Status.Eip.Ipv6, true)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("cluster-level policy", Label("R00001", "R00002"), func() {
+ // create policy
+ egcp, err = createClusterPolicy(ctx, egw, deploy)
+ Expect(err).NotTo(HaveOccurred())
+ GinkgoWriter.Printf("succeeded to create the policy %s\n", egcp.Name)
+
+			// check the egress IP of the pods on the real nodes
+			GinkgoWriter.Println("check the egress IP of the pods on the real nodes")
+ err = common.CheckEgressIPOfNodesPodList(ctx, cli, config, egressConfig, deploy.Spec.Template.Labels, nodeNameList, egcp.Status.Eip.Ipv4, egcp.Status.Eip.Ipv6, true)
+ Expect(err).NotTo(HaveOccurred())
+
+ // restart deploy
+ GinkgoWriter.Printf("restart the deployment %s\n", deploy.Name)
+ err = common.DeletePodsUntilReady(ctx, cli, deploy.Spec.Template.Labels, time.Minute*10)
+ Expect(err).NotTo(HaveOccurred())
+
+			// check the egress IP of the pods on the real nodes again
+			GinkgoWriter.Println("check the egress IP of the pods on the real nodes again")
+ err = common.CheckEgressIPOfNodesPodList(ctx, cli, config, egressConfig, deploy.Spec.Template.Labels, nodeNameList, egcp.Status.Eip.Ipv4, egcp.Status.Eip.Ipv6, true)
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+ })
})
func checkGatewayStatus(ctx context.Context, cli client.Client, pool egressv1.Ippools, ipNum int, gatewayNode string, otherNodes, notReadyNodes []string, policy *egressv1.EgressPolicy, egw *egressv1.EgressGateway, timeout time.Duration) {
@@ -362,3 +504,54 @@ func checkGatewayStatus(ctx context.Context, cli client.Client, pool egressv1.Ip
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("expect: %v\ngot: %v\n", *expectGatewayStatus, egw.Status))
GinkgoWriter.Println("succeeded to check gateway status")
}
+
+func createPolicy(ctx context.Context, egw *egressv1.EgressGateway, deploy *appsv1.Deployment) (*egressv1.EgressPolicy, error) {
+ // create policy
+ egp, err := common.CreateEgressPolicyCustom(ctx, cli,
+ func(egp *egressv1.EgressPolicy) {
+ egp.Spec.EgressGatewayName = egw.Name
+
+ if egressConfig.EnableIPv4 {
+ egp.Spec.EgressIP.IPv4 = egw.Spec.Ippools.Ipv4DefaultEIP
+ }
+ if egressConfig.EnableIPv6 {
+ egp.Spec.EgressIP.IPv6 = egw.Spec.Ippools.Ipv6DefaultEIP
+ }
+ egp.Spec.AppliedTo.PodSelector = deploy.Spec.Selector
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // wait policy status ready
+ err = common.WaitEgressPolicyStatusReady(ctx, cli, egp, egressConfig.EnableIPv4, egressConfig.EnableIPv6, time.Second*20)
+ if err != nil {
+ return nil, err
+ }
+ return egp, nil
+}
+
+func createClusterPolicy(ctx context.Context, egw *egressv1.EgressGateway, deploy *appsv1.Deployment) (*egressv1.EgressClusterPolicy, error) {
+ // create policy
+ egcp, err := common.CreateEgressClusterPolicyCustom(ctx, cli,
+ func(egcp *egressv1.EgressClusterPolicy) {
+ egcp.Spec.EgressGatewayName = egw.Name
+ if egressConfig.EnableIPv4 {
+ egcp.Spec.EgressIP.IPv4 = egw.Spec.Ippools.Ipv4DefaultEIP
+ }
+ if egressConfig.EnableIPv6 {
+ egcp.Spec.EgressIP.IPv6 = egw.Spec.Ippools.Ipv6DefaultEIP
+ }
+ egcp.Spec.AppliedTo.PodSelector = deploy.Spec.Selector
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // wait policy status ready
+ err = common.WaitEgressClusterPolicyStatusReady(ctx, cli, egcp, egressConfig.EnableIPv4, egressConfig.EnableIPv6, time.Second*20)
+ if err != nil {
+ return nil, err
+ }
+ return egcp, nil
+}
diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/LICENSE b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/LICENSE
deleted file mode 100644
index 8dada3eda..000000000
--- a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/register.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/register.go
deleted file mode 100644
index 8ea2a3028..000000000
--- a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/register.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package k8scnicncfio
-
-const (
- GroupName = "k8s.cni.cncf.io"
-)
diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/doc.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/doc.go
deleted file mode 100644
index 2882952a0..000000000
--- a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +k8s:deepcopy-gen=package,register
-// +groupName=k8s.cni.cncf.io
-// +groupGoName=K8sCniCncfIo
-
-package v1
diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/register.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/register.go
deleted file mode 100644
index e40da2572..000000000
--- a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/register.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- k8scnicncfio "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io"
-)
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: k8scnicncfio.GroupName, Version: "v1"}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
- SchemeBuilder runtime.SchemeBuilder
- localSchemeBuilder = &SchemeBuilder
- AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-func init() {
- // We only register manually written functions here. The registration of the
- // generated functions takes place in the generated files. The separation
- // makes the code compile even when the generated files are missing.
- localSchemeBuilder.Register(addKnownTypes)
-}
-
-// Adds the list of known types to api.Scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &NetworkAttachmentDefinition{},
- &NetworkAttachmentDefinitionList{},
- )
- metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/types.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/types.go
deleted file mode 100644
index 2b81d0482..000000000
--- a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/types.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "net"
-)
-
-// +genclient
-// +genclient:noStatus
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +resourceName=network-attachment-definitions
-
-type NetworkAttachmentDefinition struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec NetworkAttachmentDefinitionSpec `json:"spec"`
-}
-
-type NetworkAttachmentDefinitionSpec struct {
- Config string `json:"config"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type NetworkAttachmentDefinitionList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []NetworkAttachmentDefinition `json:"items"`
-}
-
-// DNS contains values interesting for DNS resolvers
-// +k8s:deepcopy-gen=false
-type DNS struct {
- Nameservers []string `json:"nameservers,omitempty"`
- Domain string `json:"domain,omitempty"`
- Search []string `json:"search,omitempty"`
- Options []string `json:"options,omitempty"`
-}
-
-const (
- DeviceInfoTypePCI = "pci"
- DeviceInfoTypeVHostUser = "vhost-user"
- DeviceInfoTypeMemif = "memif"
- DeviceInfoTypeVDPA = "vdpa"
- DeviceInfoVersion = "1.1.0"
-)
-
-// DeviceInfo contains the information of the device associated
-// with this network (if any)
-type DeviceInfo struct {
- Type string `json:"type,omitempty"`
- Version string `json:"version,omitempty"`
- Pci *PciDevice `json:"pci,omitempty"`
- Vdpa *VdpaDevice `json:"vdpa,omitempty"`
- VhostUser *VhostDevice `json:"vhost-user,omitempty"`
- Memif *MemifDevice `json:"memif,omitempty"`
-}
-
-type PciDevice struct {
- PciAddress string `json:"pci-address,omitempty"`
- Vhostnet string `json:"vhost-net,omitempty"`
- RdmaDevice string `json:"rdma-device,omitempty"`
- PfPciAddress string `json:"pf-pci-address,omitempty"`
- RepresentorDevice string `json:"representor-device,omitempty"`
-}
-
-type VdpaDevice struct {
- ParentDevice string `json:"parent-device,omitempty"`
- Driver string `json:"driver,omitempty"`
- Path string `json:"path,omitempty"`
- PciAddress string `json:"pci-address,omitempty"`
- PfPciAddress string `json:"pf-pci-address,omitempty"`
- RepresentorDevice string `json:"representor-device,omitempty"`
-}
-
-const (
- VhostDeviceModeClient = "client"
- VhostDeviceModeServer = "server"
-)
-
-type VhostDevice struct {
- Mode string `json:"mode,omitempty"`
- Path string `json:"path,omitempty"`
-}
-
-const (
- MemifDeviceRoleMaster = "master"
- MemitDeviceRoleSlave = "slave"
- MemifDeviceModeEthernet = "ethernet"
- MemitDeviceModeIP = "ip"
- MemitDeviceModePunt = "punt"
-)
-
-type MemifDevice struct {
- Role string `json:"role,omitempty"`
- Path string `json:"path,omitempty"`
- Mode string `json:"mode,omitempty"`
-}
-
-// NetworkStatus is for network status annotation for pod
-// +k8s:deepcopy-gen=false
-type NetworkStatus struct {
- Name string `json:"name"`
- Interface string `json:"interface,omitempty"`
- IPs []string `json:"ips,omitempty"`
- Mac string `json:"mac,omitempty"`
- Default bool `json:"default,omitempty"`
- DNS DNS `json:"dns,omitempty"`
- DeviceInfo *DeviceInfo `json:"device-info,omitempty"`
- Gateway []string `json:"gateway,omitempty"`
-}
-
-// PortMapEntry for CNI PortMapEntry
-// +k8s:deepcopy-gen=false
-type PortMapEntry struct {
- HostPort int `json:"hostPort"`
- ContainerPort int `json:"containerPort"`
- Protocol string `json:"protocol,omitempty"`
- HostIP string `json:"hostIP,omitempty"`
-}
-
-// BandwidthEntry for CNI BandwidthEntry
-// +k8s:deepcopy-gen=false
-type BandwidthEntry struct {
- IngressRate int `json:"ingressRate"`
- IngressBurst int `json:"ingressBurst"`
-
- EgressRate int `json:"egressRate"`
- EgressBurst int `json:"egressBurst"`
-}
-
-// NetworkSelectionElement represents one element of the JSON format
-// Network Attachment Selection Annotation as described in section 4.1.2
-// of the CRD specification.
-// +k8s:deepcopy-gen=false
-type NetworkSelectionElement struct {
- // Name contains the name of the Network object this element selects
- Name string `json:"name"`
- // Namespace contains the optional namespace that the network referenced
- // by Name exists in
- Namespace string `json:"namespace,omitempty"`
- // IPRequest contains an optional requested IP addresses for this network
- // attachment
- IPRequest []string `json:"ips,omitempty"`
- // MacRequest contains an optional requested MAC address for this
- // network attachment
- MacRequest string `json:"mac,omitempty"`
- // InfinibandGUIDRequest contains an optional requested Infiniband GUID
- // address for this network attachment
- InfinibandGUIDRequest string `json:"infiniband-guid,omitempty"`
- // InterfaceRequest contains an optional requested name for the
- // network interface this attachment will create in the container
- InterfaceRequest string `json:"interface,omitempty"`
- // PortMappingsRequest contains an optional requested port mapping
- // for the network
- PortMappingsRequest []*PortMapEntry `json:"portMappings,omitempty"`
- // BandwidthRequest contains an optional requested bandwidth for
- // the network
- BandwidthRequest *BandwidthEntry `json:"bandwidth,omitempty"`
- // CNIArgs contains additional CNI arguments for the network interface
- CNIArgs *map[string]interface{} `json:"cni-args,omitempty"`
- // GatewayRequest contains default route IP address for the pod
- GatewayRequest []net.IP `json:"default-route,omitempty"`
-}
-
-const (
- // Pod annotation for network-attachment-definition
- NetworkAttachmentAnnot = "k8s.v1.cni.cncf.io/networks"
- // Pod annotation for network status
- NetworkStatusAnnot = "k8s.v1.cni.cncf.io/network-status"
-)
-
-// NoK8sNetworkError indicates error, no network in kubernetes
-// +k8s:deepcopy-gen=false
-type NoK8sNetworkError struct {
- Message string
-}
-
-func (e *NoK8sNetworkError) Error() string { return string(e.Message) }
diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/zz_generated.deepcopy.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/zz_generated.deepcopy.go
deleted file mode 100644
index 9a7b1fcce..000000000
--- a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,202 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright 2021 The Kubernetes Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DeviceInfo) DeepCopyInto(out *DeviceInfo) {
- *out = *in
- if in.Pci != nil {
- in, out := &in.Pci, &out.Pci
- *out = new(PciDevice)
- **out = **in
- }
- if in.Vdpa != nil {
- in, out := &in.Vdpa, &out.Vdpa
- *out = new(VdpaDevice)
- **out = **in
- }
- if in.VhostUser != nil {
- in, out := &in.VhostUser, &out.VhostUser
- *out = new(VhostDevice)
- **out = **in
- }
- if in.Memif != nil {
- in, out := &in.Memif, &out.Memif
- *out = new(MemifDevice)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceInfo.
-func (in *DeviceInfo) DeepCopy() *DeviceInfo {
- if in == nil {
- return nil
- }
- out := new(DeviceInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MemifDevice) DeepCopyInto(out *MemifDevice) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemifDevice.
-func (in *MemifDevice) DeepCopy() *MemifDevice {
- if in == nil {
- return nil
- }
- out := new(MemifDevice)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkAttachmentDefinition) DeepCopyInto(out *NetworkAttachmentDefinition) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAttachmentDefinition.
-func (in *NetworkAttachmentDefinition) DeepCopy() *NetworkAttachmentDefinition {
- if in == nil {
- return nil
- }
- out := new(NetworkAttachmentDefinition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkAttachmentDefinition) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkAttachmentDefinitionList) DeepCopyInto(out *NetworkAttachmentDefinitionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]NetworkAttachmentDefinition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAttachmentDefinitionList.
-func (in *NetworkAttachmentDefinitionList) DeepCopy() *NetworkAttachmentDefinitionList {
- if in == nil {
- return nil
- }
- out := new(NetworkAttachmentDefinitionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkAttachmentDefinitionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkAttachmentDefinitionSpec) DeepCopyInto(out *NetworkAttachmentDefinitionSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAttachmentDefinitionSpec.
-func (in *NetworkAttachmentDefinitionSpec) DeepCopy() *NetworkAttachmentDefinitionSpec {
- if in == nil {
- return nil
- }
- out := new(NetworkAttachmentDefinitionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PciDevice) DeepCopyInto(out *PciDevice) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PciDevice.
-func (in *PciDevice) DeepCopy() *PciDevice {
- if in == nil {
- return nil
- }
- out := new(PciDevice)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VdpaDevice) DeepCopyInto(out *VdpaDevice) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VdpaDevice.
-func (in *VdpaDevice) DeepCopy() *VdpaDevice {
- if in == nil {
- return nil
- }
- out := new(VdpaDevice)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VhostDevice) DeepCopyInto(out *VhostDevice) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VhostDevice.
-func (in *VhostDevice) DeepCopy() *VhostDevice {
- if in == nil {
- return nil
- }
- out := new(VhostDevice)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/LICENSE b/vendor/github.com/spidernet-io/e2eframework/LICENSE
deleted file mode 100644
index cea09ee3f..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 SpiderNet-io Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/command.go b/vendor/github.com/spidernet-io/e2eframework/framework/command.go
deleted file mode 100644
index 9fafdf0f6..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/command.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-// operate node container, like shutdown, ssh to login
-func (f *Framework) ExecKubectl(command string, ctx context.Context) ([]byte, error) {
- args := fmt.Sprintf("kubectl --kubeconfig %s %s", f.Info.KubeConfigPath, command)
- return exec.CommandContext(ctx, "sh", "-c", args).CombinedOutput()
-}
-
-func (f *Framework) ExecCommandInPod(podName, nameSpace, command string, ctx context.Context) ([]byte, error) {
- command = fmt.Sprintf("exec %s -n %s -- %s", podName, nameSpace, command)
- return f.ExecKubectl(command, ctx)
-}
-
-// DockerExecCommand is eq to `docker exec $containerId $command `
-func (f *Framework) DockerExecCommand(ctx context.Context, containerId string, command string) ([]byte, error) {
- fullCommand := fmt.Sprintf("docker exec -i %s %s ", containerId, command)
- f.t.Logf(fullCommand)
- return exec.CommandContext(ctx, "/bin/sh", "-c", fullCommand).CombinedOutput()
-}
-
-// DockerRunCommand eq to `docker run $command`
-func (f *Framework) DockerRunCommand(ctx context.Context, command string) ([]byte, error) {
- fullCommand := fmt.Sprintf("docker run %s ", command)
- f.t.Logf(fullCommand)
- return exec.CommandContext(ctx, "/bin/sh", "-c", fullCommand).CombinedOutput()
-}
-
-// DockerRMCommand is eq to `docker rm $containerId`
-func (f *Framework) DockerRMCommand(ctx context.Context, containerId string) ([]byte, error) {
- fullCommand := fmt.Sprintf("docker rm -f %s", containerId)
- f.t.Logf(fullCommand)
- return exec.CommandContext(ctx, "/bin/bash", "-c", fullCommand).CombinedOutput()
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/configmap.go b/vendor/github.com/spidernet-io/e2eframework/framework/configmap.go
deleted file mode 100644
index 4ffd6bede..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/configmap.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "github.com/spidernet-io/e2eframework/tools"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- apitypes "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "time"
-)
-
-func (f *Framework) GetConfigmap(name, namespace string) (*corev1.ConfigMap, error) {
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- key := apitypes.NamespacedName{Namespace: namespace, Name: name}
- existing := &corev1.ConfigMap{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) CreateConfigmap(configMap *corev1.ConfigMap, opts ...client.CreateOption) error {
- if configMap == nil {
- return ErrWrongInput
- }
-
- fake := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: configMap.Namespace,
- Name: configMap.ObjectMeta.Name,
- },
- }
- key := client.ObjectKeyFromObject(fake)
- existing := &corev1.ConfigMap{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return ErrAlreadyExisted
- }
- t := func() bool {
- existing := &corev1.ConfigMap{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same configmap %v/%v to finish deleting \n", configMap.ObjectMeta.Namespace, configMap.ObjectMeta.Name)
- return false
- }
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
- return f.CreateResource(configMap, opts...)
-}
-
-func (f *Framework) DeleteConfigmap(name, namespace string, opts ...client.DeleteOption) error {
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
- cm := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- },
- }
- return f.DeleteResource(cm, opts...)
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/daemonset.go b/vendor/github.com/spidernet-io/e2eframework/framework/daemonset.go
deleted file mode 100644
index bd0979305..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/daemonset.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "time"
-
- "github.com/spidernet-io/e2eframework/tools"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/watch"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateDaemonSet(ds *appsv1.DaemonSet, opts ...client.CreateOption) error {
- // try to wait for finish last deleting
- fake := &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: ds.ObjectMeta.Namespace,
- Name: ds.ObjectMeta.Name,
- },
- }
-
- key := client.ObjectKeyFromObject(fake)
- existing := &appsv1.DaemonSet{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return ErrAlreadyExisted
- }
- t := func() bool {
- existing := &appsv1.DaemonSet{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same DaemonSet %v/%v to finish deleting \n", ds.ObjectMeta.Name, ds.ObjectMeta.Namespace)
- return false
- }
-
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
- return f.CreateResource(ds, opts...)
-}
-
-func (f *Framework) DeleteDaemonSet(name, namespace string, opts ...client.DeleteOption) error {
-
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
- ds := &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(ds, opts...)
-}
-
-func (f *Framework) GetDaemonSet(name, namespace string) (*appsv1.DaemonSet, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
- ds := &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- key := client.ObjectKeyFromObject(ds)
- existing := &appsv1.DaemonSet{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) GetDaemonSetPodList(ds *appsv1.DaemonSet) (*corev1.PodList, error) {
- if ds == nil {
- return nil, ErrWrongInput
- }
- pods := &corev1.PodList{}
- ops := []client.ListOption{
- client.MatchingLabelsSelector{
- Selector: labels.SelectorFromSet(ds.Spec.Selector.MatchLabels),
- },
- }
- e := f.ListResource(pods, ops...)
- if e != nil {
- return nil, e
- }
- return pods, nil
-}
-
-func (f *Framework) WaitDaemonSetReady(name, namespace string, ctx context.Context) (*appsv1.DaemonSet, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- l := &client.ListOptions{
- Namespace: namespace,
- FieldSelector: fields.OneTermEqualSelector("metadata.name", name),
- }
- watchInterface, err := f.KClient.Watch(ctx, &appsv1.DaemonSetList{}, l)
- if err != nil {
- return nil, ErrWatch
- }
- defer watchInterface.Stop()
-
- for {
- select {
- // if ds not exist , got no event
- case event, ok := <-watchInterface.ResultChan():
- if !ok {
- return nil, ErrChanelClosed
- }
- f.Log("DaemonSet %v/%v %v event \n", namespace, name, event.Type)
-
- switch event.Type {
- case watch.Error:
- return nil, ErrEvent
- case watch.Deleted:
- return nil, ErrResDel
- default:
- ds, ok := event.Object.(*appsv1.DaemonSet)
- if !ok {
- return nil, ErrGetObj
- }
-
- if ds.Status.NumberReady == 0 {
- break
-
- } else if ds.Status.NumberReady == ds.Status.DesiredNumberScheduled {
-
- return ds, nil
- }
- }
- case <-ctx.Done():
- return nil, ErrTimeOut
- }
- }
-}
-
-// Create Daemonset and wait for ready and check that the IP of the Pod is assigned correctly
-func (f *Framework) CreateDaemonsetUntilReady(ctx context.Context, dsObj *appsv1.DaemonSet, opts ...client.CreateOption) (*corev1.PodList, error) {
- if dsObj == nil {
- return nil, ErrWrongInput
- }
-
- err := f.CreateDaemonSet(dsObj, opts...)
- if err != nil {
- return nil, err
- }
- ds, err := f.WaitDaemonSetReady(dsObj.Name, dsObj.Namespace, ctx)
- if err != nil {
- return nil, err
- }
- // Assignment of IPv4 or IPv6 address successful
-OUTER:
- for {
- time.Sleep(time.Second)
- select {
- case <-ctx.Done():
- return nil, ErrTimeOut
- default:
- podList, err := f.GetPodListByLabel(ds.Spec.Selector.MatchLabels)
- if err != nil {
- return nil, err
- }
- err = f.CheckPodListIpReady(podList)
- if err != nil {
- continue OUTER
- }
- return podList, nil
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/deployment.go b/vendor/github.com/spidernet-io/e2eframework/framework/deployment.go
deleted file mode 100644
index 4445e1d35..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/deployment.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "errors"
- "time"
-
- "github.com/spidernet-io/e2eframework/tools"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/utils/pointer"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateDeployment(dpm *appsv1.Deployment, opts ...client.CreateOption) error {
- // try to wait for finish last deleting
- fake := &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: dpm.ObjectMeta.Namespace,
- Name: dpm.ObjectMeta.Name,
- },
- }
- key := client.ObjectKeyFromObject(fake)
- existing := &appsv1.Deployment{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return ErrAlreadyExisted
- }
- t := func() bool {
- existing := &appsv1.Deployment{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same deployment %v/%v to finish deleting \n", dpm.ObjectMeta.Namespace, dpm.ObjectMeta.Name)
- return false
- }
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
- return f.CreateResource(dpm, opts...)
-}
-
-func (f *Framework) DeleteDeployment(name, namespace string, opts ...client.DeleteOption) error {
-
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
-
- pod := &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(pod, opts...)
-}
-
-func (f *Framework) GetDeployment(name, namespace string) (*appsv1.Deployment, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- dpm := &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- key := client.ObjectKeyFromObject(dpm)
- existing := &appsv1.Deployment{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) GetDeploymentPodList(dpm *appsv1.Deployment) (*corev1.PodList, error) {
-
- if dpm == nil {
- return nil, ErrWrongInput
- }
-
- pods := &corev1.PodList{}
- opts := []client.ListOption{
- client.MatchingLabelsSelector{
- Selector: labels.SelectorFromSet(dpm.Spec.Selector.MatchLabels),
- },
- }
- e := f.ListResource(pods, opts...)
- if e != nil {
- return nil, e
- }
- return pods, nil
-}
-
-func (f *Framework) ScaleDeployment(dpm *appsv1.Deployment, replicas int32) (*appsv1.Deployment, error) {
- if dpm == nil {
- return nil, ErrWrongInput
- }
-
- dpm.Spec.Replicas = pointer.Int32(replicas)
- err := f.UpdateResource(dpm)
- if err != nil {
- return nil, err
- }
- return dpm, nil
-}
-
-func (f *Framework) WaitDeploymentReady(name, namespace string, ctx context.Context) (*appsv1.Deployment, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- l := &client.ListOptions{
- Namespace: namespace,
- FieldSelector: fields.OneTermEqualSelector("metadata.name", name),
- }
- watchInterface, err := f.KClient.Watch(ctx, &appsv1.DeploymentList{}, l)
- if err != nil {
- return nil, ErrWatch
- }
- defer watchInterface.Stop()
-
- for {
- select {
- case event, ok := <-watchInterface.ResultChan():
- f.Log("deployment %v/%v\n", event, ok)
- if !ok {
- return nil, ErrChanelClosed
- }
- f.Log("deployment %v/%v %v event \n", namespace, name, event.Type)
- switch event.Type {
- case watch.Error:
- return nil, ErrEvent
- case watch.Deleted:
- return nil, ErrResDel
- default:
- dpm, ok := event.Object.(*appsv1.Deployment)
- if !ok {
- return nil, ErrGetObj
- }
- f.Log("deployment %v/%v readyReplicas=%+v\n", namespace, name, dpm.Status.ReadyReplicas)
- if dpm.Status.ReadyReplicas == *(dpm.Spec.Replicas) {
- return dpm, nil
- }
- }
- case <-ctx.Done():
- return nil, ErrTimeOut
- }
- }
-}
-
-func (f *Framework) CreateDeploymentUntilReady(deployObj *appsv1.Deployment, timeOut time.Duration, opts ...client.CreateOption) (*appsv1.Deployment, error) {
- if deployObj == nil {
- return nil, ErrWrongInput
- }
-
- // create deployment
- err := f.CreateDeployment(deployObj, opts...)
- if err != nil {
- return nil, err
- }
-
- // wait deployment ready
- ctx, cancel := context.WithTimeout(context.Background(), timeOut)
- defer cancel()
- deploy, e := f.WaitDeploymentReady(deployObj.Name, deployObj.Namespace, ctx)
- if e != nil {
- return nil, e
- }
- return deploy, nil
-}
-
-func (f *Framework) DeleteDeploymentUntilFinish(deployName, namespace string, timeOut time.Duration, opts ...client.DeleteOption) error {
- if deployName == "" || namespace == "" {
- return ErrWrongInput
- }
- // get deployment
- deployment, err1 := f.GetDeployment(deployName, namespace)
- if err1 != nil {
- return err1
- }
- // delete deployment
- err := f.DeleteDeployment(deployment.Name, deployment.Namespace, opts...)
- if err != nil {
- return err
- }
- // check delete deployment successfully
- ctx, cancel := context.WithTimeout(context.Background(), timeOut)
- defer cancel()
- b, e := func() (bool, error) {
- for {
- select {
- case <-ctx.Done():
- return false, ErrTimeOut
- default:
- deployment, _ := f.GetDeployment(deployment.Name, deployment.Namespace)
- if deployment == nil {
- return true, nil
- }
- time.Sleep(time.Second)
- }
- }
- }()
- if b {
- // check PodList not exists by label
- err := f.WaitPodListDeleted(deployment.Namespace, deployment.Spec.Selector.MatchLabels, ctx)
- if err != nil {
- return err
- }
- return nil
- }
- return e
-}
-
-func (f *Framework) WaitDeploymentReadyAndCheckIP(depName string, nsName string, timeout time.Duration) (*corev1.PodList, error) {
- // waiting for Deployment replicas to complete
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- dep, e := f.WaitDeploymentReady(depName, nsName, ctx)
- if e != nil {
- return nil, e
- }
-
- // check pods created by Deployment,its assign ipv4 and ipv6 addresses success
- podlist, err := f.GetDeploymentPodList(dep)
- if err != nil {
- return nil, err
- }
-
- // check IP address allocation succeeded
- errip := f.CheckPodListIpReady(podlist)
- if errip != nil {
- return nil, errip
- }
- return podlist, errip
-}
-
-func (f *Framework) RestartDeploymentPodUntilReady(deployName, namespace string, timeOut time.Duration, opts ...client.DeleteOption) error {
- if deployName == "" || namespace == "" {
- return ErrWrongInput
- }
-
- deployment, err := f.GetDeployment(deployName, namespace)
- if deployment == nil {
- return errors.New("failed to get deployment")
- }
- if err != nil {
- return err
- }
- podList, err := f.GetDeploymentPodList(deployment)
-
- if len(podList.Items) == 0 {
- return errors.New("failed to get podList")
- }
- if err != nil {
- return err
- }
- _, err = f.DeletePodListUntilReady(podList, timeOut, opts...)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/endpoint.go b/vendor/github.com/spidernet-io/e2eframework/framework/endpoint.go
deleted file mode 100644
index c675fb6c7..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/endpoint.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "fmt"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) GetEndpoint(name, namespace string) (*corev1.Endpoints, error) {
- key := types.NamespacedName{
- Name: name,
- Namespace: namespace,
- }
- ep := &corev1.Endpoints{}
- return ep, f.GetResource(key, ep)
-}
-
-func (f *Framework) ListEndpoint(options ...client.ListOption) (*corev1.EndpointsList, error) {
- eps := &corev1.EndpointsList{}
- err := f.ListResource(eps, options...)
- if err != nil {
- return nil, err
- }
- return eps, nil
-}
-
-// CreateEndpoint create an endpoint to testing GetEndpoint/ListEndpoint
-func (f *Framework) CreateEndpoint(ep *corev1.Endpoints, opts ...client.CreateOption) error {
- key := &corev1.Endpoints{
- ObjectMeta: metav1.ObjectMeta{
- Name: ep.ObjectMeta.Name,
- Namespace: ep.ObjectMeta.Namespace,
- },
- }
- fake := client.ObjectKeyFromObject(key)
- existing := &corev1.Endpoints{}
- e := f.GetResource(fake, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return fmt.Errorf("failed to create , a same endpoint %v/%v exists", ep.ObjectMeta.Namespace, ep.ObjectMeta.Name)
- }
- return f.CreateResource(ep, opts...)
-}
-
-func (f *Framework) DeleteEndpoint(name, namespace string, opts ...client.DeleteOption) error {
- ep := &corev1.Endpoints{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(ep, opts...)
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/error.go b/vendor/github.com/spidernet-io/e2eframework/framework/error.go
deleted file mode 100644
index 333219d4f..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/error.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import "errors"
-
-var ErrWrongInput = errors.New("input variable is not valid")
-var ErrTimeOut = errors.New("context timeout")
-var ErrChanelClosed = errors.New("channel is closed")
-var ErrWatch = errors.New("failed to Watch")
-var ErrEvent = errors.New("received error event")
-var ErrResDel = errors.New("resource is deleted")
-var ErrGetObj = errors.New("failed to get metaObject")
-var ErrAlreadyExisted = errors.New("failed to create , a same Controller %v/%v exist")
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/events.go b/vendor/github.com/spidernet-io/e2eframework/framework/events.go
deleted file mode 100644
index 667b2d416..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/events.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "fmt"
- "strings"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/watch"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) WaitExceptEventOccurred(ctx context.Context, eventKind, objName, objNamespace, message string) error {
-
- if eventKind == "" || objName == "" || objNamespace == "" || message == "" {
- return ErrWrongInput
- }
- l := &client.ListOptions{
- Raw: &metav1.ListOptions{
- TypeMeta: metav1.TypeMeta{Kind: eventKind},
- FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", objName, objNamespace),
- },
- }
- watchInterface, err := f.KClient.Watch(ctx, &corev1.EventList{}, l)
- if err != nil {
- return ErrWatch
- }
- defer watchInterface.Stop()
- for {
- select {
- case <-ctx.Done():
- return ErrTimeOut
- case event, ok := <-watchInterface.ResultChan():
- if !ok {
- return ErrChanelClosed
- }
- f.Log("watch event object %v", event.Object)
- switch event.Type {
- case watch.Error:
- return ErrEvent
- case watch.Deleted:
- return ErrResDel
- default:
- event, ok := event.Object.(*corev1.Event)
- if !ok {
- return ErrGetObj
- }
- f.Log("Event occurred message is %v \n", event.Message)
- if strings.Contains(event.Message, message) {
- return nil
- }
- }
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/framework.go b/vendor/github.com/spidernet-io/e2eframework/framework/framework.go
deleted file mode 100644
index 1ed226bd6..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/framework.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-
-package framework
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/mohae/deepcopy"
- batchv1 "k8s.io/api/batch/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "fmt"
- "os"
- "strconv"
-
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- apiextensions_v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-)
-
-// -----------------------------
-
-type ClusterInfo struct {
- IpV4Enabled bool
- IpV6Enabled bool
- MultusEnabled bool
- SpiderIPAMEnabled bool
- WhereaboutIPAMEnabled bool
- ClusterName string
- KubeConfigPath string
- // docker container name for kind cluster
- KindNodeList []string
- KindNodeListRaw string
- // multus
- MultusDefaultCni string
- MultusAdditionalCni string
- SpiderSubnetEnabled bool
-}
-
-var ClusterInformation = &ClusterInfo{}
-
-type envconfig struct {
- EnvName string
- DestStr *string
- DestBool *bool
- Default string
- Required bool
- BoolType bool
-}
-
-const (
- E2E_CLUSTER_NAME = "E2E_CLUSTER_NAME"
- E2E_KUBECONFIG_PATH = "E2E_KUBECONFIG_PATH"
- E2E_IPV4_ENABLED = "E2E_IPV4_ENABLED"
- E2E_IPV6_ENABLED = "E2E_IPV6_ENABLED"
- E2E_MULTUS_CNI_ENABLED = "E2E_MULTUS_CNI_ENABLED"
- E2E_SPIDERPOOL_IPAM_ENABLED = "E2E_SPIDERPOOL_IPAM_ENABLED"
- E2E_WHEREABOUT_IPAM_ENABLED = "E2E_WHEREABOUT_IPAM_ENABLED"
- E2E_KIND_CLUSTER_NODE_LIST = "E2E_KIND_CLUSTER_NODE_LIST"
- E2E_Multus_DefaultCni = "E2E_Multus_DefaultCni"
- E2E_Multus_AdditionalCni = "E2E_Multus_AdditionalCni"
- E2E_SPIDERSUBNET_ENABLED = "E2E_SPIDERSUBNET_ENABLED"
-)
-
-var envConfigList = []envconfig{
- // --- multus field
- {EnvName: E2E_Multus_DefaultCni, DestStr: &ClusterInformation.MultusDefaultCni, Default: "", Required: false},
- {EnvName: E2E_Multus_AdditionalCni, DestStr: &ClusterInformation.MultusAdditionalCni, Default: "", Required: false},
- // --- require field
- {EnvName: E2E_CLUSTER_NAME, DestStr: &ClusterInformation.ClusterName, Default: "", Required: true},
- {EnvName: E2E_KUBECONFIG_PATH, DestStr: &ClusterInformation.KubeConfigPath, Default: "", Required: true},
- // ---- optional field
- {EnvName: E2E_IPV4_ENABLED, DestBool: &ClusterInformation.IpV4Enabled, Default: "true", Required: false},
- {EnvName: E2E_IPV6_ENABLED, DestBool: &ClusterInformation.IpV6Enabled, Default: "true", Required: false},
- {EnvName: E2E_MULTUS_CNI_ENABLED, DestBool: &ClusterInformation.MultusEnabled, Default: "false", Required: false},
- {EnvName: E2E_SPIDERPOOL_IPAM_ENABLED, DestBool: &ClusterInformation.SpiderIPAMEnabled, Default: "false", Required: false},
- {EnvName: E2E_WHEREABOUT_IPAM_ENABLED, DestBool: &ClusterInformation.WhereaboutIPAMEnabled, Default: "false", Required: false},
- // ---- kind field
- {EnvName: E2E_KIND_CLUSTER_NODE_LIST, DestStr: &ClusterInformation.KindNodeListRaw, Default: "false", Required: false},
- // ---- subnet field
- {EnvName: E2E_SPIDERSUBNET_ENABLED, DestBool: &ClusterInformation.SpiderSubnetEnabled, Default: "true", Required: false},
-
- // ---- vagrant field
-}
-
-// -------------------------------------------
-
-type FConfig struct {
- ApiOperateTimeout time.Duration
- ResourceDeleteTimeout time.Duration
-}
-
-type Framework struct {
- // clienset
- KClient client.WithWatch
- KConfig *rest.Config
-
- // cluster info
- Info ClusterInfo
-
- t TestingT
- Config FConfig
- EnableLog bool
-}
-
-// -------------------------------------------
-type TestingT interface {
- Logf(format string, args ...interface{})
-}
-
-var (
- Default_k8sClient_QPS float32 = 200
- Default_k8sClient_Burst int = 300
-
- Default_k8sClient_ApiOperateTimeout = 15 * time.Second
- Default_k8sClient_ResourceDeleteTimeout = 60 * time.Second
-)
-
-// NewFramework init Framework struct
-// fakeClient for unitest
-func NewFramework(t TestingT, schemeRegisterList []func(*runtime.Scheme) error, fakeClient ...client.WithWatch) (*Framework, error) {
-
- if t == nil {
- return nil, fmt.Errorf("miss TestingT")
- }
-
- var err error
- var ok bool
-
- // defer GinkgoRecover()
- if len(ClusterInformation.ClusterName) == 0 {
- if e := initClusterInfo(); e != nil {
- return nil, e
- }
- }
-
- f := &Framework{}
- f.t = t
- f.EnableLog = true
-
- v := deepcopy.Copy(*ClusterInformation)
- f.Info, ok = v.(ClusterInfo)
- if !ok {
- return nil, fmt.Errorf("internal error, failed to deepcopy")
- }
-
- if fakeClient != nil {
- f.KClient = fakeClient[0]
- } else {
- if f.Info.KubeConfigPath == "" {
- return nil, fmt.Errorf("miss KubeConfig Path")
- }
- f.KConfig, err = clientcmd.BuildConfigFromFlags("", f.Info.KubeConfigPath)
- if err != nil {
- return nil, fmt.Errorf("BuildConfigFromFlags failed % v", err)
- }
- f.KConfig.QPS = Default_k8sClient_QPS
- f.KConfig.Burst = Default_k8sClient_Burst
-
- scheme := runtime.NewScheme()
- err = corev1.AddToScheme(scheme)
- if err != nil {
- return nil, fmt.Errorf("failed to add runtime Scheme : %v", err)
- }
-
- err = appsv1.AddToScheme(scheme)
- if err != nil {
- return nil, fmt.Errorf("failed to add appsv1 Scheme : %v", err)
- }
-
- err = batchv1.AddToScheme(scheme)
- if err != nil {
- return nil, fmt.Errorf("failed to add batchv1 Scheme")
- }
-
- err = apiextensions_v1.AddToScheme(scheme)
- if err != nil {
- return nil, fmt.Errorf("failed to add apiextensions_v1 Scheme : %v", err)
- }
- // f.Client, err = client.New(f.kConfig, client.Options{Scheme: scheme})
-
- for n, v := range schemeRegisterList {
- if err := v(scheme); err != nil {
- return nil, fmt.Errorf("failed to add schemeRegisterList[%v], reason=%v ", n, err)
- }
- }
-
- f.KClient, err = client.NewWithWatch(f.KConfig, client.Options{Scheme: scheme})
- if err != nil {
- return nil, fmt.Errorf("failed to new clientset: %v", err)
- }
- }
-
- f.Config.ApiOperateTimeout = Default_k8sClient_ApiOperateTimeout
- f.Config.ResourceDeleteTimeout = Default_k8sClient_ResourceDeleteTimeout
-
- f.t.Logf("Framework ClusterInfo: %+v \n", f.Info)
- f.t.Logf("Framework Config: %+v \n", f.Config)
-
- return f, nil
-}
-
-// ------------- basic operate
-
-func (f *Framework) CreateResource(obj client.Object, opts ...client.CreateOption) error {
- ctx1, cancel1 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel1()
- return f.KClient.Create(ctx1, obj, opts...)
-}
-
-func (f *Framework) DeleteResource(obj client.Object, opts ...client.DeleteOption) error {
- ctx2, cancel2 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel2()
- return f.KClient.Delete(ctx2, obj, opts...)
-}
-
-func (f *Framework) GetResource(key client.ObjectKey, obj client.Object) error {
- ctx3, cancel3 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel3()
- return f.KClient.Get(ctx3, key, obj)
-}
-
-func (f *Framework) ListResource(list client.ObjectList, opts ...client.ListOption) error {
- ctx4, cancel4 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel4()
- return f.KClient.List(ctx4, list, opts...)
-}
-
-func (f *Framework) UpdateResource(obj client.Object, opts ...client.UpdateOption) error {
- ctx5, cancel5 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel5()
- return f.KClient.Update(ctx5, obj, opts...)
-}
-
-func (f *Framework) UpdateResourceStatus(obj client.Object, opts ...client.SubResourceUpdateOption) error {
- ctx6, cancel6 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel6()
- return f.KClient.Status().Update(ctx6, obj, opts...)
-}
-
-func (f *Framework) PatchResource(obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
- ctx7, cancel7 := context.WithTimeout(context.Background(), f.Config.ApiOperateTimeout)
- defer cancel7()
- return f.KClient.Patch(ctx7, obj, patch, opts...)
-}
-
-func initClusterInfo() error {
-
- for _, v := range envConfigList {
- t := os.Getenv(v.EnvName)
- if len(t) == 0 && v.Required {
- return fmt.Errorf("error, failed to get ENV %s", v.EnvName)
- }
- r := v.Default
- if len(t) > 0 {
- r = t
- }
- if v.DestStr != nil {
- *(v.DestStr) = r
- } else {
- if s, err := strconv.ParseBool(r); err != nil {
- return fmt.Errorf("error, %v require a bool value, but get %v", v.EnvName, r)
- } else {
- *(v.DestBool) = s
- }
- }
- }
-
- if len(ClusterInformation.KindNodeListRaw) > 0 {
- ClusterInformation.KindNodeList = strings.Split(ClusterInformation.KindNodeListRaw, ",")
- }
- return nil
-
-}
-
-func (f *Framework) Log(format string, args ...interface{}) {
- if f.EnableLog {
- f.t.Logf(format, args...)
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/job.go b/vendor/github.com/spidernet-io/e2eframework/framework/job.go
deleted file mode 100644
index 40a61be34..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/job.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "time"
-
- "github.com/spidernet-io/e2eframework/tools"
-
- //appsv1beta2 "k8s.io/api/apps/v1beta2"
- batchv1 "k8s.io/api/batch/v1"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateJob(jb *batchv1.Job, opts ...client.CreateOption) error {
-
-	// try to wait for the previous deletion to finish
- fake := &batchv1.Job{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: jb.ObjectMeta.Namespace,
- Name: jb.ObjectMeta.Name,
- },
- }
- key := client.ObjectKeyFromObject(fake)
- existing := &batchv1.Job{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return ErrAlreadyExisted
- }
- t := func() bool {
- existing := &batchv1.Job{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same Job %v/%v to finish deleting \n", jb.ObjectMeta.Name, jb.ObjectMeta.Namespace)
- return false
- }
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
-
- return f.CreateResource(jb, opts...)
-}
-
-func (f *Framework) DeleteJob(name, namespace string, opts ...client.DeleteOption) error {
- if name == "" || namespace == "" {
- return ErrWrongInput
-
- }
-
- jb := &batchv1.Job{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(jb, opts...)
-}
-
-func (f *Framework) GetJob(name, namespace string) (*batchv1.Job, error) {
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- jb := &batchv1.Job{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- key := client.ObjectKeyFromObject(jb)
- existing := &batchv1.Job{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) GetJobPodList(jb *batchv1.Job) (*corev1.PodList, error) {
- if jb == nil {
- return nil, ErrWrongInput
- }
- pods := &corev1.PodList{}
- ops := []client.ListOption{
- client.MatchingLabelsSelector{
- Selector: labels.SelectorFromSet(jb.Spec.Selector.MatchLabels),
- },
- }
- e := f.ListResource(pods, ops...)
- if e != nil {
- return nil, e
- }
- return pods, nil
-}
-
-// WaitJobFinished waits for all pods of the job to finish, whether they succeed or fail
-func (f *Framework) WaitJobFinished(jobName, namespace string, ctx context.Context) (*batchv1.Job, bool, error) {
- for {
- select {
- default:
- job, err := f.GetJob(jobName, namespace)
- if err != nil {
- return nil, false, err
- }
- for _, c := range job.Status.Conditions {
- if c.Type == batchv1.JobFailed && c.Status == corev1.ConditionTrue {
- return job, false, nil
- }
- if c.Type == batchv1.JobComplete && c.Status == corev1.ConditionTrue {
- return job, true, nil
- }
- }
-
- time.Sleep(time.Second)
- case <-ctx.Done():
- return nil, false, ErrTimeOut
-
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/multus.go b/vendor/github.com/spidernet-io/e2eframework/framework/multus.go
deleted file mode 100644
index 7d6f402f8..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/multus.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "fmt"
- "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) GetMultusInstance(name, namespace string) (*v1.NetworkAttachmentDefinition, error) {
- obj := &v1.NetworkAttachmentDefinition{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- },
- }
-
- key := client.ObjectKeyFromObject(obj)
- nad := &v1.NetworkAttachmentDefinition{}
- if err := f.GetResource(key, nad); err != nil {
- return nil, err
- }
- return nad, nil
-}
-
-func (f *Framework) ListMultusInstances(opts ...client.ListOption) (*v1.NetworkAttachmentDefinitionList, error) {
- nads := &v1.NetworkAttachmentDefinitionList{}
- if err := f.ListResource(nads, opts...); err != nil {
- return nil, err
- }
-
- return nads, nil
-}
-
-func (f *Framework) CreateMultusInstance(nad *v1.NetworkAttachmentDefinition, opts ...client.CreateOption) error {
- exist, err := f.GetMultusInstance(nad.Name, nad.Namespace)
- if err == nil && exist.DeletionTimestamp == nil {
-		return fmt.Errorf("failed to create %s/%s, instance already exists", nad.ObjectMeta.Namespace, nad.ObjectMeta.Name)
- }
- return f.CreateResource(nad, opts...)
-}
-
-func (f *Framework) DeleteMultusInstance(name, namespace string) error {
- return f.DeleteResource(&v1.NetworkAttachmentDefinition{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- },
- })
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/namespace.go b/vendor/github.com/spidernet-io/e2eframework/framework/namespace.go
deleted file mode 100644
index 39398287a..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/namespace.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/spidernet-io/e2eframework/tools"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateNamespace(nsName string, opts ...client.CreateOption) error {
- ns := &corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: nsName,
- // Labels: map[string]string{"spiderpool-e2e-ns": "true"},
- },
- TypeMeta: metav1.TypeMeta{
- Kind: "Namespace",
- APIVersion: "v1",
- },
- }
-
- key := client.ObjectKeyFromObject(ns)
- existing := &corev1.Namespace{}
- e := f.GetResource(key, existing)
-
- if e == nil && existing.Status.Phase == corev1.NamespaceTerminating {
- r := func() bool {
- existing := &corev1.Namespace{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same namespace %v to finish deleting \n", nsName)
- return false
- }
- return true
- }
- if !tools.Eventually(r, f.Config.ResourceDeleteTimeout, time.Second) {
- return fmt.Errorf("time out to wait a deleting namespace")
- }
- }
- return f.CreateResource(ns, opts...)
-}
-
-func (f *Framework) CreateNamespaceUntilDefaultServiceAccountReady(nsName string, timeoutForSA time.Duration, opts ...client.CreateOption) error {
- if nsName == "" {
- return ErrWrongInput
- }
- err := f.CreateNamespace(nsName, opts...)
- if err != nil {
- return err
- }
- if timeoutForSA != 0 {
- err = f.WaitServiceAccountReady("default", nsName, timeoutForSA)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (f *Framework) GetNamespace(nsName string) (*corev1.Namespace, error) {
- if nsName == "" {
- return nil, ErrWrongInput
- }
- ns := &corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: nsName,
- },
- TypeMeta: metav1.TypeMeta{
- Kind: "Namespace",
- APIVersion: "v1",
- },
- }
- key := client.ObjectKeyFromObject(ns)
- namespace := &corev1.Namespace{}
- err := f.GetResource(key, namespace)
- if err != nil {
- return nil, err
- }
- return namespace, nil
-}
-
-func (f *Framework) DeleteNamespace(nsName string, opts ...client.DeleteOption) error {
-
- if nsName == "" {
- return ErrWrongInput
- }
-
- ns := &corev1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: nsName,
- },
- }
- return f.DeleteResource(ns, opts...)
-}
-
-func (f *Framework) DeleteNamespaceUntilFinish(nsName string, ctx context.Context, opts ...client.DeleteOption) error {
- if nsName == "" {
- return ErrWrongInput
- }
- err := f.DeleteNamespace(nsName, opts...)
- if err != nil {
- return err
- }
- for {
- select {
- default:
- namespace, _ := f.GetNamespace(nsName)
- if namespace == nil {
- return nil
- }
- time.Sleep(time.Second)
- case <-ctx.Done():
- return ErrTimeOut
- }
- }
-
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/node.go b/vendor/github.com/spidernet-io/e2eframework/framework/node.go
deleted file mode 100644
index cb3b7c1f4..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/node.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "time"
-
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) GetNode(nodeName string) (*corev1.Node, error) {
- ctx := context.TODO()
- node := &corev1.Node{}
- err1 := f.KClient.Get(ctx, types.NamespacedName{Name: nodeName}, node)
- if err1 != nil {
- return nil, err1
- }
- return node, nil
-}
-
-func (f *Framework) WaitClusterNodeReady(ctx context.Context) (bool, error) {
- for {
- select {
- default:
- nodeReadyNum := 0
- nodes, e := f.GetNodeList()
- if e != nil {
- return false, e
- }
- for _, node := range nodes.Items {
- if f.CheckNodeStatus(&node, true) {
- nodeReadyNum++
- }
- }
- if nodeReadyNum == len(nodes.Items) {
- return true, nil
- }
- time.Sleep(time.Second)
- case <-ctx.Done():
- return false, ErrTimeOut
- }
- }
-}
-
-func (f *Framework) GetNodeList(opts ...client.ListOption) (*corev1.NodeList, error) {
- nodes := &corev1.NodeList{}
- e := f.ListResource(nodes, opts...)
- if e != nil {
- return nil, e
- }
- return nodes, nil
-}
-
-func (f *Framework) CheckNodeStatus(node *corev1.Node, expectReady bool) bool {
-
- unreachTaintTemp := &corev1.Taint{
- Key: corev1.TaintNodeUnreachable,
- Effect: corev1.TaintEffectNoExecute,
- }
- notReadyTaintTemp := &corev1.Taint{
- Key: corev1.TaintNodeNotReady,
- Effect: corev1.TaintEffectNoExecute,
- }
- for _, cond := range node.Status.Conditions {
-		// check whether the ready node has taints
- if cond.Type == corev1.NodeReady {
- haveTaints := false
- tat := node.Spec.Taints
- for _, tat := range tat {
- if tat.MatchTaint(unreachTaintTemp) || tat.MatchTaint(notReadyTaintTemp) {
- haveTaints = true
- break
- }
- }
- if expectReady {
- if (cond.Status == corev1.ConditionTrue) && !haveTaints {
- return true
- }
- return false
- }
- if cond.Status != corev1.ConditionTrue {
- return true
- }
- f.Log("nodename: %s is %v Reason: %v, message: %v",
- node.Name, cond.Status == corev1.ConditionTrue, cond.Reason, cond.Message)
- return false
- }
- }
- f.Log("%v failed to find condition %v", node.Name, corev1.NodeReady)
- return false
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/pod.go b/vendor/github.com/spidernet-io/e2eframework/framework/pod.go
deleted file mode 100644
index 2ad128f7d..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/pod.go
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/spidernet-io/e2eframework/tools"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/kubectl/pkg/util/podutils"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreatePod(pod *corev1.Pod, opts ...client.CreateOption) error {
-	// try to wait for the previous deletion to finish
- fake := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: pod.ObjectMeta.Namespace,
- Name: pod.ObjectMeta.Name,
- },
- }
- key := client.ObjectKeyFromObject(fake)
- existing := &corev1.Pod{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
-		return fmt.Errorf("failed to create: a pod with the same name %v/%v already exists", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
- } else {
- t := func() bool {
- existing := &corev1.Pod{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same pod %v/%v to finish deleting \n", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
- return false
- }
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
- }
- return f.CreateResource(pod, opts...)
-}
-
-func (f *Framework) DeletePod(name, namespace string, opts ...client.DeleteOption) error {
-
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
-
- pod := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(pod, opts...)
-}
-
-func (f *Framework) GetPod(name, namespace string) (*corev1.Pod, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- pod := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- key := client.ObjectKeyFromObject(pod)
- existing := &corev1.Pod{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) GetPodList(opts ...client.ListOption) (*corev1.PodList, error) {
- pods := &corev1.PodList{}
- e := f.ListResource(pods, opts...)
- if e != nil {
- return nil, e
- }
- return pods, nil
-}
-
-func (f *Framework) WaitPodStarted(name, namespace string, ctx context.Context) (*corev1.Pod, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- // refer to https://github.com/kubernetes-sigs/controller-runtime/blob/master/pkg/client/watch_test.go
- l := &client.ListOptions{
- Namespace: namespace,
- FieldSelector: fields.OneTermEqualSelector("metadata.name", name),
- }
- watchInterface, err := f.KClient.Watch(ctx, &corev1.PodList{}, l)
- if err != nil {
- return nil, ErrWatch
- }
- defer watchInterface.Stop()
-
- for {
- select {
-		// if the pod does not exist, no event is received
- case event, ok := <-watchInterface.ResultChan():
- if !ok {
- return nil, ErrChanelClosed
- }
- f.Log("pod %v/%v %v event \n", namespace, name, event.Type)
- // Added EventType = "ADDED"
- // Modified EventType = "MODIFIED"
- // Deleted EventType = "DELETED"
- // Bookmark EventType = "BOOKMARK"
- // Error EventType = "ERROR"
- switch event.Type {
- case watch.Error:
- return nil, fmt.Errorf("received error event: %+v", event)
- case watch.Deleted:
- return nil, fmt.Errorf("resource is deleted")
- default:
- pod, ok := event.Object.(*corev1.Pod)
- // metaObject, ok := event.Object.(metav1.Object)
- if !ok {
- return nil, fmt.Errorf("failed to get metaObject")
- }
- f.Log("pod %v/%v status=%+v\n", namespace, name, pod.Status.Phase)
- if pod.Status.Phase == corev1.PodPending || pod.Status.Phase == corev1.PodUnknown {
- break
- } else {
- return pod, nil
- }
- }
- case <-ctx.Done():
- return nil, ErrTimeOut
- }
- }
-}
-
-func (f *Framework) WaitPodListDeleted(namespace string, label map[string]string, ctx context.Context) error {
-	// Query all pods matching the label
-	// and wait until none remain
-
- if namespace == "" || label == nil {
- return ErrWrongInput
- }
-
- opts := []client.ListOption{
- client.InNamespace(namespace),
- client.MatchingLabels(label),
- }
- for {
- select {
- case <-ctx.Done():
- return ErrTimeOut
- default:
- podlist, err := f.GetPodList(opts...)
- if err != nil {
- return err
- } else if len(podlist.Items) == 0 {
- return nil
- }
- time.Sleep(time.Second)
- }
- }
-}
-
-func (f *Framework) DeletePodUntilFinish(name, namespace string, ctx context.Context, opts ...client.DeleteOption) error {
-	// Delete the named pod in the namespace
-	// and wait until it can no longer be found
- if namespace == "" || name == "" {
- return ErrWrongInput
- }
- err := f.DeletePod(name, namespace, opts...)
- if err != nil {
- return err
- }
- for {
- select {
- case <-ctx.Done():
- return ErrTimeOut
- default:
- pod, _ := f.GetPod(name, namespace)
- if pod == nil {
- return nil
- }
- time.Sleep(time.Second)
- }
- }
-}
-
-func (f *Framework) CheckPodListIpReady(podList *corev1.PodList) error {
- var v4IpList = make(map[string]string)
- var v6IpList = make(map[string]string)
-
- for _, pod := range podList.Items {
- if pod.Status.PodIPs == nil {
- return fmt.Errorf("pod %v failed to assign ip", pod.Name)
- }
- f.Log("pod %v ips: %+v \n", pod.Name, pod.Status.PodIPs)
- if f.Info.IpV4Enabled {
- ip, ok := tools.CheckPodIpv4IPReady(&pod)
- if !ok {
- return fmt.Errorf("pod %v failed to get ipv4 ip", pod.Name)
- }
- if d, ok := v4IpList[ip]; ok {
- return fmt.Errorf("pod %v and %v have conflicted ipv4 ip %v", pod.Name, d, ip)
- }
- v4IpList[ip] = pod.Name
- f.Log("succeeded to check pod %v ipv4 ip \n", pod.Name)
- }
- if f.Info.IpV6Enabled {
-			ip, ok := tools.CheckPodIpv6IPReady(&pod)
-			if !ok {
- return fmt.Errorf("pod %v failed to get ipv6 ip", pod.Name)
- }
- if d, ok := v6IpList[ip]; ok {
- return fmt.Errorf("pod %v and %v have conflicted ipv6 ip %v", pod.Name, d, ip)
- }
- v6IpList[ip] = pod.Name
- f.Log("succeeded to check pod %v ipv6 ip \n", pod.Name)
- }
- }
- return nil
-}
-
-func (f *Framework) GetPodListByLabel(label map[string]string) (*corev1.PodList, error) {
- if label == nil {
- return nil, ErrWrongInput
- }
- ops := []client.ListOption{
- client.MatchingLabels(label),
- }
- return f.GetPodList(ops...)
-}
-
-func (f *Framework) CheckPodListRunning(podList *corev1.PodList) bool {
- if podList == nil {
- return false
- }
- for _, item := range podList.Items {
- if item.Status.Phase != "Running" {
- return false
- }
- }
- return true
-}
-
-func (f *Framework) DeletePodList(podList *corev1.PodList, opts ...client.DeleteOption) error {
- if podList == nil {
- return ErrWrongInput
- }
- for _, item := range podList.Items {
- err := f.DeletePod(item.Name, item.Namespace, opts...)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (f *Framework) WaitPodListRunning(label map[string]string, expectedPodNum int, ctx context.Context) error {
- if label == nil || expectedPodNum == 0 {
- return ErrWrongInput
- }
- for {
- select {
- default:
- // get pod list
- podList, err := f.GetPodListByLabel(label)
- if err != nil {
- return err
- }
- if len(podList.Items) != expectedPodNum {
- break
- }
-
- // wait pod list Running
- if f.CheckPodListRunning(podList) {
- return nil
- }
- time.Sleep(time.Second)
- case <-ctx.Done():
- return fmt.Errorf("time out to wait podList ready")
- }
- }
-}
-
-func (f *Framework) DeletePodListRepeatedly(label map[string]string, interval time.Duration, ctx context.Context, opts ...client.DeleteOption) error {
- for {
- select {
- case <-ctx.Done():
- return nil
- default:
- podList, e1 := f.GetPodListByLabel(label)
- if e1 != nil {
- return e1
- }
- e2 := f.DeletePodList(podList, opts...)
- if e2 != nil {
- return e2
- }
- time.Sleep(interval)
- }
- }
-}
-
-func (f *Framework) DeletePodListUntilReady(podList *corev1.PodList, timeOut time.Duration, opts ...client.DeleteOption) (*corev1.PodList, error) {
- if podList == nil {
- return nil, ErrWrongInput
- }
-
- err := f.DeletePodList(podList, opts...)
- if err != nil {
- f.Log("failed to DeletePodList")
- return nil, err
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), timeOut)
- defer cancel()
-OUTER:
- for {
- time.Sleep(time.Second)
- select {
- case <-ctx.Done():
- return nil, ErrTimeOut
- default:
- }
- f.Log("checking restarted pod ")
-
- podListWithLabel, err := f.GetPodListByLabel(podList.Items[0].Labels)
- if err != nil {
- f.Log("failed to GetPodListByLabel , reason=%v", err)
- continue
- }
-
- if len(podListWithLabel.Items) != len(podList.Items) {
- continue
- }
-
- for _, newPod := range podListWithLabel.Items {
- if !podutils.IsPodReady(&newPod) {
- continue OUTER
- }
- for _, oldPod := range podList.Items {
- if newPod.ObjectMeta.UID == oldPod.ObjectMeta.UID {
- continue OUTER
- }
- }
-
-			// make sure every container of the new pod is ready
- for _, newPodContainer := range newPod.Status.ContainerStatuses {
- if !newPodContainer.Ready {
- continue OUTER
- }
- }
- }
- return podListWithLabel, nil
- }
-}
-
-// WaitAllPodUntilRunning waits for all pods in all namespaces to be running
-func (f *Framework) WaitAllPodUntilRunning(ctx context.Context) error {
- var AllPodList *corev1.PodList
- var err error
-
- for {
- select {
- case <-ctx.Done():
- return ErrTimeOut
- default:
-
-			// GetPodList(opts ...ListOption): when no option is specified, pods from all namespaces are returned
- AllPodList, err = f.GetPodList()
- if err != nil {
- return err
- }
-
- if f.CheckPodListRunning(AllPodList) {
- return nil
- }
- time.Sleep(time.Second)
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/replicaset.go b/vendor/github.com/spidernet-io/e2eframework/framework/replicaset.go
deleted file mode 100644
index 79e002e5a..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/replicaset.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "time"
-
- "github.com/spidernet-io/e2eframework/tools"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/utils/pointer"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateReplicaSet(rs *appsv1.ReplicaSet, opts ...client.CreateOption) error {
-	// try to wait for the previous deletion to finish
- fake := &appsv1.ReplicaSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: rs.ObjectMeta.Namespace,
- Name: rs.ObjectMeta.Name,
- },
- }
- key := client.ObjectKeyFromObject(fake)
- existing := &appsv1.ReplicaSet{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return ErrAlreadyExisted
- }
- t := func() bool {
- existing := &appsv1.ReplicaSet{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same ReplicaSet %v/%v to finish deleting \n", rs.ObjectMeta.Namespace, rs.ObjectMeta.Name)
- return false
- }
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
- return f.CreateResource(rs, opts...)
-}
-
-func (f *Framework) DeleteReplicaSet(name, namespace string, opts ...client.DeleteOption) error {
-
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
-
- pod := &appsv1.ReplicaSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(pod, opts...)
-}
-
-func (f *Framework) GetReplicaSet(name, namespace string) (*appsv1.ReplicaSet, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- rs := &appsv1.ReplicaSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- key := client.ObjectKeyFromObject(rs)
- existing := &appsv1.ReplicaSet{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) GetReplicaSetPodList(rs *appsv1.ReplicaSet) (*corev1.PodList, error) {
-
- if rs == nil {
- return nil, ErrWrongInput
- }
-
- pods := &corev1.PodList{}
- opts := []client.ListOption{
- client.MatchingLabelsSelector{
- Selector: labels.SelectorFromSet(rs.Spec.Selector.MatchLabels),
- },
- }
- e := f.ListResource(pods, opts...)
- if e != nil {
- return nil, e
- }
- return pods, nil
-}
-
-func (f *Framework) ScaleReplicaSet(rs *appsv1.ReplicaSet, replicas int32) (*appsv1.ReplicaSet, error) {
- if rs == nil {
- return nil, ErrWrongInput
- }
- rs.Spec.Replicas = pointer.Int32(replicas)
- err := f.UpdateResource(rs)
- if err != nil {
- return nil, err
- }
- return rs, nil
-}
-
-func (f *Framework) WaitReplicaSetReady(name, namespace string, ctx context.Context) (*appsv1.ReplicaSet, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- l := &client.ListOptions{
- Namespace: namespace,
- FieldSelector: fields.OneTermEqualSelector("metadata.name", name),
- }
- watchInterface, err := f.KClient.Watch(ctx, &appsv1.ReplicaSetList{}, l)
- if err != nil {
- return nil, ErrWatch
- }
- defer watchInterface.Stop()
-
- for {
- select {
- case event, ok := <-watchInterface.ResultChan():
- f.Log("ReplicaSet %v/%v\n", event, ok)
- if !ok {
- return nil, ErrChanelClosed
- }
- f.Log("ReplicaSet %v/%v %v event \n", namespace, name, event.Type)
- switch event.Type {
- case watch.Error:
- return nil, ErrEvent
- case watch.Deleted:
- return nil, ErrResDel
- default:
- rs, ok := event.Object.(*appsv1.ReplicaSet)
- if !ok {
- return nil, ErrGetObj
- }
- f.Log("ReplicaSet %v/%v readyReplicas=%+v\n", namespace, name, rs.Status.ReadyReplicas)
- if rs.Status.ReadyReplicas == *(rs.Spec.Replicas) {
- return rs, nil
- }
- }
- case <-ctx.Done():
- return nil, ErrTimeOut
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/service.go b/vendor/github.com/spidernet-io/e2eframework/framework/service.go
deleted file mode 100644
index af5cbaeb4..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/service.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "fmt"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateService(service *corev1.Service, opts ...client.CreateOption) error {
- if service == nil {
- return ErrWrongInput
- }
-	// try to wait for the previous deletion to finish
- key := types.NamespacedName{
- Name: service.ObjectMeta.Name,
- Namespace: service.ObjectMeta.Namespace,
- }
- existing := &corev1.Service{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
-		return fmt.Errorf("failed to create: a service with the same name %v/%v already exists", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
- }
- return f.CreateResource(service, opts...)
-}
-
-func (f *Framework) GetService(name, namespace string) (*corev1.Service, error) {
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
- key := types.NamespacedName{
- Name: name,
- Namespace: namespace,
- }
- service := &corev1.Service{}
- return service, f.GetResource(key, service)
-}
-
-func (f *Framework) ListService(options ...client.ListOption) (*corev1.ServiceList, error) {
- services := &corev1.ServiceList{}
- err := f.ListResource(services, options...)
- if err != nil {
- return nil, err
- }
- return services, nil
-}
-
-func (f *Framework) DeleteService(name, namespace string, opts ...client.DeleteOption) error {
-
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
-
- service := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(service, opts...)
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/serviceaccounts.go b/vendor/github.com/spidernet-io/e2eframework/framework/serviceaccounts.go
deleted file mode 100644
index 12d7b4ec5..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/serviceaccounts.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- corev1 "k8s.io/api/core/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "time"
-)
-
-func (f *Framework) GetServiceAccount(saName, namespace string) (*corev1.ServiceAccount, error) {
- if saName == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- key := client.ObjectKey{
- Namespace: namespace,
- Name: saName,
- }
- existing := &corev1.ServiceAccount{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) WaitServiceAccountReady(saName, namespace string, timeout time.Duration) error {
- if saName == "" || namespace == "" {
- return ErrWrongInput
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- for {
- select {
- default:
- as, _ := f.GetServiceAccount(saName, namespace)
- if as != nil {
- return nil
- }
- time.Sleep(time.Second)
- case <-ctx.Done():
- return ErrTimeOut
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/statefulset.go b/vendor/github.com/spidernet-io/e2eframework/framework/statefulset.go
deleted file mode 100644
index 9d394172f..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/statefulset.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-import (
- "context"
- "time"
-
- "k8s.io/utils/pointer"
-
- "github.com/spidernet-io/e2eframework/tools"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- api_errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/watch"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func (f *Framework) CreateStatefulSet(sts *appsv1.StatefulSet, opts ...client.CreateOption) error {
-	// try to wait for the previous deletion to finish
- fake := &appsv1.StatefulSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: sts.ObjectMeta.Namespace,
- Name: sts.ObjectMeta.Name,
- },
- }
- key := client.ObjectKeyFromObject(fake)
- existing := &appsv1.StatefulSet{}
- e := f.GetResource(key, existing)
- if e == nil && existing.ObjectMeta.DeletionTimestamp == nil {
- return ErrAlreadyExisted
- }
- t := func() bool {
- existing := &appsv1.StatefulSet{}
- e := f.GetResource(key, existing)
- b := api_errors.IsNotFound(e)
- if !b {
- f.Log("waiting for a same statefulSet %v/%v to finish deleting \n", sts.ObjectMeta.Namespace, sts.ObjectMeta.Name)
- return false
- }
- return true
- }
- if !tools.Eventually(t, f.Config.ResourceDeleteTimeout, time.Second) {
- return ErrTimeOut
- }
-
- return f.CreateResource(sts, opts...)
-}
-
-func (f *Framework) DeleteStatefulSet(name, namespace string, opts ...client.DeleteOption) error {
-
- if name == "" || namespace == "" {
- return ErrWrongInput
- }
-
- sts := &appsv1.StatefulSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- return f.DeleteResource(sts, opts...)
-}
-
-func (f *Framework) GetStatefulSet(name, namespace string) (*appsv1.StatefulSet, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- sts := &appsv1.StatefulSet{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- }
- key := client.ObjectKeyFromObject(sts)
- existing := &appsv1.StatefulSet{}
- e := f.GetResource(key, existing)
- if e != nil {
- return nil, e
- }
- return existing, e
-}
-
-func (f *Framework) GetStatefulSetPodList(sts *appsv1.StatefulSet) (*corev1.PodList, error) {
- if sts == nil {
- return nil, ErrWrongInput
- }
-
- pods := &corev1.PodList{}
- ops := []client.ListOption{
- client.MatchingLabelsSelector{
- Selector: labels.SelectorFromSet(sts.Spec.Selector.MatchLabels),
- },
- }
- e := f.ListResource(pods, ops...)
- if e != nil {
- return nil, e
- }
- return pods, nil
-}
-
-func (f *Framework) ScaleStatefulSet(sts *appsv1.StatefulSet, replicas int32) (*appsv1.StatefulSet, error) {
- if sts == nil {
- return nil, ErrWrongInput
- }
- sts.Spec.Replicas = pointer.Int32(replicas)
- err := f.UpdateResource(sts)
- if err != nil {
- return nil, err
- }
- return sts, nil
-}
-
-func (f *Framework) WaitStatefulSetReady(name, namespace string, ctx context.Context) (*appsv1.StatefulSet, error) {
-
- if name == "" || namespace == "" {
- return nil, ErrWrongInput
- }
-
- l := &client.ListOptions{
- Namespace: namespace,
- FieldSelector: fields.OneTermEqualSelector("metadata.name", name),
- }
- watchInterface, err := f.KClient.Watch(ctx, &appsv1.StatefulSetList{}, l)
- if err != nil {
- return nil, ErrWatch
- }
- defer watchInterface.Stop()
-
- for {
- select {
-		// if the statefulset does not exist, no event is received
- case event, ok := <-watchInterface.ResultChan():
- if !ok {
- return nil, ErrChanelClosed
- }
- f.Log("statefulSet %v/%v %v event \n", namespace, name, event.Type)
-
- // Added EventType = "ADDED"
- // Modified EventType = "MODIFIED"
- // Deleted EventType = "DELETED"
- // Bookmark EventType = "BOOKMARK"
- // Error EventType = "ERROR"
- switch event.Type {
- case watch.Error:
- return nil, ErrEvent
- case watch.Deleted:
- return nil, ErrResDel
- default:
- sts, ok := event.Object.(*appsv1.StatefulSet)
- // metaObject, ok := event.Object.(metav1.Object)
- if !ok {
- return nil, ErrGetObj
- }
- f.Log("statefulSet %v/%v readyReplicas=%+v\n", namespace, name, sts.Status.ReadyReplicas)
- if sts.Status.ReadyReplicas == *(sts.Spec.Replicas) && sts.Status.CurrentReplicas == *(sts.Spec.Replicas) {
- return sts, nil
- }
- }
- case <-ctx.Done():
- return nil, ErrTimeOut
- }
- }
-}
diff --git a/vendor/github.com/spidernet-io/e2eframework/framework/vagrant.go b/vendor/github.com/spidernet-io/e2eframework/framework/vagrant.go
deleted file mode 100644
index 33136f71d..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/framework/vagrant.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package framework
-
-// operate vm
diff --git a/vendor/github.com/spidernet-io/e2eframework/tools/tools.go b/vendor/github.com/spidernet-io/e2eframework/tools/tools.go
deleted file mode 100644
index f2a9bb5fd..000000000
--- a/vendor/github.com/spidernet-io/e2eframework/tools/tools.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2022 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package tools
-
-import (
- "github.com/asaskevich/govalidator"
- corev1 "k8s.io/api/core/v1"
-
- // metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "fmt"
- "time"
-)
-
-func CheckPodIpv4IPReady(pod *corev1.Pod) (string, bool) {
- if pod == nil {
- return "", false
- }
- for _, v := range pod.Status.PodIPs {
- if govalidator.IsIPv4(v.IP) {
- return v.IP, true
- }
- }
- return "", false
-}
-
-func CheckPodIpv6IPReady(pod *corev1.Pod) (string, bool) {
- if pod == nil {
- return "", false
- }
- for _, v := range pod.Status.PodIPs {
- if govalidator.IsIPv6(v.IP) {
- return v.IP, true
- }
- }
- return "", false
-}
-
-func RandomName() string {
- m := time.Now()
- return fmt.Sprintf("%v%v-%v", m.Minute(), m.Second(), m.Nanosecond())
-}
-
-// Eventually polls f at the given interval until it returns true or the timeout elapses
-func Eventually(f func() bool, timeout time.Duration, interval time.Duration) bool {
- timeoutAfter := time.After(timeout)
- for {
- select {
- case <-timeoutAfter:
- return false
- default:
- }
- if f() {
- return true
- }
- time.Sleep(interval)
- }
-}
diff --git a/vendor/k8s.io/kubectl/LICENSE b/vendor/k8s.io/kubectl/LICENSE
deleted file mode 100644
index 8dada3eda..000000000
--- a/vendor/k8s.io/kubectl/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go b/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go
deleted file mode 100644
index ddd2b9dbe..000000000
--- a/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package podutils
-
-import (
- "time"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/integer"
-)
-
-// IsPodAvailable returns true if a pod is available; false otherwise.
-// Precondition for an available pod is that it must be ready. On top
-// of that, there are two cases when a pod can be considered available:
-// 1. minReadySeconds == 0, or
-// 2. LastTransitionTime (is set) + minReadySeconds < current time
-func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool {
- if !IsPodReady(pod) {
- return false
- }
-
- c := getPodReadyCondition(pod.Status)
- minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
- if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
- return true
- }
- return false
-}
-
-// IsPodReady returns true if a pod is ready; false otherwise.
-func IsPodReady(pod *corev1.Pod) bool {
- return isPodReadyConditionTrue(pod.Status)
-}
-
-// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
-func isPodReadyConditionTrue(status corev1.PodStatus) bool {
- condition := getPodReadyCondition(status)
- return condition != nil && condition.Status == corev1.ConditionTrue
-}
-
-// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
-// Returns nil if the condition is not present.
-func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition {
- _, condition := getPodCondition(&status, corev1.PodReady)
- return condition
-}
-
-// GetPodCondition extracts the provided condition from the given status and returns that.
-// Returns nil and -1 if the condition is not present, and the index of the located condition.
-func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
- if status == nil {
- return -1, nil
- }
- return getPodConditionFromList(status.Conditions, conditionType)
-}
-
-// GetPodConditionFromList extracts the provided condition from the given list of condition and
-// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
-func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
- if conditions == nil {
- return -1, nil
- }
- for i := range conditions {
- if conditions[i].Type == conditionType {
- return i, &conditions[i]
- }
- }
- return -1, nil
-}
-
-// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
-type ByLogging []*corev1.Pod
-
-func (s ByLogging) Len() int { return len(s) }
-func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s ByLogging) Less(i, j int) bool {
- // 1. assigned < unassigned
- if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
- return len(s[i].Spec.NodeName) > 0
- }
- // 2. PodRunning < PodUnknown < PodPending
- m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2}
- if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
- return m[s[i].Status.Phase] < m[s[j].Status.Phase]
- }
- // 3. ready < not ready
- if IsPodReady(s[i]) != IsPodReady(s[j]) {
- return IsPodReady(s[i])
- }
- // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
- // see https://github.com/kubernetes/kubernetes/issues/22065
- // 4. Been ready for more time < less time < empty time
- if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
- return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
- }
- // 5. Pods with containers with higher restart counts < lower restart counts
- if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
- return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
- }
- // 6. older pods < newer pods < empty timestamp pods
- if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
- return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp)
- }
- return false
-}
-
-// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
-type ActivePods []*corev1.Pod
-
-func (s ActivePods) Len() int { return len(s) }
-func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s ActivePods) Less(i, j int) bool {
- // 1. Unassigned < assigned
- // If only one of the pods is unassigned, the unassigned one is smaller
- if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
- return len(s[i].Spec.NodeName) == 0
- }
- // 2. PodPending < PodUnknown < PodRunning
- m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2}
- if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
- return m[s[i].Status.Phase] < m[s[j].Status.Phase]
- }
- // 3. Not ready < ready
- // If only one of the pods is not ready, the not ready one is smaller
- if IsPodReady(s[i]) != IsPodReady(s[j]) {
- return !IsPodReady(s[i])
- }
- // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
- // see https://github.com/kubernetes/kubernetes/issues/22065
- // 4. Been ready for empty time < less time < more time
- // If both pods are ready, the latest ready one is smaller
- if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
- return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
- }
- // 5. Pods with containers with higher restart counts < lower restart counts
- if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
- return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
- }
- // 6. Empty creation time pods < newer pods < older pods
- if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
- return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
- }
- return false
-}
-
-// afterOrZero checks if time t1 is after time t2; if one of them
-// is zero, the zero time is seen as after non-zero time.
-func afterOrZero(t1, t2 *metav1.Time) bool {
- if t1.Time.IsZero() || t2.Time.IsZero() {
- return t1.Time.IsZero()
- }
- return t1.After(t2.Time)
-}
-
-func podReadyTime(pod *corev1.Pod) *metav1.Time {
- for _, c := range pod.Status.Conditions {
- // we only care about pod ready conditions
- if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
- return &c.LastTransitionTime
- }
- }
- return &metav1.Time{}
-}
-
-func maxContainerRestarts(pod *corev1.Pod) int {
- maxRestarts := 0
- for _, c := range pod.Status.ContainerStatuses {
- maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
- }
- return maxRestarts
-}
-
-// ContainerType and VisitContainers are taken from
-// https://github.com/kubernetes/kubernetes/blob/master/pkg/api/v1/pod/util.go
-// kubectl cannot directly import this due to project goals
-
-// ContainerType signifies container type
-type ContainerType int
-
-const (
- // Containers is for normal containers
- Containers ContainerType = 1 << iota
- // InitContainers is for init containers
- InitContainers
- // EphemeralContainers is for ephemeral containers
- EphemeralContainers
-)
-
-// AllContainers specifies that all containers be visited.
-const AllContainers ContainerType = (InitContainers | Containers | EphemeralContainers)
-
-// ContainerVisitor is called with each container spec, and returns true
-// if visiting should continue.
-type ContainerVisitor func(container *corev1.Container, containerType ContainerType) (shouldContinue bool)
-
-// VisitContainers invokes the visitor function with a pointer to every container
-// spec in the given pod spec with type set in mask. If visitor returns false,
-// visiting is short-circuited. VisitContainers returns true if visiting completes,
-// false if visiting was short-circuited.
-func VisitContainers(podSpec *corev1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
- if mask&InitContainers != 0 {
- for i := range podSpec.InitContainers {
- if !visitor(&podSpec.InitContainers[i], InitContainers) {
- return false
- }
- }
- }
- if mask&Containers != 0 {
- for i := range podSpec.Containers {
- if !visitor(&podSpec.Containers[i], Containers) {
- return false
- }
- }
- }
- if mask&EphemeralContainers != 0 {
- for i := range podSpec.EphemeralContainers {
- if !visitor((*corev1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
- return false
- }
- }
- }
- return true
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9c195c1f8..743de3d32 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -221,10 +221,6 @@ github.com/josharian/native
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0
-## explicit; go 1.17
-github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io
-github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1
# github.com/kr/pretty v0.3.1
## explicit; go 1.12
github.com/kr/pretty
@@ -409,10 +405,6 @@ github.com/spf13/viper/internal/encoding/json
github.com/spf13/viper/internal/encoding/toml
github.com/spf13/viper/internal/encoding/yaml
github.com/spf13/viper/internal/features
-# github.com/spidernet-io/e2eframework v0.0.0-20230403061847-445757b963b3
-## explicit; go 1.19
-github.com/spidernet-io/e2eframework/framework
-github.com/spidernet-io/e2eframework/tools
# github.com/stretchr/testify v1.8.4
## explicit; go 1.20
github.com/stretchr/testify/assert
@@ -899,9 +891,6 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/spec3
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/kubectl v0.27.2
-## explicit; go 1.20
-k8s.io/kubectl/pkg/util/podutils
# k8s.io/utils v0.0.0-20230726121419-3b25d923346b
## explicit; go 1.18
k8s.io/utils/buffer