mirror of https://github.com/kubernetes/ingress-nginx.git
synced 2025-02-06 11:00:07 +00:00

Fix minor typos (#11935)

This commit is contained in:
parent a647bc1b7a
commit 4f23049374
.github/workflows/zz-tmpl-images.yaml (vendored, 2 changed lines)
@@ -1,5 +1,5 @@
 #### THIS IS A TEMPLATE ####
-# This workflow is created to be a template for every time an e2e teest is required,
+# This workflow is created to be a template for every time an e2e test is required,

 on:
   workflow_call:

.github/workflows/zz-tmpl-k8s-e2e.yaml (vendored, 2 changed lines)
@@ -1,5 +1,5 @@
 #### THIS IS A TEMPLATE ####
-# This workflow is created to be a template for every time an e2e teest is required,
+# This workflow is created to be a template for every time an e2e test is required,

 on:
   workflow_call:

Changelog.md (52 changed lines)
@@ -2,7 +2,7 @@

 All New change are in [Changelog](./changelog)

 ### 1.5.1

 * Upgrade NGINX to 1.21.6
 * Upgrade Golang 1.19.2
@@ -102,18 +102,18 @@ Images:
 ### Community Updates

 We will discuss the results of our Community Survey, progress on the stabilization project, and ideas going
 forward with the project at
 [Kubecon NA 2022 in Detroit](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/). Come join us
 and let us hear what you'd like to see in the future for ingress-nginx.

 https://kccncna2022.sched.com/event/18lgl?iframe=no

 [**Kubernetes Registry change notice**](https://twitter.com/BenTheElder/status/1575898507235323904)
 The [@kubernetesio](https://twitter.com/kubernetesio) container image host http://k8s.gcr.io is
 *actually* getting redirected to the community controlled http://registry.k8s.io starting with a small portion of
 traffic on October 3rd.

 If you notice any issues, *please* ping [Ben Elder](https://twitter.com/BenTheElder),
 [@thockin](https://twitter.com/thockin), [@ameukam](https://twitter.com/ameukam),or report issues in slack to
 [sig-k8s-infra slack channel](https://kubernetes.slack.com/archives/CCK68P2Q2).

@@ -123,7 +123,7 @@ If you notice any issues, *please* ping [Ben Elder](https://twitter.com/BenTheEl
 [8890](https://github.com/kubernetes/ingress-nginx/pull/8890)
 * Update to Prometheus metric names, more information [available here]( https://github.com/kubernetes/ingress-nginx/pull/8728
 )
 * Deprecated Kubernetes versions 1.20-1.21, Added support for, 1.25, currently supported versions v1.22, v1.23, v1.24, v1.25

 ADDED
 * `_request_duration_seconds` Histogram
@@ -203,11 +203,11 @@ Images:

 ### 1.3.1

 In v1.3.1 leader elections will be done entirely using the Lease API and no longer using configmaps.
 v1.3.0 is a safe transition version, using v1.3.0 can automatically complete the merging of election locks, and then you can safely upgrade to v1.3.1.

 Also, *important note*, with the Release of Kubernetes v1.25 we are dropping support for the legacy branches,
 Also, *important note*, with the release of Kubernetes v1.25, we are dropping support for the legacy edition,
 that means all version <1.0.0 of the ingress-nginx-controller.

 ## Image:
@@ -277,11 +277,11 @@ All other Changes

 ### 1.3.0

 Image:
 - registry.k8s.io/ingress-nginx/controller:v1.3.0@sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5
 - registry.k8s.io/ingress-nginx/controller-chroot:v1.3.0@sha256:0fcb91216a22aae43b374fc2e6a03b8afe9e8c78cbf07a09d75636dc4ea3c191

 _IMPORTANT CHANGES:_
 * This release removes support for Kubernetes v1.19.0
 * This release adds support for Kubernetes v1.24.0
 * Starting with this release, we will need permissions on the `coordination.k8s.io/leases` resource for leaderelection lock
@@ -352,11 +352,11 @@ _Changes:_

 ### 1.2.0

 Image:
 - k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185
 - k8s.gcr.io/ingress-nginx/controller-chroot:v1.2.0@sha256:fb17f1700b77d4fcc52ca6f83ffc2821861ae887dbb87149cf5cbc52bea425e5

-This minor version release, introduces 2 breaking changes. For the first time, an option to jail/chroot the nginx process, inside the controller container, is being introduced.. This provides an additional layer of security, for sensitive information like K8S serviceaccounts. This release also brings a special new feature of deep inspection into objects. The inspection is a walk through of all the spec, checking for possible attempts to escape configs. Currently such an inspection only occurs for `networking.Ingress`. Additionally there are fixes for the recently announced CVEs on busybox & ssl_client. And there is a fix to a recently introduced redirection related bug, that was setting the protocol on URLs to "nil".
+This minor version release, introduces 2 breaking changes. For the first time, an option to jail/chroot the nginx process, inside the controller container, is being introduced. This provides an additional layer of security, for sensitive information like K8S serviceaccounts. This release also brings a special new feature of deep inspection into objects. The inspection is a walk through of all the spec, checking for possible attempts to escape configs. Currently such an inspection only occurs for `networking.Ingress`. Additionally there are fixes for the recently announced CVEs on busybox & ssl_client. And there is a fix to a recently introduced redirection related bug, that was setting the protocol on URLs to "nil".

 _Changes:_

@@ -419,7 +419,7 @@ _Changes:_
 **Image:**
 - k8s.gcr.io/ingress-nginx/controller:v1.1.3@sha256:31f47c1e202b39fadecf822a9b76370bd4baed199a005b3e7d4d1455f4fd3fe2

 This release upgrades Alpine to 3.14.4 and nginx to 1.19.10

 Patches [OpenSSL CVE-2022-0778](https://github.com/kubernetes/ingress-nginx/issues/8339)

@@ -460,7 +460,7 @@ _Changes:_

 ### 1.1.2

 **Image:**
 - k8s.gcr.io/ingress-nginx/controller:v1.1.2@sha256:28b11ce69e57843de44e3db6413e98d09de0f6688e33d4bd384002a44f78405c

 This release bumps grpc version to 1.44.0 & runc to version 1.1.0. The release also re-introduces the ingress.class annotation, which was previously declared as deprecated. Besides that, several bug fixes and improvements are listed below.
@@ -502,7 +502,7 @@ _Changes:_

 ### 1.1.1

 **Image:**
 - k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de

 This release contains several fixes and improvements. This image is now built using Go v1.17.6 and gRPC v1.43.0. See detailed list below.
@@ -571,9 +571,9 @@ _Changes:_

 _Possible Breaking Change_
 We now implement string sanitization in annotation values. This means that words like "location", "by_lua" and
 others will drop the reconciliation of an Ingress object.

 Users from mod_security and other features should be aware that some blocked values may be used by those features
 and must be manually unblocked by the Ingress Administrator.

 For more details please check [https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#annotation-value-word-blocklist]
@@ -592,7 +592,7 @@ _Changes:_
 - k8s.gcr.io/ingress-nginx/controller:v1.0.4@sha256:545cff00370f28363dad31e3b59a94ba377854d3a11f18988f5f9e56841ef9ef

 _Possible Breaking Change_
 We have disabled the builtin ssl_session_cache due to possible memory fragmentation. This should not impact the majority of users, but please let us know
 if you face any problem

 _Changes:_
@@ -608,7 +608,7 @@ _Changes:_
 - k8s.gcr.io/ingress-nginx/controller:v1.0.3@sha256:4ade87838eb8256b094fbb5272d7dda9b6c7fa8b759e6af5383c1300996a7452

 **Known Issues**
 * Ingress controller now (starting from v1.0.0) mandates cluster scoped access to IngressClass. This leads to problems when updating old Ingress controller to newest version, as described [here](https://github.com/kubernetes/ingress-nginx/issues/7510). We plan to fix it in v1.0.4, see [this](https://github.com/kubernetes/ingress-nginx/pull/7578).

 _New Features:_

@@ -624,7 +624,7 @@ _Changes:_
 - k8s.gcr.io/ingress-nginx/controller:v1.0.2@sha256:85b53b493d6d658d8c013449223b0ffd739c76d76dc9bf9000786669ec04e049

 **Known Issues**
 * Ingress controller now (starting from v1.0.0) mandates cluster scoped access to IngressClass. This leads to problems when updating old Ingress controller to newest version, as described [here](https://github.com/kubernetes/ingress-nginx/issues/7510). We plan to fix it in v1.0.3, see [this](https://github.com/kubernetes/ingress-nginx/pull/7578).

 _New Features:_

@@ -640,7 +640,7 @@ _Changes:_
 - k8s.gcr.io/ingress-nginx/controller:v1.0.1@sha256:26bbd57f32bac3b30f90373005ef669aae324a4de4c19588a13ddba399c6664e

 **Known Issues**
 * Ingress controller now (starting from v1.0.0) mandates cluster scoped access to IngressClass. This leads to problems when updating old Ingress controller to newest version, as described [here](https://github.com/kubernetes/ingress-nginx/issues/7510). We plan to fix it in v1.0.2, see [this](https://github.com/kubernetes/ingress-nginx/pull/7578).

 _New Features:_

@@ -883,7 +883,7 @@ _Changes:_
 test #7255
 - [X] [#7216](https://github.com/kubernetes/ingress-nginx/pull/7216) Admission: Skip validation checks if an ingress
 is marked as deleted #7216

 ### 1.0.0-beta.3
 ** This is a breaking change**

@@ -2193,7 +2193,7 @@ _New Features:_

 If the active connections end before that, the pod will terminate gracefully at that time.

-To efectively take advantage of this feature, the Configmap feature [worker-shutdown-timeout](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#worker-shutdown-timeout) new value is `240s` instead of `10s`.
+To effectively take advantage of this feature, the Configmap feature [worker-shutdown-timeout](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#worker-shutdown-timeout) new value is `240s` instead of `10s`.

 **IMPORTANT:** this value has a side effect during reloads, consuming more memory until the old NGINX workers are replaced.

@@ -2603,7 +2603,7 @@ _New Features:_
 _Breaking changes:_

 - The NGINX server listening in port 18080 was removed. It was replaced by a server using an unix socket as port [#3684](https://github.com/kubernetes/ingress-nginx/pull/3684)
-This server was internal to the ingress controller. In case this was being acceded from the outside, you can restore the old server using the `http-snipet` feature in the configuration configmap like:
+This server was internal to the ingress controller. In case this was being acceded from the outside, you can restore the old server using the `http-snippet` feature in the configuration configmap like:

 ```yaml
 http-snippet: |

@@ -177,21 +177,21 @@ Promoting the images basically means that images, that were pushed to staging co
 ```
 - The -L 38 was used for 2 reasons.
 - Default number of results is 30 and there were more than 30 PRs merged while releasing v1.1.1. If you see the current/soon-to-be-old changelog, you can look at the most recent PR number that has been accounted for already, and start from after that last accounted for PR.
-- The other reason to use -L 38 was to ommit the 39th, the 40th and the 41st line in the resulting list. These were non-relevant PRs.
+- The other reason to use -L 38 was to omit the 39th, the 40th and the 41st line in the resulting list. These were non-relevant PRs.
 - If you save the output of above command to a file called prlist.txt. It looks somewhat like this ;

 ```
 % cat ~/Downloads/prlist.txt
 8129 fix syntax in docs for multi-tls example
 8120 Update go in runner and release v1.1.1
 8119 Update to go v1.17.6
 8118 Remove deprecated libraries, update other libs
 8117 Fix codegen errors
 8115 chart/ghaction: set the correct permission to have access to push a release
 ....
 ```
 You can delete the lines, that refer to PRs of the release process itself. We only need to list the feature/bugfix PRs. You can also delete the lines that are housekeeping or not really worth mentioning in the changelog.
-- you use some easy automation in bash/python/other, to get the PR-List that can be used in the changelog. For example, its possible to use a bash scripty way, seen below, to convert those plaintext PR numbers into clickable links.
+- you use some easy automation in bash/python/other, to get the PR-List that can be used in the changelog. For example, it's possible to use a bash scripty way, seen below, to convert those plaintext PR numbers into clickable links.

 ```
 #!/usr/bin/bash
@@ -205,7 +205,7 @@ Promoting the images basically means that images, that were pushed to staging co
 done <$file

 ```
-- There was a parsing issue and path issue on MacOS, so above scrpt had to be modified and MacOS monterey compatible script is below ;
+- There was a parsing issue and path issue on MacOS, so above script had to be modified and MacOS monterey compatible script is below ;

 ```
 #!/bin/bash
@@ -231,7 +231,7 @@ Promoting the images basically means that images, that were pushed to staging co
 - tag
 - digest

 - [helm-docs](https://github.com/norwoodj/helm-docs) is a tool that generates the README.md for a helm-chart automatically. In the CI pipeline workflow of github actions (/.github/workflows/ci.yaml), you can see how helm-docs is used. But the CI pipeline is not designed to make commits back into the project. So we need to run helm-docs manually, and check in the resulting autogenerated README.md at the path /charts/ingress-nginx/README.md
 ```
 GOBIN=$PWD GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0
 ./helm-docs --chart-search-root=${GITHUB_WORKSPACE}/charts
@@ -274,7 +274,7 @@ Promoting the images basically means that images, that were pushed to staging co

 ### h. Update README.md

-- Update the table in README.md in the root of the projet to reflect the support matrix. Add the new release version and details in there.
+- Update the table in README.md in the root of the project to reflect the support matrix. Add the new release version and details in there.

 ## 5. RELEASE new version

@@ -291,7 +291,7 @@ Promoting the images basically means that images, that were pushed to staging co
 - `helm repo update`
 - `helm search repo ingress-nginx`

-## 6. Github release
+## 6. GitHub release

 - Release to github

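The release guide touched above only shows the first and last lines of the PR-link script it refers to. As a rough sketch (not the project's actual script), a loop of that kind, assuming prlist.txt holds lines of the form "NUMBER title" as in the example output quoted in the hunk, could look like this:

```bash
#!/bin/bash
# Sketch: turn plaintext PR numbers from prlist.txt into clickable changelog links.
# The file name and the output format are assumptions based on the guide above.
file="prlist.txt"
while read -r pr title; do
  [ -z "$pr" ] && continue   # skip blank lines
  echo "- [X] [#${pr}](https://github.com/kubernetes/ingress-nginx/pull/${pr}) ${title}"
done <"$file"
```
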
@@ -20,14 +20,14 @@ It all starts with the OSI model...
 ### Approaching the problem


-Not everybody knows everything. But the factors that help are a love/passion for this to begin. But to move forward, its the approach and not the knowledge that sustains prolonged joy, while working on issues. If the approach is simple and powered by good-wishes-for-community, then info & tools are forthcoming and easy.
+Not everybody knows everything. But the factors that help are a love/passion for this to begin. But to move forward, it's the approach and not the knowledge that sustains prolonged joy, while working on issues. If the approach is simple and powered by good-wishes-for-community, then info & tools are forthcoming and easy.

 Here we take a bird's eye-view of the hops in the network plumbing, that a packet takes, from source to destination, when we run `curl`, from a laptop to a nginx webserver process, running in a container, inside a pod, inside a Kubernetes cluster, created using `kind` or `minikube` or any other cluster-management tool.

 ### [Kind](https://kind.sigs.k8s.io/) cluster example on a Linux Host

 #### TL;DR
-The destination of the packet from the curl command, is looked up, in the `routing table`. Based on the route, the the packet first travels to the virtual bridge `172.18.0.1` interface, created by docker, when we created the kind cluster on a laptop. Next the packet is forwarded to `172.18.0.2`(See below on how we got this IP address), within the kind cluster. The `kube-proxy` container creates iptables rules that make sure the packet goes to the correct pod ip in this case `10.244.0.5`
+The destination of the packet from the curl command, is looked up, in the `routing table`. Based on the route, the packet first travels to the virtual bridge `172.18.0.1` interface, created by docker, when we created the kind cluster on a laptop. Next the packet is forwarded to `172.18.0.2`(See below on how we got this IP address), within the kind cluster. The `kube-proxy` container creates iptables rules that make sure the packet goes to the correct pod ip in this case `10.244.0.5`

 Command:
 ```
@@ -435,7 +435,7 @@ virbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
 ```
 Output Relevance: From the above output you can see there are two Virtual Bridges created by minikube when we created the cluster on the network. Here, `virbr0` is the default NAT network bridge while `virbr2` is a isolated network bridge on which the pods run.

-Minikube creates a Virtual Machine, to enter the virtual machine we can simple do:
+Minikube creates a Virtual Machine, to enter the virtual machine we can simply do:
 ```
 # minikube ssh
 ```
@@ -707,7 +707,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
 httpd ClusterIP 10.104.111.0 <none> 80/TCP 13s
 ```

-Once we have this we can now create a n ingress using the following
+Once we have this we can now create an ingress using the following
 ```
 kubectl -n httpd create ingress httpd --class nginx --rule httpd.dev.leonnunes.com/"*"=httpd:80
 ```
@@ -771,7 +771,7 @@ Hypertext Transfer Protocol
 [Response in frame: 6]

 ```
-The above output shows the information that the `httpd` pod recieves. The `curl` command sends the host header, `Host: httpd.dev.leonnunes.com`, to the nginx controller, that then matches the rule and sends the information to the right controller
+The above output shows the information that the `httpd` pod receives. The `curl` command sends the host header, `Host: httpd.dev.leonnunes.com`, to the nginx controller, that then matches the rule and sends the information to the right controller

 The following output shows what is sent via the laptop.
 ```

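The networking doc touched above describes sending the request from the laptop with the `Host: httpd.dev.leonnunes.com` header. A minimal way to reproduce that request is sketched below; the address 172.18.0.2 is only the example node IP used earlier in that doc, not a fixed value:

```bash
# Sketch: hit the ingress-nginx controller with the Host header the document mentions.
# Replace 172.18.0.2 with whatever address your kind/minikube node actually exposes.
curl -H "Host: httpd.dev.leonnunes.com" http://172.18.0.2/
```
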
@@ -49,7 +49,7 @@ fi
 SSL_VOLUME=$(mktemp -d)

 function cleanup {
-  echo -e "${BGREEN}Stoping kubectl proxy${NC}"
+  echo -e "${BGREEN}Stopping kubectl proxy${NC}"
   rm -rf "${SSL_VOLUME}"
   kill "$proxy_pid"
 }

@@ -94,7 +94,7 @@ Images:
 * Chart: Improve IngressClass documentation. (#11104)
 * Chart: Deploy `PodDisruptionBudget` with KEDA. (#11032)
 * Undo #11062 since it breaks the nginx config (#11082)
-* [mTLS] Fix acme verfication when mTLS and Client CN verification is enabled (#11062)
+* [mTLS] Fix acme verification when mTLS and Client CN verification is enabled (#11062)
 * golangci-lint update, ci cleanup, group dependabot updates (#11071)
 * bump golang (#11070)
 * feature(leader_election): flag to disable leader election feature on controller (#11064)

@@ -83,7 +83,7 @@ Images:
 * ModSecurity dependencies update to avoid Memory Leaks (#9330)
 * fix(hpa): deprecated api version, bump to v2 (#9348)
 * fix(typo): pluralize provider (#9346)
-* removed deprecation messsage for ingressClass annotation (#9357)
+* removed deprecation message for ingressClass annotation (#9357)
 * added ginkgo junit reports (#9350)
 * Fix typos found by codespell (#9353)
 * bumped ginkgo to v2.5.1 in testrunner (#9340)

@@ -15,7 +15,7 @@ Images:
 * Add support for --container flag (#9703)
 * Fix typo in OpenTelemetry (#9903)
 * ensure make lua-test runs locally (#9902)
-* update k8s.io dependecies to v0.26.4 (#9893)
+* update k8s.io dependencies to v0.26.4 (#9893)
 * Adding resource type to default HPA configuration to resolve issues with Terraform helm chart usage (#9803)
 * I have not been able to fulfill my maintainer responsibilities for a while already, making it official now. (#9883)
 * Update k8s versions (#9879)

@@ -39,7 +39,7 @@ on our new [ingress-nginx-dev mailing list](https://groups.google.com/a/kubernet
 * Correct annotations in monitoring docs (#9976)
 * fix: avoid builds and tests for changes to markdown (#9962)
 * Validate path types (#9967)
-* HPA: Use capabilites & align manifests. (#9521)
+* HPA: Use capabilities & align manifests. (#9521)
 * Use dl.k8s.io instead of hardcoded GCS URIs (#9946)
 * add option for annotations in PodDisruptionBudget (#9843)
 * chore: update httpbin to httpbun (#9919)

@@ -26,7 +26,7 @@ Images:
 * Add golangci github action and replace the deprecated golint (#10187)
 * BUGFIX incorrect indentation (#10254)
 * Upgrade OpenTelemetry to v1.11.0 and gRPC to v1.57.0 (#10352)
-* fix: path with sepecial characters warning #10281 #10308 (#10330)
+* fix: path with special characters warning #10281 #10308 (#10330)
 * Fix golangci-lint errors (#10196)
 * chore(build): Fix Run make dev-env syntax error (#10294)
 * Add firewall configuration to quick start documentation (#10357)

@@ -26,7 +26,7 @@ Images:
 * Add golangci github action and replace the deprecated golint (#10187)
 * BUGFIX incorrect indentation (#10254)
 * Upgrade OpenTelemetry to v1.11.0 and gRPC to v1.57.0 (#10352)
-* fix: path with sepecial characters warning #10281 #10308 (#10330)
+* fix: path with special characters warning #10281 #10308 (#10330)
 * Fix golangci-lint errors (#10196)
 * chore(build): Fix Run make dev-env syntax error (#10294)
 * Add firewall configuration to quick start documentation (#10357)

@@ -5,7 +5,7 @@ This file documents all notable changes to [ingress-nginx](https://github.com/ku
 ### 4.1.2

 * [8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed
-* [8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter
+* [8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePrefix Helm chart parameter
 * [8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart

 **Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.1.0...helm-chart-4.1.2

@@ -6,7 +6,7 @@ This file documents all notable changes to [ingress-nginx](https://github.com/ku

 * helm: Fix opentelemetry module installation for daemonset (#9792)
 * Update charts/* to keep project name display aligned (#9931)
-* HPA: Use capabilites & align manifests. (#9521)
+* HPA: Use capabilities & align manifests. (#9521)
 * PodDisruptionBudget spec logic update (#9904)
 * add option for annotations in PodDisruptionBudget (#9843)
 * Update Ingress-Nginx version controller-v1.8.0

@@ -131,7 +131,7 @@ func GetIngressDefinitions(flags *genericclioptions.ConfigFlags, namespace strin
 	return pods.Items, nil
 }

-// GetNumEndpoints counts the number of endpointslices adresses for the service with the given name
+// GetNumEndpoints counts the number of endpointslices addresses for the service with the given name
 func GetNumEndpoints(flags *genericclioptions.ConfigFlags, namespace, serviceName string) (*int, error) {
 	epss, err := GetEndpointSlicesByName(flags, namespace, serviceName)
 	if err != nil {

@@ -291,7 +291,7 @@ Do not try to edit it manually.
 ### [[Shutdown] Grace period shutdown](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/grace_period.go#L32)
 - [/healthz should return status code 500 during shutdown grace period](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/grace_period.go#L35)
 ### [[Shutdown] ingress controller](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/shutdown.go#L30)
-- [should shutdown in less than 60 secons without pending connections](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/shutdown.go#L40)
+- [should shutdown in less than 60 seconds without pending connections](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/shutdown.go#L40)
 ### [[Shutdown] Graceful shutdown with pending request](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/slow_requests.go#L25)
 - [should let slow requests finish before shutting down](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/gracefulshutdown/slow_requests.go#L33)
 ### [[Ingress] DeepInspection](https://github.com/kubernetes/ingress-nginx/tree/main//test/e2e/ingress/deep_inspection.go#L27)

@@ -23,7 +23,7 @@ Customization | [External authentication with response header propagation](custo
 Customization | [Sysctl tuning](customization/sysctl/README.md) | TODO | TODO
 Features | [Rewrite](rewrite/README.md) | TODO | TODO
 Features | [Session stickiness](affinity/cookie/README.md) | route requests consistently to the same endpoint | Advanced
-Features | [Canary Deployments](canary/README.md) | weighted canary routing to a seperate deployment | Intermediate
+Features | [Canary Deployments](canary/README.md) | weighted canary routing to a separate deployment | Intermediate
 Scaling | [Static IP](static-ip/README.md) | a single ingress gets a single static IP | Intermediate
 TLS | [Multi TLS certificate termination](multi-tls/README.md) | TODO | TODO
 TLS | [TLS termination](tls-termination/README.md) | TODO | TODO

@@ -1,25 +1,25 @@
 # OpenPolicyAgent and pathType enforcing

 Ingress API allows users to specify different [pathType](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)
 on Ingress object.

 While pathType `Exact` and `Prefix` should allow only a small set of characters, pathType `ImplementationSpecific`
 allows any characters, as it may contain regexes, variables and other features that may be specific of the Ingress
 Controller being used.

 This means that the Ingress Admins (the persona who deployed the Ingress Controller) should trust the users
 allowed to use `pathType: ImplementationSpecific`, as this may allow arbitrary configuration, and this
 configuration may end on the proxy (aka Nginx) configuration.

 ## Example
 The example in this repo uses [Gatekeeper](https://open-policy-agent.github.io/gatekeeper/website/) to block the usage of `pathType: ImplementationSpecific`,
 allowing just a specific list of namespaces to use it.

 It is recommended that the admin modifies this rules to enforce a specific set of characters when the usage of ImplementationSpecific
 is allowed, or in ways that best suits their needs.

 First, the `ConstraintTemplate` from [template.yaml](template.yaml) will define a rule that validates if the Ingress object
-is being created on an excempted namespace, and case not, will validate its pathType.
+is being created on an exempted namespace, and case not, will validate its pathType.

 Then, the rule `K8sBlockIngressPathType` contained in [rule.yaml](rule.yaml) will define the parameters: what kind of
-object should be verified (Ingress), what are the excempted namespaces, and what kinds of pathType are blocked.
+object should be verified (Ingress), what are the exempted namespaces, and what kinds of pathType are blocked.

@@ -17,11 +17,11 @@ spec:
 properties:
 blockedTypes:
 type: array
 items:
 type: string
 namespacesExceptions:
 type: array
 items:
 type: string
 targets:
 - target: admission.k8s.gatekeeper.sh
@@ -31,8 +31,8 @@ spec:
 violation[{"msg": msg}] {
   input.review.kind.kind == "Ingress"
   ns := input.review.object.metadata.namespace
-  excemptNS := [good | excempts = input.parameters.namespacesExceptions[_] ; good = excempts == ns]
-  not any(excemptNS)
+  exemptNS := [good | exempts = input.parameters.namespacesExceptions[_] ; good = exempts == ns]
+  not any(exemptNS)
   pathType := object.get(input.review.object.spec.rules[_].http.paths[_], "pathType", "")
   blockedPath := [blocked | blockedTypes = input.parameters.blockedTypes[_] ; blocked = blockedTypes == pathType]
   any(blockedPath)

@@ -65,7 +65,7 @@ otel-max-queuesize

 # The delay interval in milliseconds between two consecutive exports.
 otel-schedule-delay-millis

 # How long the export can run before it is cancelled.
 otel-schedule-delay-millis

@@ -112,7 +112,7 @@ graph TB
 end

 subgraph otel
 otc["Otel Collector"]
 end

 subgraph observability
@@ -190,7 +190,7 @@ To install the example and collectors run:
 helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
 helm repo add grafana https://grafana.github.io/helm-charts
 helm repo update
-# deply cert-manager needed for OpenTelemetry collector operator
+# deploy cert-manager needed for OpenTelemetry collector operator
 kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml
 # create observability namespace
 kubectl apply -f https://raw.githubusercontent.com/esigo/nginx-example/main/observability/namespace.yaml

@@ -186,7 +186,7 @@ func Test_Patching_objects(t *testing.T) {
 	})

 	// This is to preserve old behavior and log format, it could be improved.
-	t.Run("diffent_non_empty_names_are_specified_for_validating_and_mutating_webhook", func(t *testing.T) {
+	t.Run("different_non_empty_names_are_specified_for_validating_and_mutating_webhook", func(t *testing.T) {
 		t.Parallel()

 		k := testK8sWithUnpatchedObjects()

@@ -47,7 +47,7 @@ type AdmissionControllerServer struct {
 	AdmissionController AdmissionController
 }

-// NewAdmissionControllerServer instanciates an admission controller server with
+// NewAdmissionControllerServer instantiates an admission controller server with
 // a default codec
 func NewAdmissionControllerServer(ac AdmissionController) *AdmissionControllerServer {
 	return &AdmissionControllerServer{

@@ -23,22 +23,22 @@ import (
 	"k8s.io/ingress-nginx/internal/ingress/resolver"
 )

-// LB Alghorithms are defined in https://github.com/kubernetes/ingress-nginx/blob/d3e75b056f77be54e01bdb18675f1bb46caece31/rootfs/etc/nginx/lua/balancer.lua#L28
+// LB Algorithms are defined in https://github.com/kubernetes/ingress-nginx/blob/d3e75b056f77be54e01bdb18675f1bb46caece31/rootfs/etc/nginx/lua/balancer.lua#L28

 const (
-	loadBalanceAlghoritmAnnotation = "load-balance"
+	loadBalanceAlgorithmAnnotation = "load-balance"
 )

-var loadBalanceAlghoritms = []string{"round_robin", "chash", "chashsubset", "sticky_balanced", "sticky_persistent", "ewma"}
+var loadBalanceAlgorithms = []string{"round_robin", "chash", "chashsubset", "sticky_balanced", "sticky_persistent", "ewma"}

 var loadBalanceAnnotations = parser.Annotation{
 	Group: "backend",
 	Annotations: parser.AnnotationFields{
-		loadBalanceAlghoritmAnnotation: {
-			Validator: parser.ValidateOptions(loadBalanceAlghoritms, true, true),
+		loadBalanceAlgorithmAnnotation: {
+			Validator: parser.ValidateOptions(loadBalanceAlgorithms, true, true),
 			Scope: parser.AnnotationScopeLocation,
 			Risk: parser.AnnotationRiskLow,
-			Documentation: `This annotation allows setting the load balancing alghorithm that should be used. If none is specified, defaults to
+			Documentation: `This annotation allows setting the load balancing algorithm that should be used. If none is specified, defaults to
 the default configured by Ingress admin, otherwise to round_robin`,
 		},
 	},
@@ -61,7 +61,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
 // used to indicate if the location/s contains a fragment of
 // configuration to be included inside the paths of the rules
 func (a loadbalancing) Parse(ing *networking.Ingress) (interface{}, error) {
-	return parser.GetStringAnnotation(loadBalanceAlghoritmAnnotation, ing, a.annotationConfig.Annotations)
+	return parser.GetStringAnnotation(loadBalanceAlgorithmAnnotation, ing, a.annotationConfig.Annotations)
 }

 func (a loadbalancing) GetDocumentation() parser.AnnotationFields {

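The renamed constant above still maps to the same `load-balance` annotation key, so nothing changes for users. As a usage sketch only (the Ingress name `my-ingress` is a placeholder, and the `nginx.ingress.kubernetes.io/` prefix is the usual ingress-nginx annotation prefix rather than something shown in this hunk), the annotation can be set with one of the algorithms listed in the diff:

```bash
# Sketch: pick one of the listed algorithms, e.g. "ewma", and apply it to an existing Ingress.
kubectl annotate ingress my-ingress \
  nginx.ingress.kubernetes.io/load-balance=ewma --overwrite
```
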
@@ -90,7 +90,7 @@ func TestParse(t *testing.T) {
 			Target: "http://some.test.env.com:2121/$someparam=1&$someotherparam=2",
 			Host: "some.test.env.com",
 		}},
-		{map[string]string{backendURL: "http://some.test.env.com", host: "someInvalidParm.%^&*()_=!@#'\""}, &Config{
+		{map[string]string{backendURL: "http://some.test.env.com", host: "someInvalidParam.%^&*()_=!@#'\""}, &Config{
 			Source: ngxURI,
 			RequestBody: "on",
 			Target: "http://some.test.env.com",

@@ -33,7 +33,7 @@ var serverSnippetAnnotations = parser.Annotation{
 		serverSnippetAnnotation: {
 			Validator: parser.ValidateNull,
 			Scope: parser.AnnotationScopeIngress,
-			Risk: parser.AnnotationRiskCritical, // Critical, this annotation is not validated at all and allows arbitrary configutations
+			Risk: parser.AnnotationRiskCritical, // Critical, this annotation is not validated at all and allows arbitrary configurations
 			Documentation: `This annotation allows setting a custom NGINX configuration on a server block. This annotation does not contain any validation and it's usage is not recommended!`,
 		},
 	},

@@ -34,7 +34,7 @@ var serviceUpstreamAnnotations = parser.Annotation{
 		serviceUpstreamAnnotation: {
 			Validator: parser.ValidateBool,
 			Scope: parser.AnnotationScopeIngress,
-			Risk: parser.AnnotationRiskLow, // Critical, this annotation is not validated at all and allows arbitrary configutations
+			Risk: parser.AnnotationRiskLow, // Critical, this annotation is not validated at all and allows arbitrary configurations
 			Documentation: `This annotation makes NGINX use Service's Cluster IP and Port instead of Endpoints as the backend endpoints`,
 		},
 	},

@@ -33,7 +33,7 @@ var configurationSnippetAnnotations = parser.Annotation{
 		configurationSnippetAnnotation: {
 			Validator: parser.ValidateNull,
 			Scope: parser.AnnotationScopeLocation,
-			Risk: parser.AnnotationRiskCritical, // Critical, this annotation is not validated at all and allows arbitrary configutations
+			Risk: parser.AnnotationRiskCritical, // Critical, this annotation is not validated at all and allows arbitrary configurations
 			Documentation: `This annotation allows setting a custom NGINX configuration on a location block. This annotation does not contain any validation and it's usage is not recommended!`,
 		},
 	},

@ -33,7 +33,7 @@ var streamSnippetAnnotations = parser.Annotation{
 streamSnippetAnnotation: {
 Validator: parser.ValidateNull,
 Scope: parser.AnnotationScopeIngress,
-Risk: parser.AnnotationRiskCritical, // Critical, this annotation is not validated at all and allows arbitrary configutations
+Risk: parser.AnnotationRiskCritical, // Critical, this annotation is not validated at all and allows arbitrary configurations
 Documentation: `This annotation allows setting a custom NGINX configuration on a stream block. This annotation does not contain any validation and it's usage is not recommended!`,
 },
 },
@ -119,7 +119,7 @@ type Configuration struct {
 // By default this is disabled
 AllowBackendServerHeader bool `json:"allow-backend-server-header"`

-// AccessLogParams sets additionals params for access_log
+// AccessLogParams sets additional params for access_log
 // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log
 // By default it's empty
 AccessLogParams string `json:"access-log-params,omitempty"`
@ -418,7 +418,7 @@ type Configuration struct {
 // Example '60s'
 ProxyProtocolHeaderTimeout time.Duration `json:"proxy-protocol-header-timeout,omitempty"`

-// Enables or disables the directive aio_write that writes files files asynchronously
+// Enables or disables the directive aio_write that writes files asynchronously
 // https://nginx.org/en/docs/http/ngx_http_core_module.html#aio_write
 EnableAioWrite bool `json:"enable-aio-write,omitempty"`

@ -612,7 +612,7 @@ type Configuration struct {
 // Default: 0.01
 OtelSamplerRatio float32 `json:"otel-sampler-ratio"`

-// OtelSamplerParentBased specifies the parent based sampler to be use for any traces created
+// OtelSamplerParentBased specifies the parent based sampler to be used for any traces created
 // Default: true
 OtelSamplerParentBased bool `json:"otel-sampler-parent-based"`

@ -709,7 +709,7 @@ type Configuration struct {
 DefaultSSLCertificate *ingress.SSLCert `json:"-"`

 // ProxySSLLocationOnly controls whether the proxy-ssl parameters defined in the
-// proxy-ssl-* annotations are applied on on location level only in the nginx.conf file
+// proxy-ssl-* annotations are applied on location level only in the nginx.conf file
 // Default is that those are applied on server level, too
 ProxySSLLocationOnly bool `json:"proxy-ssl-location-only"`

@ -115,7 +115,7 @@ func getEndpointsFromSlices(s *corev1.Service, port *corev1.ServicePort, proto c
 useTopologyHints = false
 if zoneForHints != emptyZone {
 useTopologyHints = true
-// check if all endpointslices has zone hints
+// check if all endpointslices have zone hints
 for _, ep := range eps.Endpoints {
 if ep.Hints == nil || len(ep.Hints.ForZones) == 0 {
 useTopologyHints = false
@ -410,7 +410,7 @@ func TestCleanTempNginxCfg(t *testing.T) {
 }
 }

-//nolint:unparam // Ingnore `network` always receives `"tcp"` error
+//nolint:unparam // Ignore `network` always receives `"tcp"` error
 func tryListen(network, address string) (l net.Listener, err error) {
 condFunc := func() (bool, error) {
 l, err = net.Listen(network, address)
@ -88,7 +88,7 @@ func TestEndpointSliceLister(t *testing.T) {
 }
 eps, err := el.MatchByKey(key)
 if err != nil {
-t.Errorf("unexpeted error %v", err)
+t.Errorf("unexpected error %v", err)
 }
 if err == nil && len(eps) != 1 {
 t.Errorf("expected one slice %v, error, got %d slices", endpointSlice, len(eps))
@ -130,7 +130,7 @@ func TestEndpointSliceLister(t *testing.T) {
 }
 eps, err := el.MatchByKey(key)
 if err != nil {
-t.Errorf("unexpeted error %v", err)
+t.Errorf("unexpected error %v", err)
 }
 if len(eps) != 1 {
 t.Errorf("expected one slice %v, error, got %d slices", endpointSlice, len(eps))
@ -1208,13 +1208,13 @@ func TestStore(t *testing.T) {
 }
 }(updateCh)

-namesapceSelector, err := labels.Parse("foo=bar")
+namespaceSelector, err := labels.Parse("foo=bar")
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
 storer := New(
 ns,
-namesapceSelector,
+namespaceSelector,
 fmt.Sprintf("%v/config", ns),
 fmt.Sprintf("%v/tcp", ns),
 fmt.Sprintf("%v/udp", ns),
@ -1274,7 +1274,7 @@ func TestStore(t *testing.T) {
 t.Errorf("expected 0 events of type Delete but %v occurred", del)
 }
 })
-// test add ingress with secret it doesn't exists and then add secret
+// test add ingress with secret it doesn't exist and then add secret
 // check secret is generated on fs
 // check ocsp
 // check invalid secret (missing crt)
@ -129,7 +129,7 @@ func NewNginxCommand() NginxCommand {
 return command
 }

-// ExecCommand instanciates an exec.Cmd object to call nginx program
+// ExecCommand instantiates an exec.Cmd object to call nginx program
 func (nc NginxCommand) ExecCommand(args ...string) *exec.Cmd {
 cmdArgs := []string{}

@ -225,7 +225,7 @@ func (cm *Controller) IncCheckErrorCount(namespace, name string) {
 cm.checkIngressOperationErrors.MustCurryWith(cm.constLabels).With(labels).Inc()
 }

-// IncOrphanIngress sets the the orphaned ingress gauge to one
+// IncOrphanIngress sets the orphaned ingress gauge to one
 func (cm *Controller) IncOrphanIngress(namespace, name, orphanityType string) {
 labels := prometheus.Labels{
 "namespace": namespace,
@ -235,7 +235,7 @@ func (cm *Controller) IncOrphanIngress(namespace, name, orphanityType string) {
 cm.OrphanIngress.MustCurryWith(cm.constLabels).With(labels).Set(1.0)
 }

-// DecOrphanIngress sets the the orphaned ingress gauge to zero (all services has their endpoints)
+// DecOrphanIngress sets the orphaned ingress gauge to zero (all services has their endpoints)
 func (cm *Controller) DecOrphanIngress(namespace, name, orphanityType string) {
 labels := prometheus.Labels{
 "namespace": namespace,
@ -311,7 +311,7 @@ func (cm *Controller) SetSSLExpireTime(servers []*ingress.Server) {
 }
 }

-// SetSSLInfo creates a metric with all certificates informations
+// SetSSLInfo creates a metric with all certificate information
 func (cm *Controller) SetSSLInfo(servers []*ingress.Server) {
 for _, s := range servers {
 if s.SSLCert == nil || s.SSLCert.Certificate == nil || s.SSLCert.Certificate.SerialNumber == nil {
@ -29,10 +29,10 @@ type Resolver interface {
 // GetSecurityConfiguration returns the configuration options from Ingress
 GetSecurityConfiguration() defaults.SecurityConfiguration

-// GetConfigMap searches for configmap containing the namespace and name usting the character /
+// GetConfigMap searches for configmap containing the namespace and name using the character /
 GetConfigMap(string) (*apiv1.ConfigMap, error)

-// GetSecret searches for secrets containing the namespace and name using a the character /
+// GetSecret searches for secrets containing the namespace and name using the character /
 GetSecret(string) (*apiv1.Secret, error)

 // GetAuthCertificate resolves a given secret name into an SSL certificate and CRL.
@ -42,7 +42,7 @@ type Resolver interface {
 // ca.crl: contains the revocation list used for authentication
 GetAuthCertificate(string) (*AuthSSLCert, error)

-// GetService searches for services containing the namespace and name using a the character /
+// GetService searches for services containing the namespace and name using the character /
 GetService(string) (*apiv1.Service, error)
 }

@ -47,7 +47,7 @@ func (m Mock) GetSecurityConfiguration() defaults.SecurityConfiguration {
 }
 }

-// GetSecret searches for secrets contenating the namespace and name using a the character /
+// GetSecret searches for secrets containing the namespace and name using the character /
 func (m Mock) GetSecret(string) (*apiv1.Secret, error) {
 return nil, nil
 }
@ -60,12 +60,12 @@ func (m Mock) GetAuthCertificate(string) (*AuthSSLCert, error) {
 return nil, nil
 }

-// GetService searches for services contenating the namespace and name using a the character /
+// GetService searches for services containing the namespace and name using the character /
 func (m Mock) GetService(string) (*apiv1.Service, error) {
 return nil, nil
 }

-// GetConfigMap searches for configMaps contenating the namespace and name using a the character /
+// GetConfigMap searches for configMaps containing the namespace and name using the character /
 func (m Mock) GetConfigMap(name string) (*apiv1.ConfigMap, error) {
 if v, ok := m.ConfigMaps[name]; ok {
 return v, nil
@ -442,7 +442,7 @@ func getFakeHostSSLCert(host string) (cert, key []byte) {

 // fullChainCert checks if a certificate file contains issues in the intermediate CA chain
 // Returns a new certificate with the intermediate certificates.
-// If the certificate does not contains issues with the chain it return an empty byte array
+// If the certificate does not contain issues with the chain it returns an empty byte array
 func fullChainCert(in []byte) ([]byte, error) {
 cert, err := certUtil.DecodeCertificate(in)
 if err != nil {
@ -523,7 +523,7 @@ func (tl *TLSListener) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, e
 return tl.certificate, tl.err
 }

-// TLSConfig instanciates a TLS configuration, always providing an up to date certificate
+// TLSConfig instantiates a TLS configuration, always providing an up to date certificate
 func (tl *TLSListener) TLSConfig() *tls.Config {
 return &tls.Config{
 GetCertificate: tl.GetCertificate,
@ -198,10 +198,10 @@ type Server struct {
 Aliases []string `json:"aliases,omitempty"`
 // RedirectFromToWWW returns if a redirect to/from prefix www is required
 RedirectFromToWWW bool `json:"redirectFromToWWW,omitempty"`
-// CertificateAuth indicates the this server requires mutual authentication
+// CertificateAuth indicates this server requires mutual authentication
 // +optional
 CertificateAuth authtls.Config `json:"certificateAuth"`
-// ProxySSL indicates the this server uses client certificate to access backends
+// ProxySSL indicates this server uses client certificate to access backends
 // +optional
 ProxySSL proxyssl.Config `json:"proxySSL"`
 // ServerSnippet returns the snippet of server
@ -219,7 +219,7 @@ type Server struct {
 // Location describes an URI inside a server.
 // Also contains additional information about annotations in the Ingress.
 //
-// In some cases when more than one annotations is defined a particular order in the execution
+// In some cases when more than one annotation is defined a particular order in the execution
 // is required.
 // The chain in the execution order of annotations should be:
 // - Denylist
@ -342,7 +342,7 @@ type Location struct {
 // CustomHTTPErrors specifies the error codes that should be intercepted.
 // +optional
 CustomHTTPErrors []int `json:"custom-http-errors"`
-// ProxyInterceptErrors disables error intecepting when using CustomHTTPErrors
+// ProxyInterceptErrors disables error interception when using CustomHTTPErrors
 // e.g. custom 404 and 503 when service-a does not exist or is not available
 // but service-a can return 404 and 503 error codes without intercept
 // +optional
@ -357,7 +357,7 @@ describe("Sticky", function()
 for _ = 1, 100 do
 local new_upstream = sticky_balancer_instance:balance()
 if change_on_failure == false then
--- upstream should be the same inspite of error, if change_on_failure option is false
+-- upstream should be the same in spite of error, if change_on_failure option is false
 assert.equal(new_upstream, old_upstream)
 else
 -- upstream should change after error, if change_on_failure option is true
@ -84,7 +84,7 @@ var _ = framework.IngressNginxDescribeSerial("[TopologyHints] topology aware rou
 }

 if gotHints {
-// we have 2 replics, if there is just one backend it means that we are routing according slices hints to same zone as controller is
+// we have 2 replicas, if there is just one backend it means that we are routing according slices hints to same zone as controller is
 assert.Equal(ginkgo.GinkgoT(), 1, gotBackends)
 } else {
 // two replicas should have two endpoints without topology hints
@ -43,7 +43,7 @@ const HTTPBunService = "httpbun"
 // NipService name of external service using nip.io
 const NIPService = "external-nip"

-// HTTPBunImage is the default image that is used to deploy HTTPBun with the framwork
+// HTTPBunImage is the default image that is used to deploy HTTPBun with the framework
 var HTTPBunImage = os.Getenv("HTTPBUN_IMAGE")

 // EchoImage is the default image to be used by the echo service
@ -100,7 +100,7 @@ func NewDefaultFramework(baseName string, opts ...func(*Framework)) *Framework {
 }

 // NewSimpleFramework makes a new framework that allows the usage of a namespace
-// for arbitraty tests.
+// for arbitrary tests.
 func NewSimpleFramework(baseName string, opts ...func(*Framework)) *Framework {
 defer ginkgo.GinkgoRecover()

@ -37,7 +37,7 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
 f.NewSlowEchoDeployment()
 })

-ginkgo.It("should shutdown in less than 60 secons without pending connections", func() {
+ginkgo.It("should shutdown in less than 60 seconds without pending connections", func() {
 f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, framework.SlowEchoService, 80, nil))

 f.WaitForNginxServer(host,
@ -32,8 +32,8 @@ import (
 )

 const (
 disable = "false"
-noAuthLocaltionSetting = "no-auth-locations"
+noAuthLocationSetting = "no-auth-locations"
 )

 var _ = framework.DescribeSetting("[Security] global-auth-url", func() {
@ -51,7 +51,7 @@ var _ = framework.DescribeSetting("[Security] global-auth-url", func() {
 fooPath := "/foo"
 barPath := "/bar"

-noAuthSetting := noAuthLocaltionSetting
+noAuthSetting := noAuthLocationSetting
 noAuthLocations := barPath

 enableGlobalExternalAuthAnnotation := "nginx.ingress.kubernetes.io/enable-global-auth"
@ -1,6 +1,6 @@
 // This is a loadtest under development
 // Test here is spec'd to have 100virtual-users
-// Other specs currently similar to smoktest
+// Other specs currently similar to smoketest
 // But loadtest needs testplan that likely uses auth & data-transfer

 import http from 'k6/http';
@ -35,7 +35,7 @@ export default function () {
 const req3 = {
 params: {
 headers: {
 'Content-Type': 'application/x-www-form-urlencoded'
 },
 },
 method: 'POST',
@ -1,4 +1,4 @@
-// smotest.js edited after copy/pasting from https://k6.io docs
+// smoketest.js edited after copy/pasting from https://k6.io docs
 // Using this like loadtest because of limited cpu/memory/other

 import http from 'k6/http';
@ -22,7 +22,7 @@ export const options = {
 };

 export default function () {
-// docs of k6 say this is how to adds host header
+// docs of k6 say this is how to add host header
 // needed as ingress is created with this host value
 const params = {
 headers: {'host': 'test.ingress-nginx-controller.ga'},
@ -39,7 +39,7 @@ export default function () {
 const req3 = {
 params: {
 headers: {
 'Content-Type': 'application/json'
 },
 },
 method: 'POST',