diff --git a/hack/lib/util/misc.sh b/hack/lib/util/misc.sh
index 0ca5cae3c63c..c19889ec1a34 100644
--- a/hack/lib/util/misc.sh
+++ b/hack/lib/util/misc.sh
@@ -119,10 +119,11 @@ readonly -f os::util::format_seconds
 # Return:
 #  None
 function os::util::sed() {
+	local sudo="${USE_SUDO:+sudo}"
 	if LANG=C sed --help 2>&1 | grep -q "GNU sed"; then
-		sed -i'' "$@"
+		${sudo} sed -i'' "$@"
 	else
-		sed -i '' "$@"
+		${sudo} sed -i '' "$@"
 	fi
 }
 readonly -f os::util::sed
@@ -202,4 +203,4 @@ function os::util::curl_etcd() {
 function os::util::host_platform() {
 	echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
 }
-readonly -f os::util::host_platform
\ No newline at end of file
+readonly -f os::util::host_platform
diff --git a/hack/test-end-to-end-docker.sh b/hack/test-end-to-end-docker.sh
index 97aef2babee1..09cd88751929 100755
--- a/hack/test-end-to-end-docker.sh
+++ b/hack/test-end-to-end-docker.sh
@@ -21,12 +21,36 @@ fi
 function cleanup() {
 	return_code=$?
 	os::cleanup::all "${return_code}"
+
+	# restore journald to previous form
+	if os::util::ensure::system_binary_exists 'systemctl'; then
+		os::log::info "Restoring journald limits"
+		${USE_SUDO:+sudo} mv /etc/systemd/{journald.conf.bak,journald.conf}
+		${USE_SUDO:+sudo} systemctl restart systemd-journald.service
+		# Docker has "some" problems when journald is restarted, so we need to
+		# restart docker, as well.
+		${USE_SUDO:+sudo} systemctl restart docker.service
+	fi
+
 	exit "${return_code}"
 }
 trap "cleanup" EXIT
 
 os::log::system::start
 
+# This turns off rate limiting in journald to bypass the problem from
+# https://github.com/openshift/origin/issues/12558.
+if os::util::ensure::system_binary_exists 'systemctl'; then
+	os::log::info "Turning off journald limits"
+	${USE_SUDO:+sudo} cp /etc/systemd/{journald.conf,journald.conf.bak}
+	os::util::sed "s/^.*RateLimitInterval.*$/RateLimitInterval=0/g" /etc/systemd/journald.conf
+	os::util::sed "s/^.*RateLimitBurst.*$/RateLimitBurst=0/g" /etc/systemd/journald.conf
+	${USE_SUDO:+sudo} systemctl restart systemd-journald.service
+	# Docker has "some" problems when journald is restarted, so we need to
+	# restart docker, as well.
+	${USE_SUDO:+sudo} systemctl restart docker.service
+fi
+
 # Setup
 os::log::info "openshift version: `openshift version`"
 os::log::info "oc version: `oc version`"
diff --git a/test/end-to-end/core.sh b/test/end-to-end/core.sh
index 7978720083d3..430f4e6b7025 100755
--- a/test/end-to-end/core.sh
+++ b/test/end-to-end/core.sh
@@ -378,30 +378,19 @@ os::cmd::expect_success 'oc login -u e2e-user'
 os::cmd::expect_success 'oc project test'
 os::cmd::expect_success 'oc whoami'
-if [[ -n "${SOLTYSH_DEBUG:-}" ]]; then
-	os::log::info "Running a CLI command in a container using the service account"
-	os::cmd::expect_success 'oc policy add-role-to-user view -z default'
-	os::cmd::try_until_success "oc sa get-token default"
-	oc run cli-with-token --attach --image="${IMAGE_PREFIX}:${TAG}" --restart=Never -- cli status --loglevel=4 > "${LOG_DIR}/cli-with-token.log" 2>&1
-	# TODO remove set +o errexit, once https://github.com/openshift/origin/issues/12558 gets proper fix
-	set +o errexit
-	os::cmd::expect_success_and_text "cat '${LOG_DIR}/cli-with-token.log'" 'Using in-cluster configuration'
-	os::cmd::expect_success_and_text "cat '${LOG_DIR}/cli-with-token.log'" 'In project test'
-	set -o errexit
-	os::cmd::expect_success 'oc delete pod cli-with-token'
-	oc run cli-with-token-2 --attach --image="${IMAGE_PREFIX}:${TAG}" --restart=Never -- cli whoami --loglevel=4 > "${LOG_DIR}/cli-with-token2.log" 2>&1
-	# TODO remove set +o errexit, once https://github.com/openshift/origin/issues/12558 gets proper fix
-	set +o errexit
-	os::cmd::expect_success_and_text "cat '${LOG_DIR}/cli-with-token2.log'" 'system:serviceaccount:test:default'
-	set -o errexit
-	os::cmd::expect_success 'oc delete pod cli-with-token-2'
-	oc run kubectl-with-token --attach --image="${IMAGE_PREFIX}:${TAG}" --restart=Never --command -- kubectl get pods --loglevel=4 > "${LOG_DIR}/kubectl-with-token.log" 2>&1
-	# TODO remove set +o errexit, once https://github.com/openshift/origin/issues/12558 gets proper fix
-	set +o errexit
-	os::cmd::expect_success_and_text "cat '${LOG_DIR}/kubectl-with-token.log'" 'Using in-cluster configuration'
-	os::cmd::expect_success_and_text "cat '${LOG_DIR}/kubectl-with-token.log'" 'kubectl-with-token'
-	set -o errexit
-fi
+os::log::info "Running a CLI command in a container using the service account"
+os::cmd::expect_success 'oc policy add-role-to-user view -z default'
+os::cmd::try_until_success "oc sa get-token default"
+oc run cli-with-token --attach --image="${IMAGE_PREFIX}:${TAG}" --restart=Never -- cli status --loglevel=4 > "${LOG_DIR}/cli-with-token.log" 2>&1
+os::cmd::expect_success_and_text "cat '${LOG_DIR}/cli-with-token.log'" 'Using in-cluster configuration'
+os::cmd::expect_success_and_text "cat '${LOG_DIR}/cli-with-token.log'" 'In project test'
+os::cmd::expect_success 'oc delete pod cli-with-token'
+oc run cli-with-token-2 --attach --image="${IMAGE_PREFIX}:${TAG}" --restart=Never -- cli whoami --loglevel=4 > "${LOG_DIR}/cli-with-token2.log" 2>&1
+os::cmd::expect_success_and_text "cat '${LOG_DIR}/cli-with-token2.log'" 'system:serviceaccount:test:default'
+os::cmd::expect_success 'oc delete pod cli-with-token-2'
+oc run kubectl-with-token --attach --image="${IMAGE_PREFIX}:${TAG}" --restart=Never --command -- kubectl get pods --loglevel=4 > "${LOG_DIR}/kubectl-with-token.log" 2>&1
+os::cmd::expect_success_and_text "cat '${LOG_DIR}/kubectl-with-token.log'" 'Using in-cluster configuration'
+os::cmd::expect_success_and_text "cat '${LOG_DIR}/kubectl-with-token.log'" 'kubectl-with-token'
 
 os::log::info "Testing deployment logs and failing pre and mid hooks ..."
 
 # test hook selectors
@@ -471,7 +460,7 @@ os::log::info "Validating exec"
 frontend_pod=$(oc get pod -l deploymentconfig=frontend --template='{{(index .items 0).metadata.name}}')
 # when running as a restricted pod the registry will run with a pre-allocated
 # user in the neighborhood of 1000000+. Look for a substring of the pre-allocated uid range
-os::cmd::expect_success_and_text "oc exec -p ${frontend_pod} id" '1000'
+os::cmd::expect_success_and_text "oc exec ${frontend_pod} id" '1000'
 os::cmd::expect_success_and_text "oc rsh pod/${frontend_pod} id -u" '1000'
 os::cmd::expect_success_and_text "oc rsh -T ${frontend_pod} id -u" '1000'
 
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
index 97a21876fe57..808dc8e84345 100644
--- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
+++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
@@ -17,7 +17,9 @@ limitations under the License.
 package spdy
 
 import (
+	"bufio"
 	"fmt"
+	"net"
 	"net/http"
 	"strings"
 
@@ -32,6 +34,19 @@ const HeaderSpdy31 = "SPDY/3.1"
 type responseUpgrader struct {
 }
 
+// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
+// calls will be handled directly by the underlying net.Conn with the exception
+// of Read calls, which will read from the bufio.Reader. This ensures that data
+// already inside the used bufio.Reader instance is also read.
+type connWrapper struct {
+	net.Conn
+	bufReader *bufio.Reader
+}
+
+func (w *connWrapper) Read(b []byte) (n int, err error) {
+	return w.bufReader.Read(b)
+}
+
 // NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
 // capable of upgrading HTTP responses using SPDY/3.1 via the
 // spdystream package.
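
The connWrapper type above is the substance of this change: after Hijack, any bytes the HTTP server has already pulled into its bufio.Reader never show up on the raw net.Conn, so the SPDY session could start with the beginning of the client's data missing. The following standalone sketch of the same wrapping pattern (not part of the patch; readThroughConn and the net.Pipe setup are illustrative stand-ins) shows that already-buffered bytes are still delivered when reads go through the wrapper:

package main

import (
	"bufio"
	"fmt"
	"net"
)

// readThroughConn mirrors the patch's connWrapper: it embeds a net.Conn but
// satisfies Read from the bufio.Reader, so bytes that were already buffered
// ahead of the hijack are not skipped.
type readThroughConn struct {
	net.Conn
	bufReader *bufio.Reader
}

func (c *readThroughConn) Read(b []byte) (int, error) {
	return c.bufReader.Read(b)
}

func main() {
	clientEnd, serverEnd := net.Pipe()
	go func() {
		clientEnd.Write([]byte("hello world"))
		clientEnd.Close()
	}()

	// Stand-in for the state Hijack hands back: the raw conn plus a
	// bufio.Reader that already holds data read from the socket.
	br := bufio.NewReader(serverEnd)
	peeked, _ := br.Peek(5) // pulls the pending write into the buffer
	fmt.Printf("already buffered: %q\n", peeked)

	// Reading through the wrapper still returns the buffered bytes; a direct
	// serverEnd.Read at this point would miss them (and report io.EOF once
	// the peer has closed).
	wrapped := &readThroughConn{Conn: serverEnd, bufReader: br}
	buf := make([]byte, 32)
	n, _ := wrapped.Read(buf)
	fmt.Printf("read through wrapper: %q\n", buf[:n])
}
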
@@ -62,13 +77,14 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque
 	w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
 	w.WriteHeader(http.StatusSwitchingProtocols)
 
-	conn, _, err := hijacker.Hijack()
+	conn, bufrw, err := hijacker.Hijack()
 	if err != nil {
 		runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
 		return nil
 	}
 
-	spdyConn, err := NewServerConnection(conn, newStreamHandler)
+	connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
+	spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
 	if err != nil {
 		runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
 		return nil
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade_test.go
index 5a514dd5bf60..09f16d375add 100644
--- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade_test.go
+++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade_test.go
@@ -20,6 +20,9 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"testing"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
 )
 
 func TestUpgradeResponse(t *testing.T) {
@@ -91,3 +94,60 @@ func TestUpgradeResponse(t *testing.T) {
 		}
 	}
 }
+
+func TestUpgradeResponseWithRoundTrip(t *testing.T) {
+	var serverDone chan struct{}
+	fakeServer := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		defer close(serverDone)
+		upgrader := NewResponseUpgrader()
+		recvStream := make(chan httpstream.Stream, 1)
+		conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error {
+			go func() {
+				<-replySent
+				recvStream <- stream
+			}()
+			return nil
+		})
+		if conn == nil {
+			t.Fatal("unexpected nil conn")
+		}
+		defer conn.Close()
+		select {
+		case stream := <-recvStream:
+			defer stream.Reset()
+			headerValue := stream.Headers().Get("mykey")
+			if headerValue != "myvalue" {
+				t.Errorf("expected headers.Get(mykey)=myvalue, got %q", headerValue)
+			}
+		case <-time.After(5 * time.Second):
+			t.Errorf("timeout waiting for created stream to arrive on server")
+		}
+	})
+	for i := 0; i < 1000; i++ {
+		serverDone = make(chan struct{})
+		server := httptest.NewServer(fakeServer)
+		defer server.Close()
+		req, err := http.NewRequest("GET", server.URL, nil)
+		if err != nil {
+			t.Fatalf("error creating request: %s", err)
+		}
+		upgradeRoundTripper := NewRoundTripper(nil)
+		client := &http.Client{Transport: upgradeRoundTripper}
+		resp, err := client.Do(req)
+		if err != nil {
+			t.Fatalf("unexpected non-nil err from client.Do: %s", err)
+		}
+		conn, err := upgradeRoundTripper.NewConnection(resp)
+		if err != nil {
+			t.Fatalf("unexpected non-nil err from upgradeRoundTripper.NewConnection: %s", err)
+		}
+		defer conn.Close()
+		headers := http.Header{}
+		headers.Set("mykey", "myvalue")
+		_, err = conn.CreateStream(headers)
+		if err != nil {
+			t.Fatalf("unexpected non-nil err from conn.CreateStream: %s", err)
+		}
+		<-serverDone
+	}
+}
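
TestUpgradeResponseWithRoundTrip repeats the full client/server round trip 1000 times, presumably because whether the client's first frames are already sitting in the hijacked bufio.Reader at upgrade time is a timing race. A smaller, self-contained sketch of that hijack path, assuming a plain TCP payload instead of SPDY frames (bufferedConn and the handler below are illustrative, not taken from the patch):

package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/http/httptest"
)

// bufferedConn plays the role of the patch's connWrapper for this sketch.
type bufferedConn struct {
	net.Conn
	bufReader *bufio.Reader
}

func (c *bufferedConn) Read(b []byte) (int, error) {
	return c.bufReader.Read(b)
}

func main() {
	got := make(chan string, 1)
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		hijacker, ok := w.(http.Hijacker)
		if !ok {
			http.Error(w, "cannot hijack", http.StatusInternalServerError)
			return
		}
		conn, bufrw, err := hijacker.Hijack()
		if err != nil {
			return
		}
		defer conn.Close()
		// The payload the client sent right behind its request headers may or
		// may not already sit in bufrw.Reader; reading through the wrapper
		// returns it either way.
		wrapped := &bufferedConn{Conn: conn, bufReader: bufrw.Reader}
		payload := make([]byte, 5)
		if _, err := io.ReadFull(wrapped, payload); err != nil {
			got <- "error: " + err.Error()
			return
		}
		got <- string(payload)
	}))
	defer srv.Close()

	conn, err := net.Dial("tcp", srv.Listener.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Send the request headers and the follow-up payload in a single write so
	// the payload is likely to be buffered together with the headers.
	fmt.Fprintf(conn, "GET / HTTP/1.1\r\nHost: example\r\n\r\nhello")
	fmt.Println("server read:", <-got)
}
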