Merge pull request #12025 from vjpai/bazel_unsec
Test grpc++_unsecure in Bazel build
diff --git a/include/grpc++/impl/codegen/call.h b/include/grpc++/impl/codegen/call.h
index f6eefb9..33d8f4c 100644
--- a/include/grpc++/impl/codegen/call.h
+++ b/include/grpc++/impl/codegen/call.h
@@ -349,6 +349,28 @@
bool allow_not_getting_message_;
};
+namespace CallOpGenericRecvMessageHelper {
+class DeserializeFunc {
+ public:
+ virtual Status Deserialize(grpc_byte_buffer* buf) = 0;
+ virtual ~DeserializeFunc() {}
+};
+
+template <class R>
+class DeserializeFuncType final : public DeserializeFunc {
+ public:
+ DeserializeFuncType(R* message) : message_(message) {}
+ Status Deserialize(grpc_byte_buffer* buf) override {
+ return SerializationTraits<R>::Deserialize(buf, message_);
+ }
+
+ ~DeserializeFuncType() override {}
+
+ private:
+ R* message_; // Not a managed pointer because management is external to this
+};
+} // namespace CallOpGenericRecvMessageHelper
+
class CallOpGenericRecvMessage {
public:
CallOpGenericRecvMessage()
@@ -356,9 +378,11 @@
template <class R>
void RecvMessage(R* message) {
- deserialize_ = [message](grpc_byte_buffer* buf) -> Status {
- return SerializationTraits<R>::Deserialize(buf, message);
- };
+ // Use an explicit base class pointer to avoid resolution error in the
+ // following unique_ptr::reset for some old implementations.
+ CallOpGenericRecvMessageHelper::DeserializeFunc* func =
+ new CallOpGenericRecvMessageHelper::DeserializeFuncType<R>(message);
+ deserialize_.reset(func);
}
// Do not change status if no message is received.
@@ -381,7 +405,7 @@
if (recv_buf_) {
if (*status) {
got_message = true;
- *status = deserialize_(recv_buf_).ok();
+ *status = deserialize_->Deserialize(recv_buf_).ok();
} else {
got_message = false;
g_core_codegen_interface->grpc_byte_buffer_destroy(recv_buf_);
@@ -392,12 +416,11 @@
*status = false;
}
}
- deserialize_ = DeserializeFunc();
+ deserialize_.reset();
}
private:
- typedef std::function<Status(grpc_byte_buffer*)> DeserializeFunc;
- DeserializeFunc deserialize_;
+ std::unique_ptr<CallOpGenericRecvMessageHelper::DeserializeFunc> deserialize_;
grpc_byte_buffer* recv_buf_;
bool allow_not_getting_message_;
};
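
Note on the call.h hunk above: the std::function deserializer is replaced by a small hand-rolled type-erasure hierarchy. DeserializeFuncType<R> is the only place that still knows the message type R, while the call op just stores a unique_ptr to the DeserializeFunc base. A minimal standalone sketch of the same pattern, using stand-in Status, Buffer, and Traits types rather than the real gRPC ones:

    // Stand-ins for the real gRPC types; only the shape matters here.
    #include <memory>
    #include <string>

    struct Status { bool ok; };
    struct Buffer { std::string bytes; };

    template <class R>
    struct Traits {  // plays the role of SerializationTraits<R>
      static Status Deserialize(Buffer* buf, R* msg) {
        *msg = R(buf->bytes);  // pretend parse
        return Status{true};
      }
    };

    // Untyped interface the call op stores.
    class DeserializeFunc {
     public:
      virtual Status Deserialize(Buffer* buf) = 0;
      virtual ~DeserializeFunc() {}
    };

    // Typed implementation; this is the only place R is visible.
    template <class R>
    class DeserializeFuncType final : public DeserializeFunc {
     public:
      explicit DeserializeFuncType(R* message) : message_(message) {}
      Status Deserialize(Buffer* buf) override {
        return Traits<R>::Deserialize(buf, message_);
      }

     private:
      R* message_;  // not owned, as in the original
    };

    int main() {
      std::string msg;
      // As in RecvMessage(): assign through an explicit base-class pointer
      // before handing ownership to the unique_ptr.
      DeserializeFunc* func = new DeserializeFuncType<std::string>(&msg);
      std::unique_ptr<DeserializeFunc> deserialize(func);
      Buffer buf{"hello"};
      return deserialize->Deserialize(&buf).ok ? 0 : 1;
    }

After this change, FinishOp only touches the erased base type (deserialize_->Deserialize(...) and deserialize_.reset()), so the member no longer depends on std::function at all.
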
diff --git a/test/core/util/port.c b/test/core/util/port.c
index f430c54..b1fc722 100644
--- a/test/core/util/port.c
+++ b/test/core/util/port.c
@@ -79,7 +79,7 @@
chosen_ports[num_chosen_ports - 1] = port;
}
-int grpc_pick_unused_port(void) {
+static int grpc_pick_unused_port_impl(void) {
int port = grpc_pick_port_using_server();
if (port != 0) {
chose_port(port);
@@ -88,7 +88,7 @@
return port;
}
-int grpc_pick_unused_port_or_die(void) {
+static int grpc_pick_unused_port_or_die_impl(void) {
int port = grpc_pick_unused_port();
if (port == 0) {
fprintf(stderr,
@@ -101,6 +101,31 @@
return port;
}
-void grpc_recycle_unused_port(int port) { GPR_ASSERT(free_chosen_port(port)); }
+static void grpc_recycle_unused_port_impl(int port) {
+ GPR_ASSERT(free_chosen_port(port));
+}
+
+static grpc_pick_port_functions g_pick_port_functions = {
+ grpc_pick_unused_port_impl, grpc_pick_unused_port_or_die_impl,
+ grpc_recycle_unused_port_impl};
+
+int grpc_pick_unused_port(void) {
+ return g_pick_port_functions.pick_unused_port_fn();
+}
+
+int grpc_pick_unused_port_or_die(void) {
+ return g_pick_port_functions.pick_unused_port_or_die_fn();
+}
+
+void grpc_recycle_unused_port(int port) {
+ g_pick_port_functions.recycle_unused_port_fn(port);
+}
+
+void grpc_set_pick_port_functions(grpc_pick_port_functions functions) {
+ GPR_ASSERT(functions.pick_unused_port_fn != NULL);
+ GPR_ASSERT(functions.pick_unused_port_or_die_fn != NULL);
+ GPR_ASSERT(functions.recycle_unused_port_fn != NULL);
+ g_pick_port_functions = functions;
+}
#endif /* GRPC_TEST_PICK_PORT */
diff --git a/test/core/util/port.h b/test/core/util/port.h
index 154e8f8..602099d 100644
--- a/test/core/util/port.h
+++ b/test/core/util/port.h
@@ -23,6 +23,12 @@
extern "C" {
#endif
+typedef struct grpc_pick_port_functions {
+ int (*pick_unused_port_fn)(void);
+ int (*pick_unused_port_or_die_fn)(void);
+ void (*recycle_unused_port_fn)(int port);
+} grpc_pick_port_functions;
+
/* pick a port number that is currently unused by either tcp or udp. return
0 on failure. */
int grpc_pick_unused_port(void);
@@ -36,6 +42,9 @@
* ports back to the server if it is going to allocate a large number. */
void grpc_recycle_unused_port(int port);
+/** Request the family of pick_port functions in \a functions be used. */
+void grpc_set_pick_port_functions(grpc_pick_port_functions functions);
+
#ifdef __cplusplus
}
#endif
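
Note on the port.c/port.h hunks above: the pick-port entry points now dispatch through a g_pick_port_functions table, and grpc_set_pick_port_functions() lets a test binary swap in its own implementations. A hedged sketch of one possible caller, assuming it builds inside the gRPC test tree; the sequential picker below is purely illustrative:

    #include <cstdio>
    #include <cstdlib>

    #include "test/core/util/port.h"

    // Illustrative replacement pickers: hand out ports from a fixed range
    // instead of consulting the shared port server.
    static int g_next_port = 15000;

    static int MyPick(void) { return g_next_port++; }
    static int MyPickOrDie(void) {
      int port = MyPick();
      if (port == 0) abort();
      return port;
    }
    static void MyRecycle(int port) { (void)port; /* nothing to reclaim */ }

    int main(void) {
      grpc_pick_port_functions fns = {MyPick, MyPickOrDie, MyRecycle};
      grpc_set_pick_port_functions(fns);
      // From here on, grpc_pick_unused_port() and friends dispatch through fns.
      printf("picked %d\n", grpc_pick_unused_port());
      return 0;
    }
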
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index 4fef535..b5cff66 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -215,7 +215,8 @@
{
std::unique_lock<std::mutex> lock(mu_);
if (shutdown_) goto done;
- serverlist_cond_.wait(lock);
+ serverlist_cond_.wait(lock, [this] { return serverlist_ready_; });
+ serverlist_ready_ = false;
}
if (client_load_reporting_interval_seconds_ > 0) {
@@ -242,6 +243,7 @@
.drop_token_counts[drop_token_count.load_balance_token()] +=
drop_token_count.num_calls();
}
+ load_report_ready_ = true;
load_report_cond_.notify_one();
}
done:
@@ -285,12 +287,14 @@
const ClientStats& WaitForLoadReport() {
std::unique_lock<std::mutex> lock(mu_);
- load_report_cond_.wait(lock);
+ load_report_cond_.wait(lock, [this] { return load_report_ready_; });
+ load_report_ready_ = false;
return client_stats_;
}
void NotifyDoneWithServerlists() {
std::lock_guard<std::mutex> lock(mu_);
+ serverlist_ready_ = true;
serverlist_cond_.notify_one();
}
@@ -313,7 +317,9 @@
std::vector<ResponseDelayPair> responses_and_delays_;
std::mutex mu_;
std::condition_variable load_report_cond_;
+ bool load_report_ready_ = false;
std::condition_variable serverlist_cond_;
+ bool serverlist_ready_ = false;
ClientStats client_stats_;
bool shutdown_;
};
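
Note on the grpclb test hunks above: each condition_variable is now paired with a bool flag that is written under the same mutex before notify_one(), and the wait takes a predicate. That closes two races in the old code: a spurious wakeup no longer releases the waiter early, and a notification delivered before the waiter reaches wait() is no longer lost. A self-contained sketch of the same pairing, with illustrative names:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex mu;
    std::condition_variable cv;
    bool ready = false;  // the event flag, guarded by mu

    void Notifier() {
      std::lock_guard<std::mutex> lock(mu);
      ready = true;      // record the event even if nobody is waiting yet
      cv.notify_one();
    }

    void Waiter() {
      std::unique_lock<std::mutex> lock(mu);
      cv.wait(lock, [] { return ready; });  // re-checks the flag after every wakeup
      ready = false;                        // consume the event, as the test does
    }

    int main() {
      std::thread t1(Notifier);  // may finish before the waiter even starts
      std::thread t2(Waiter);
      t1.join();
      t2.join();
      return 0;
    }
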
diff --git a/tools/internal_ci/helper_scripts/prepare_build_macos_interop_rc b/tools/internal_ci/helper_scripts/prepare_build_macos_interop_rc
new file mode 100644
index 0000000..f467ac0
--- /dev/null
+++ b/tools/internal_ci/helper_scripts/prepare_build_macos_interop_rc
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Source this rc script to prepare the environment for MacOS interop
+# builds. This rc script must be used in the root directory of gRPC
+# and is expected to be used before prepare_build_macos_rc
+
+export CONFIG=opt
+
+# Move gRPC repo to directory that Docker for Mac has drive access to
+mkdir /Users/kbuilder/workspace
+cp -R ./ /Users/kbuilder/workspace/grpc
+cd /Users/kbuilder/workspace/grpc
+
+# Needed for identifying Docker image sha1
+brew install md5sha1sum
+
+# Set up gRPC-Go and gRPC-Java to test
+git clone --recursive https://github.com/grpc/grpc-go ./../grpc-go
+git clone --recursive https://github.com/grpc/grpc-java ./../grpc-java
+
+# Set up Docker for Mac
+docker-machine create -d virtualbox --virtualbox-share-folder "/Users/kbuilder/workspace:" default
+docker-machine env default
+eval $(docker-machine env default)
+
diff --git a/tools/internal_ci/macos/grpc_basictests.cfg b/tools/internal_ci/macos/grpc_basictests.cfg
deleted file mode 100644
index 3faba2f..0000000
--- a/tools/internal_ci/macos/grpc_basictests.cfg
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2017 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Config file for the internal CI (in protobuf text format)
-
-# Location of the continuous shell script in repository.
-build_file: "grpc/tools/internal_ci/macos/grpc_run_tests_matrix.sh"
-gfile_resources: "/bigstore/grpc-testing-secrets/gcp_credentials/GrpcTesting-d0eeee2db331.json"
-timeout_mins: 240
-action {
- define_artifacts {
- regex: "**/*sponge_log.xml"
- regex: "github/grpc/reports/**"
- }
-}
-
-env_vars {
- key: "RUN_TESTS_FLAGS"
- value: "-f basictests macos --internal_ci -j 1 --inner_jobs 4 --bq_result_table aggregate_results"
-}
diff --git a/tools/internal_ci/macos/grpc_interop.sh b/tools/internal_ci/macos/grpc_interop.sh
index 07601a6..b03401b 100755
--- a/tools/internal_ci/macos/grpc_interop.sh
+++ b/tools/internal_ci/macos/grpc_interop.sh
@@ -18,6 +18,7 @@
# change to grpc repo root
cd $(dirname $0)/../../..
-source tools/internal_ci/helper_scripts/prepare_build_interop_rc
+source tools/internal_ci/helper_scripts/prepare_build_macos_interop_rc
+source tools/internal_ci/helper_scripts/prepare_build_macos_rc
tools/run_tests/run_interop_tests.py -l objc -s all --use_docker -t -j 1
diff --git a/tools/run_tests/dockerize/build_interop_stress_image.sh b/tools/run_tests/dockerize/build_interop_stress_image.sh
deleted file mode 100755
index acb566f..0000000
--- a/tools/run_tests/dockerize/build_interop_stress_image.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This script is invoked by run_interop_tests.py to build the docker image
-# for interop testing. You should never need to call this script on your own.
-
-set -x
-
-# Params:
-# INTEROP_IMAGE - Name of tag of the final interop image
-# INTEROP_IMAGE_REPOSITORY_TAG - Optional. If set, the created image will be tagged using
-# the command: 'docker tag $INTEROP_IMAGE $INTEROP_IMAGE_REPOSITORY_TAG'
-# BASE_NAME - Base name used to locate the base Dockerfile and build script
-# BUILD_TYPE - The 'CONFIG' variable passed to the 'make' command (example:
-# asan, tsan. Default value: opt).
-# TTY_FLAG - optional -t flag to make docker allocate tty
-# BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
-# docker run command
-
-cd `dirname $0`/../../..
-GRPC_ROOT=`pwd`
-MOUNT_ARGS="-v $GRPC_ROOT:/var/local/jenkins/grpc:ro"
-
-GRPC_JAVA_ROOT=`cd ../grpc-java && pwd`
-if [ "$GRPC_JAVA_ROOT" != "" ]
-then
- MOUNT_ARGS+=" -v $GRPC_JAVA_ROOT:/var/local/jenkins/grpc-java:ro"
-else
- echo "WARNING: grpc-java not found, it won't be mounted to the docker container."
-fi
-
-GRPC_GO_ROOT=`cd ../grpc-go && pwd`
-if [ "$GRPC_GO_ROOT" != "" ]
-then
- MOUNT_ARGS+=" -v $GRPC_GO_ROOT:/var/local/jenkins/grpc-go:ro"
-else
- echo "WARNING: grpc-go not found, it won't be mounted to the docker container."
-fi
-
-mkdir -p /tmp/ccache
-
-# Mount service account dir if available.
-# If service_directory does not contain the service account JSON file,
-# some of the tests will fail.
-if [ -e $HOME/service_account ]
-then
- MOUNT_ARGS+=" -v $HOME/service_account:/var/local/jenkins/service_account:ro"
-fi
-
-# Use image name based on Dockerfile checksum
-BASE_IMAGE=${BASE_NAME}_base:`sha1sum tools/dockerfile/stress_test/$BASE_NAME/Dockerfile | cut -f1 -d\ `
-
-# Make sure base docker image has been built. Should be instantaneous if so.
-docker build -t $BASE_IMAGE --force-rm=true tools/dockerfile/stress_test/$BASE_NAME || exit $?
-
-# Create a local branch so the child Docker script won't complain
-git branch -f jenkins-docker
-
-CONTAINER_NAME="build_${BASE_NAME}_$(uuidgen)"
-
-# Prepare image for interop tests, commit it on success.
-(docker run \
- -e CCACHE_DIR=/tmp/ccache \
- -e THIS_IS_REALLY_NEEDED='see https://github.com/docker/docker/issues/14203 for why docker is awful' \
- -e BUILD_TYPE=${BUILD_TYPE:=opt} \
- -i $TTY_FLAG \
- $MOUNT_ARGS \
- $BUILD_INTEROP_DOCKER_EXTRA_ARGS \
- -v /tmp/ccache:/tmp/ccache \
- --name=$CONTAINER_NAME \
- $BASE_IMAGE \
- bash -l /var/local/jenkins/grpc/tools/dockerfile/stress_test/$BASE_NAME/build_interop_stress.sh \
- && docker commit $CONTAINER_NAME $INTEROP_IMAGE \
- && ( if [ -n "$INTEROP_IMAGE_REPOSITORY_TAG" ]; then docker tag $INTEROP_IMAGE $INTEROP_IMAGE_REPOSITORY_TAG ; fi ) \
- && echo "Successfully built image $INTEROP_IMAGE")
-EXITCODE=$?
-
-# remove intermediate container, possibly killing it first
-docker rm -f $CONTAINER_NAME
-
-exit $EXITCODE
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 106c7e0..19186be 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -74,19 +74,21 @@
bq = big_query_utils.create_big_query()
query = """
- SELECT
- test_name,
- SUM(result != 'PASSED'
- AND result != 'SKIPPED') AS count_failed,
- FROM
- [grpc-testing:jenkins_test_results.aggregate_results]
- WHERE
- timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
- AND NOT REGEXP_MATCH(job_name, '.*portability.*')
- GROUP BY
- test_name
- HAVING
- count_failed > 0"""
+SELECT
+ filtered_test_name,
+ FROM (
+ SELECT
+ REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
+ result
+ FROM
+ [grpc-testing:jenkins_test_results.aggregate_results]
+ WHERE
+ timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
+ AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
+GROUP BY
+ filtered_test_name
+HAVING
+ SUM(result != 'PASSED' AND result != 'SKIPPED') > 0"""
if limit:
query += " limit {}".format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
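
Note on the run_tests.py hunk above: the rewritten query strips numeric path components from test_name before grouping, so numbered variants of the same test (e.g. "h2_full_test/17" and "h2_full_test/4") fold into one filtered_test_name row when counting failures. The same normalization expressed as a small C++ sketch, with names of my own choosing:

    #include <iostream>
    #include <regex>
    #include <string>

    // Mirrors REGEXP_REPLACE(test_name, r'/\d+', '') from the query above.
    std::string FilterTestName(const std::string& test_name) {
      static const std::regex kNumericComponent("/\\d+");
      return std::regex_replace(test_name, kNumericComponent, "");
    }

    int main() {
      // Both collapse to "h2_full_test", so their results are grouped together.
      std::cout << FilterTestName("h2_full_test/17") << "\n"
                << FilterTestName("h2_full_test/4") << "\n";
      return 0;
    }
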